1 /*
2 CheMPS2: a spin-adapted implementation of DMRG for ab initio quantum chemistry
3 Copyright (C) 2013-2018 Sebastian Wouters
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License along
16 with this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <algorithm>

#include "TensorOperator.h"
#include "Lapack.h"
#include "Special.h"
#include "Wigner.h"
28
TensorOperator(const int boundary_index,const int two_j,const int n_elec,const int n_irrep,const bool moving_right,const bool prime_last,const bool jw_phase,const SyBookkeeper * bk_up,const SyBookkeeper * bk_down)29 CheMPS2::TensorOperator::TensorOperator( const int boundary_index, const int two_j, const int n_elec, const int n_irrep, const bool moving_right, const bool prime_last, const bool jw_phase, const SyBookkeeper * bk_up, const SyBookkeeper * bk_down ) : Tensor(){
30
31 // Copy the variables
32 this->index = boundary_index;
33 this->two_j = two_j;
34 this->n_elec = n_elec;
35 this->n_irrep = n_irrep;
36 this->moving_right = moving_right;
37 this->prime_last = prime_last;
38 this->jw_phase = jw_phase;
39 this->bk_up = bk_up;
40 this->bk_down = bk_down;
41
42 assert( two_j >= 0 );
43 assert( n_irrep >= 0 );
44 assert( n_irrep < bk_up->getNumberOfIrreps() );
45
46 nKappa = 0;
47 for ( int n_up = bk_up->gNmin( index ); n_up <= bk_up->gNmax( index ); n_up++ ){
48 for ( int two_s_up = bk_up->gTwoSmin( index, n_up ); two_s_up <= bk_up->gTwoSmax( index, n_up ); two_s_up += 2 ){
49 for ( int irrep_up = 0; irrep_up < bk_up->getNumberOfIrreps(); irrep_up++ ){
50 const int dim_up = bk_up->gCurrentDim( index, n_up, two_s_up, irrep_up );
51 if ( dim_up > 0 ){
52 const int irrep_down = Irreps::directProd( n_irrep, irrep_up );
53 const int n_down = n_up + n_elec;
54 for ( int two_s_down = two_s_up - two_j; two_s_down <= two_s_up + two_j; two_s_down += 2 ){
55 if ( two_s_down >= 0 ){
56 const int dim_down = bk_down->gCurrentDim( index, n_down, two_s_down, irrep_down );
57 if ( dim_down > 0 ){
58 nKappa++;
59 }
60 }
61 }
62 }
63 }
64 }
65 }
66
67 sector_nelec_up = new int[ nKappa ];
68 sector_irrep_up = new int[ nKappa ];
69 sector_spin_up = new int[ nKappa ];
70 sector_spin_down = (( two_j == 0 ) ? sector_spin_up : new int[ nKappa ] );
71 kappa2index = new int[ nKappa + 1 ];
72 kappa2index[ 0 ] = 0;
73
74 nKappa = 0;
75 for ( int n_up = bk_up->gNmin( index ); n_up <= bk_up->gNmax( index ); n_up++ ){
76 for ( int two_s_up = bk_up->gTwoSmin( index, n_up ); two_s_up <= bk_up->gTwoSmax( index, n_up ); two_s_up += 2 ){
77 for ( int irrep_up = 0; irrep_up < bk_up->getNumberOfIrreps(); irrep_up++ ){
78 const int dim_up = bk_up->gCurrentDim( index, n_up, two_s_up, irrep_up );
79 if ( dim_up > 0 ){
80 const int irrep_down = Irreps::directProd( n_irrep, irrep_up );
81 const int n_down = n_up + n_elec;
82 for ( int two_s_down = two_s_up - two_j; two_s_down <= two_s_up + two_j; two_s_down += 2 ){
83 if ( two_s_down >= 0 ){
84 const int dim_down = bk_down->gCurrentDim( index, n_down, two_s_down, irrep_down );
85 if ( dim_down > 0 ){
86 sector_nelec_up [ nKappa ] = n_up;
87 sector_irrep_up [ nKappa ] = irrep_up;
88 sector_spin_up [ nKappa ] = two_s_up;
89 sector_spin_down[ nKappa ] = two_s_down;
90 kappa2index[ nKappa + 1 ] = kappa2index[ nKappa ] + dim_up * dim_down;
91 nKappa++;
92 }
93 }
94 }
95 }
96 }
97 }
98 }
99
100 storage = new double[ kappa2index[ nKappa ] ];
101
102 }
103
~TensorOperator()104 CheMPS2::TensorOperator::~TensorOperator(){
105
106 delete [] sector_nelec_up;
107 delete [] sector_irrep_up;
108 delete [] sector_spin_up;
109 delete [] kappa2index;
110 delete [] storage;
111 if ( two_j != 0 ){ delete [] sector_spin_down; }
112
113 }
114
gNKappa() const115 int CheMPS2::TensorOperator::gNKappa() const { return nKappa; }
116
gStorage()117 double * CheMPS2::TensorOperator::gStorage() { return storage; }
118
gKappa(const int N1,const int TwoS1,const int I1,const int N2,const int TwoS2,const int I2) const119 int CheMPS2::TensorOperator::gKappa( const int N1, const int TwoS1, const int I1, const int N2, const int TwoS2, const int I2 ) const{
120
121 if ( Irreps::directProd( I1, n_irrep ) != I2 ){ return -1; }
122 if ( N2 != N1 + n_elec ){ return -1; }
123 if ( abs( TwoS1 - TwoS2 ) > two_j ){ return -1; }
124
125 if ( two_j == 0 ){
126 for ( int cnt = 0; cnt < nKappa; cnt++ ){
127 if (( sector_nelec_up[ cnt ] == N1 ) && ( sector_spin_up[ cnt ] == TwoS1 ) && ( sector_irrep_up[ cnt ] == I1 )){ return cnt; }
128 }
129 } else {
130 for ( int cnt = 0; cnt < nKappa; cnt++ ){
131 if (( sector_nelec_up[ cnt ] == N1 ) && ( sector_spin_up[ cnt ] == TwoS1 ) && ( sector_irrep_up[ cnt ] == I1 ) && ( sector_spin_down[ cnt ] == TwoS2 )){ return cnt; }
132 }
133 }
134
135 return -1;
136
137 }
138
gKappa2index(const int kappa) const139 int CheMPS2::TensorOperator::gKappa2index( const int kappa ) const{ return kappa2index[ kappa ]; }
140
gStorage(const int N1,const int TwoS1,const int I1,const int N2,const int TwoS2,const int I2)141 double * CheMPS2::TensorOperator::gStorage( const int N1, const int TwoS1, const int I1, const int N2, const int TwoS2, const int I2 ){
142
143 int kappa = gKappa( N1, TwoS1, I1, N2, TwoS2, I2 );
144 if ( kappa == -1 ){ return NULL; }
145 return storage + kappa2index[ kappa ];
146
147 }
148
gIndex() const149 int CheMPS2::TensorOperator::gIndex() const { return index; }
150
get_2j() const151 int CheMPS2::TensorOperator::get_2j() const{ return two_j; }
152
get_nelec() const153 int CheMPS2::TensorOperator::get_nelec() const{ return n_elec; }
154
get_irrep() const155 int CheMPS2::TensorOperator::get_irrep() const { return n_irrep; }
156
clear()157 void CheMPS2::TensorOperator::clear(){
158
159 for ( int cnt = 0; cnt < kappa2index[ nKappa ]; cnt++ ){ storage[ cnt ] = 0.0; }
160
161 }
162
update(TensorOperator * previous,TensorT * mps_tensor_up,TensorT * mps_tensor_down,double * workmem)163 void CheMPS2::TensorOperator::update( TensorOperator * previous, TensorT * mps_tensor_up, TensorT * mps_tensor_down, double * workmem ){
164
165 clear();
166
167 if ( moving_right ){
168 for ( int ikappa = 0; ikappa < nKappa; ikappa++ ){
169 update_moving_right( ikappa, previous, mps_tensor_up, mps_tensor_down, workmem );
170 }
171 } else {
172 for ( int ikappa = 0; ikappa < nKappa; ikappa++ ){
173 update_moving_left( ikappa, previous, mps_tensor_up, mps_tensor_down, workmem );
174 }
175 }
176
177 }
178
/**
 * Add the contribution of MPS site ( index - 1 ) to storage block ikappa for a
 * right-moving sweep: storage += prefactor * mps_up^T * previous * mps_down,
 * summed over the six possible local site sectors of the MPS tensors. The
 * prefactor carries the spin recoupling (Wigner-6j) and, when jw_phase is set,
 * the Jordan-Wigner sign of commuting the operator through the local site.
 */
void CheMPS2::TensorOperator::update_moving_right( const int ikappa, TensorOperator * previous, TensorT * mps_tensor_up, TensorT * mps_tensor_down, double * workmem ){

   // Quantum numbers of block ikappa at the right boundary ( index ).
   const int n_right_up = sector_nelec_up[ ikappa ];
   const int n_right_down = n_right_up + n_elec;
   const int two_s_right_up = sector_spin_up[ ikappa ];
   const int two_s_right_down = sector_spin_down[ ikappa ];
   const int irrep_right_up = sector_irrep_up[ ikappa ];
   const int irrep_right_down = Irreps::directProd( irrep_right_up, n_irrep );

   int dim_right_up = bk_up->gCurrentDim( index, n_right_up, two_s_right_up, irrep_right_up );
   int dim_right_down = bk_down->gCurrentDim( index, n_right_down, two_s_right_down, irrep_right_down );

   // Six local site sectors: (0,0,0), (0,0,2), and four (Ilocal,1/2,1) spin combinations.
   for ( int geval = 0; geval < 6; geval++ ){
      int n_left_up, n_left_down, two_s_left_up, two_s_left_down, irrep_left_up, irrep_left_down;
      switch ( geval ){
         case 0: // MPS tensor sector (I,J,N) = (0,0,0)
            two_s_left_up = two_s_right_up;
            two_s_left_down = two_s_right_down;
            n_left_up = n_right_up;
            n_left_down = n_right_down;
            irrep_left_up = irrep_right_up;
            irrep_left_down = irrep_right_down;
            break;
         case 1: // MPS tensor sector (I,J,N) = (0,0,2)
            two_s_left_up = two_s_right_up;
            two_s_left_down = two_s_right_down;
            n_left_up = n_right_up - 2;
            n_left_down = n_right_down - 2;
            irrep_left_up = irrep_right_up;
            irrep_left_down = irrep_right_down;
            break;
         case 2: // MPS tensor sector (I,J,N) = (Ilocal,1/2,1)
            two_s_left_up = two_s_right_up - 1;
            two_s_left_down = two_s_right_down - 1;
            n_left_up = n_right_up - 1;
            n_left_down = n_right_down - 1;
            irrep_left_up = Irreps::directProd( irrep_right_up, bk_up->gIrrep( index - 1 ) );
            irrep_left_down = Irreps::directProd( irrep_right_down, bk_up->gIrrep( index - 1 ) ); // bk_up and bk_down treat the same orbitals and ordering
            break;
         case 3: // MPS tensor sector (I,J,N) = (Ilocal,1/2,1)
            two_s_left_up = two_s_right_up - 1;
            two_s_left_down = two_s_right_down + 1;
            n_left_up = n_right_up - 1;
            n_left_down = n_right_down - 1;
            irrep_left_up = Irreps::directProd( irrep_right_up, bk_up->gIrrep( index - 1 ) );
            irrep_left_down = Irreps::directProd( irrep_right_down, bk_up->gIrrep( index - 1 ) ); // bk_up and bk_down treat the same orbitals and ordering
            break;
         case 4: // MPS tensor sector (I,J,N) = (Ilocal,1/2,1)
            two_s_left_up = two_s_right_up + 1;
            two_s_left_down = two_s_right_down - 1;
            n_left_up = n_right_up - 1;
            n_left_down = n_right_down - 1;
            irrep_left_up = Irreps::directProd( irrep_right_up, bk_up->gIrrep( index - 1 ) );
            irrep_left_down = Irreps::directProd( irrep_right_down, bk_up->gIrrep( index - 1 ) ); // bk_up and bk_down treat the same orbitals and ordering
            break;
         case 5: // MPS tensor sector (I,J,N) = (Ilocal,1/2,1)
            two_s_left_up = two_s_right_up + 1;
            two_s_left_down = two_s_right_down + 1;
            n_left_up = n_right_up - 1;
            n_left_down = n_right_down - 1;
            irrep_left_up = Irreps::directProd( irrep_right_up, bk_up->gIrrep( index - 1 ) );
            irrep_left_down = Irreps::directProd( irrep_right_down, bk_up->gIrrep( index - 1 ) ); // bk_up and bk_down treat the same orbitals and ordering
            break;
      }

      // The left spins must also be compatible with the operator spin two_j.
      if ( abs( two_s_left_up - two_s_left_down ) <= two_j ){

         int dim_left_up = bk_up->gCurrentDim( index - 1, n_left_up, two_s_left_up, irrep_left_up );
         int dim_left_down = bk_down->gCurrentDim( index - 1, n_left_down, two_s_left_down, irrep_left_down );

         if (( dim_left_up > 0 ) && ( dim_left_down > 0 )){

            double * mps_block_up = mps_tensor_up->gStorage( n_left_up, two_s_left_up, irrep_left_up, n_right_up, two_s_right_up, irrep_right_up );
            double * mps_block_down = mps_tensor_down->gStorage( n_left_down, two_s_left_down, irrep_left_down, n_right_down, two_s_right_down, irrep_right_down );
            double * left_block = previous->gStorage( n_left_up, two_s_left_up, irrep_left_up, n_left_down, two_s_left_down, irrep_left_down );

            // Prefactor: trivial for geval < 2 (doubly occupied / empty local site);
            // for the singly occupied sectors it contains the Jordan-Wigner sign and,
            // when two_j > 0, the Wigner-6j spin recoupling. prime_last selects which
            // convention (primed spins last or first) is used in the 6j symbol.
            double alpha = 1.0;
            if ( geval >= 2 ){
               if ( two_j == 0 ){
                  alpha = ( ( jw_phase ) ? -1.0 : 1.0 );
               } else {
                  if ( prime_last ){
                     alpha = Special::phase( two_s_right_up + two_s_left_down + two_j + ( ( jw_phase ) ? 3 : 1 ) )
                           * sqrt( ( two_s_left_down + 1.0 ) * ( two_s_right_up + 1.0 ) )
                           * Wigner::wigner6j( two_s_left_up, two_s_left_down, two_j, two_s_right_down, two_s_right_up, 1 );
                  } else {
                     alpha = Special::phase( two_s_right_down + two_s_left_up + two_j + ( ( jw_phase ) ? 3 : 1 ) )
                           * sqrt( ( two_s_left_up + 1.0 ) * ( two_s_right_down + 1.0 ) )
                           * Wigner::wigner6j( two_s_left_down, two_s_left_up, two_j, two_s_right_up, two_s_right_down, 1 );
                  }
               }
            }

            // prefactor * mps_block_up^T * left_block --> mem
            char trans = 'T';
            char notr = 'N';
            double beta = 0.0; //set
            dgemm_(&trans, &notr, &dim_right_up, &dim_left_down, &dim_left_up,
                   &alpha, mps_block_up, &dim_left_up, left_block, &dim_left_up,
                   &beta, workmem, &dim_right_up);

            // mem * mps_block_down --> storage (accumulated over the six geval sectors)
            alpha = 1.0;
            beta = 1.0; //add
            dgemm_(&notr, &notr, &dim_right_up, &dim_right_down, &dim_left_down,
                   &alpha, workmem, &dim_right_up, mps_block_down, &dim_left_down,
                   &beta, storage + kappa2index[ikappa], &dim_right_up);
         }
      }
   }

}
292
/**
 * Add the contribution of MPS site ( index ) to storage block ikappa for a
 * left-moving sweep: storage += prefactor * mps_up * previous * mps_down^T,
 * summed over the six possible local site sectors of the MPS tensors. The
 * prefactor carries the spin recoupling (Wigner-6j), the multiplet dimension
 * ratio of the left boundary, and the Jordan-Wigner sign when jw_phase is set.
 */
void CheMPS2::TensorOperator::update_moving_left( const int ikappa, TensorOperator * previous, TensorT * mps_tensor_up, TensorT * mps_tensor_down, double * workmem ){

   // Quantum numbers of block ikappa at the left boundary ( index ).
   const int n_left_up = sector_nelec_up[ ikappa ];
   const int n_left_down = n_left_up + n_elec;
   const int two_s_left_up = sector_spin_up[ ikappa ];
   const int two_s_left_down = sector_spin_down[ ikappa ];
   const int irrep_left_up = sector_irrep_up[ ikappa ];
   const int irrep_left_down = Irreps::directProd( irrep_left_up, n_irrep );

   int dim_left_up = bk_up->gCurrentDim( index, n_left_up, two_s_left_up, irrep_left_up );
   int dim_left_down = bk_down->gCurrentDim( index, n_left_down, two_s_left_down, irrep_left_down );

   // Six local site sectors: (0,0,0), (0,0,2), and four (Ilocal,1/2,1) spin combinations.
   for ( int geval = 0; geval < 6; geval++ ){
      int n_right_up, n_right_down, two_s_right_up, two_s_right_down, irrep_right_up, irrep_right_down;
      switch ( geval ){
         case 0: // MPS tensor sector (I,J,N) = (0,0,0)
            two_s_right_up = two_s_left_up;
            two_s_right_down = two_s_left_down;
            n_right_up = n_left_up;
            n_right_down = n_left_down;
            irrep_right_up = irrep_left_up;
            irrep_right_down = irrep_left_down;
            break;
         case 1: // MPS tensor sector (I,J,N) = (0,0,2)
            two_s_right_up = two_s_left_up;
            two_s_right_down = two_s_left_down;
            n_right_up = n_left_up + 2;
            n_right_down = n_left_down + 2;
            irrep_right_up = irrep_left_up;
            irrep_right_down = irrep_left_down;
            break;
         case 2: // MPS tensor sector (I,J,N) = (Ilocal,1/2,1)
            two_s_right_up = two_s_left_up - 1;
            two_s_right_down = two_s_left_down - 1;
            n_right_up = n_left_up + 1;
            n_right_down = n_left_down + 1;
            irrep_right_up = Irreps::directProd( irrep_left_up, bk_up->gIrrep( index ) );
            irrep_right_down = Irreps::directProd( irrep_left_down, bk_up->gIrrep( index ) ); // bk_up and bk_down treat the same orbitals and ordering
            break;
         case 3: // MPS tensor sector (I,J,N) = (Ilocal,1/2,1)
            two_s_right_up = two_s_left_up - 1;
            two_s_right_down = two_s_left_down + 1;
            n_right_up = n_left_up + 1;
            n_right_down = n_left_down + 1;
            irrep_right_up = Irreps::directProd( irrep_left_up, bk_up->gIrrep( index ) );
            irrep_right_down = Irreps::directProd( irrep_left_down, bk_up->gIrrep( index ) ); // bk_up and bk_down treat the same orbitals and ordering
            break;
         case 4: // MPS tensor sector (I,J,N) = (Ilocal,1/2,1)
            two_s_right_up = two_s_left_up + 1;
            two_s_right_down = two_s_left_down - 1;
            n_right_up = n_left_up + 1;
            n_right_down = n_left_down + 1;
            irrep_right_up = Irreps::directProd( irrep_left_up, bk_up->gIrrep( index ) );
            irrep_right_down = Irreps::directProd( irrep_left_down, bk_up->gIrrep( index ) ); // bk_up and bk_down treat the same orbitals and ordering
            break;
         case 5: // MPS tensor sector (I,J,N) = (Ilocal,1/2,1)
            two_s_right_up = two_s_left_up + 1;
            two_s_right_down = two_s_left_down + 1;
            n_right_up = n_left_up + 1;
            n_right_down = n_left_down + 1;
            irrep_right_up = Irreps::directProd( irrep_left_up, bk_up->gIrrep( index ) );
            irrep_right_down = Irreps::directProd( irrep_left_down, bk_up->gIrrep( index ) ); // bk_up and bk_down treat the same orbitals and ordering
            break;
      }

      // The right spins must also be compatible with the operator spin two_j.
      if ( abs( two_s_right_up - two_s_right_down ) <= two_j ){

         int dim_right_up = bk_up->gCurrentDim( index + 1, n_right_up, two_s_right_up, irrep_right_up );
         int dim_right_down = bk_down->gCurrentDim( index + 1, n_right_down, two_s_right_down, irrep_right_down );

         if (( dim_right_up > 0 ) && ( dim_right_down > 0 )){

            double * mps_block_up = mps_tensor_up->gStorage( n_left_up, two_s_left_up, irrep_left_up, n_right_up, two_s_right_up, irrep_right_up );
            double * mps_block_down = mps_tensor_down->gStorage( n_left_down, two_s_left_down, irrep_left_down, n_right_down, two_s_right_down, irrep_right_down );
            double * right_block = previous->gStorage( n_right_up, two_s_right_up, irrep_right_up, n_right_down, two_s_right_down, irrep_right_down );

            // Prefactor: trivial for geval < 2 (doubly occupied / empty local site);
            // for the singly occupied sectors it contains the Jordan-Wigner sign, a
            // multiplet dimension ratio (the left-moving counterpart of the sqrt
            // factors in update_moving_right), and the Wigner-6j spin recoupling.
            double alpha = 1.0;
            if ( geval >= 2 ){
               if ( two_j == 0 ){
                  alpha = ( ( jw_phase ) ? -1.0 : 1.0 ) * (( two_s_right_up + 1.0 ) / ( two_s_left_up + 1 ));
               } else {
                  if ( prime_last ){
                     alpha = Special::phase( two_s_right_up + two_s_left_down + two_j + ( ( jw_phase ) ? 3 : 1 ) )
                           * ( two_s_right_down + 1 ) * sqrt( ( two_s_right_up + 1.0 ) / ( two_s_left_down + 1 ) )
                           * Wigner::wigner6j( two_s_right_up, two_s_right_down, two_j, two_s_left_down, two_s_left_up, 1 );
                  } else {
                     alpha = Special::phase( two_s_right_down + two_s_left_up + two_j + ( ( jw_phase ) ? 3 : 1 ) )
                           * ( two_s_right_up + 1 ) * sqrt( ( two_s_right_down + 1.0 ) / ( two_s_left_up + 1 ) )
                           * Wigner::wigner6j( two_s_right_down, two_s_right_up, two_j, two_s_left_up, two_s_left_down, 1 );
                  }
               }
            }

            // prefactor * mps_block_up * right_block --> mem
            char notr = 'N';
            double beta = 0.0; //set
            dgemm_(&notr, &notr, &dim_left_up, &dim_right_down, &dim_right_up,
                   &alpha, mps_block_up, &dim_left_up, right_block, &dim_right_up,
                   &beta, workmem, &dim_left_up);

            // mem * mps_block_down^T --> storage (accumulated over the six geval sectors)
            char trans = 'T';
            alpha = 1.0;
            beta = 1.0; //add
            dgemm_(&notr, &trans, &dim_left_up, &dim_left_down, &dim_right_down,
                   &alpha, workmem, &dim_left_up, mps_block_down, &dim_left_down,
                   &beta, storage + kappa2index[ikappa], &dim_left_up);
         }
      }
   }

}
406
daxpy(double alpha,TensorOperator * to_add)407 void CheMPS2::TensorOperator::daxpy( double alpha, TensorOperator * to_add ){
408
409 assert( nKappa == to_add->gNKappa() );
410 assert( kappa2index[ nKappa ] == to_add->gKappa2index( to_add->gNKappa() ) );
411 int inc = 1;
412 daxpy_( kappa2index + nKappa, &alpha, to_add->gStorage(), &inc, storage, &inc );
413
414 }
415
/**
 * storage += prefactor * transpose( to_add ), where the transpose swaps the up
 * and down sectors of each block. Only valid for particle-number conserving
 * operators ( n_elec == 0 ) with two_j == 0 or 2 (asserted), as used for the
 * C and D tensors. The spin-dependent prefactor below is specific to that use
 * case — see the historical note inside.
 * @param alpha  Scalar in front of the transposed tensor.
 * @param to_add Tensor whose transposed blocks are added to this one.
 */
void CheMPS2::TensorOperator::daxpy_transpose_tensorCD( const double alpha, TensorOperator * to_add ){

   assert( nKappa == to_add->gNKappa() );
   assert( kappa2index[ nKappa ] == to_add->gKappa2index( to_add->gNKappa() ) );
   assert( n_elec == 0 );
   assert( ( two_j == 0 ) || ( two_j == 2 ) );

   for ( int ikappa = 0; ikappa < nKappa; ikappa++ ){

      // Quantum numbers of block ikappa; with n_elec == 0 the up and down
      // particle numbers coincide.
      const int irrep_up = sector_irrep_up[ ikappa ];
      const int irrep_down = Irreps::directProd( irrep_up, n_irrep );
      const int two_s_up = sector_spin_up[ ikappa ];
      const int two_s_down = sector_spin_down[ ikappa ];
      const int n_updown = sector_nelec_up[ ikappa ];

      const int dim_up = bk_up->gCurrentDim( index, n_updown, two_s_up, irrep_up );
      const int dim_down = bk_down->gCurrentDim( index, n_updown, two_s_down, irrep_down );

      double prefactor = alpha;
      /*
         This phase factor comes historically from the TensorD and is not valid in general,
         as it is tightly coupled to the specific change from (for moving_right == true ):
               < 1/2 m1 1/2 -m2 | 1 (m1-m2) > * (-1)^{1/2-m2} * < j_L' j_L^z' 1 (m1-m2) | j_L j_L^z >
             = < 1/2 m2 1/2 -m1 | 1 (m2-m1) > * (-1)^{1/2-m1} * < j_L j_L^z 1 (m1-m2) | j_L' j_L^z' > * prefactor
      */
      if ( two_s_up != two_s_down ){
         prefactor *= Special::phase( two_s_up - two_s_down )
                    * sqrt(( moving_right ) ? (( two_s_up + 1.0 ) / ( two_s_down + 1 )) : (( two_s_down + 1.0 ) / ( two_s_up + 1 )));
      }

      // Fetch the block of to_add with up and down sectors interchanged and
      // add its element-wise transpose (column-major swap) to this block.
      double * block = to_add->gStorage( n_updown, two_s_down, irrep_down, n_updown, two_s_up, irrep_up );
      for ( int irow = 0; irow < dim_up; irow++ ){
         for ( int icol = 0; icol < dim_down; icol++ ){
            storage[ kappa2index[ikappa] + irow + dim_up * icol ] += prefactor * block[ icol + dim_down * irow ];
         }
      }

   }

}
456
/**
 * Compute the inner product of this tensor with buddy.
 * @param buddy The other tensor; when NULL, 0.0 is returned. Must have the
 *              same two_j, n_elec and n_irrep (asserted).
 * @param trans 'N' for a plain dot product over the flat storage; any other
 *              value uses buddy's blocks with up and down sectors interchanged
 *              (only allowed when n_elec == 0), including a spin-dependent
 *              prefactor for two_j != 0.
 * @return The inner product value.
 */
double CheMPS2::TensorOperator::inproduct( TensorOperator * buddy, const char trans ) const{

   if ( buddy == NULL ){ return 0.0; }

   assert( get_2j() == buddy->get_2j() );
   assert( n_elec == buddy->get_nelec() );
   assert( n_irrep == buddy->get_irrep() );

   double value = 0.0;

   if ( trans == 'N' ){

      // Identical sector layouts (same quantum numbers, same bookkeepers):
      // the inner product is a single ddot over the full contiguous storage.
      int length = kappa2index[ nKappa ];
      int inc = 1;
      value = ddot_( &length, storage, &inc, buddy->gStorage(), &inc );

   } else {

      // Transposed variant: pair each of our blocks with buddy's block that has
      // the up and down sectors interchanged, and trace the product.
      assert( n_elec == 0 );
      for ( int ikappa = 0; ikappa < nKappa; ikappa++ ){

         const int n_updown = sector_nelec_up[ ikappa ];
         const int two_j_up = sector_spin_up[ ikappa ];
         const int two_j_down = sector_spin_down[ ikappa ];
         const int irrep_up = sector_irrep_up[ ikappa ];
         const int irrep_down = Irreps::directProd( irrep_up, n_irrep );

         double * my_block = storage + kappa2index[ ikappa ];
         double * buddy_block = buddy->gStorage( n_updown, two_j_down, irrep_down, n_updown, two_j_up, irrep_up );
         const int dim_up = bk_up->gCurrentDim( index, n_updown, two_j_up, irrep_up );
         const int dim_down = bk_down->gCurrentDim( index, n_updown, two_j_down, irrep_down );

         // temp = trace( my_block^T * buddy_block^T ) for this sector pair.
         double temp = 0.0;
         for ( int row = 0; row < dim_up; row++ ){
            for ( int col = 0; col < dim_down; col++ ){
               temp += my_block[ row + dim_up * col ] * buddy_block[ col + dim_down * row ];
            }
         }

         // Spin-dependent weight of the transposed pairing (trivial for two_j == 0).
         const double prefactor = (( get_2j() == 0 ) ? 1.0 : ( sqrt( ( two_j_up + 1.0 ) / ( two_j_down + 1 ) ) * Special::phase( two_j_up - two_j_down ) ));
         value += prefactor * temp;

      }
   }

   return value;

}
505
506
507