StatMech
TimeEvolution.cpp File Reference

Functions

__global__ void EnergyStddev_kernel (Integer_t dim, Real_t *res_d, Complex_t *Hamiltonian_d, Integer_t LDT)
 
void EnergyStddev (std::vector< double > &res, matrix_gpu< Complex_t > const &Hamiltonian_d, TransSector const &Sector, GPUconfig const &GPUconf)
 
void __global__ StatePreparation (double time, Integer_t Nstates, Integer_t *dIndex, Integer_t dim_sub, Complex_t *dState, Integer_t LDS, Complex_t *dh_tot, Integer_t LDH, double *dEigenEnergy)
 
void EnergyStddev (std::vector< double > &res, matrix< Complex_t > const &Hamiltonian, TransSector const &Sector, void *GPUconf=nullptr)
 
void StatePreparation (double time, std::vector< Integer_t > &Index, Integer_t dim_sub, matrix< Complex_t > &State, matrix< Complex_t > &h_tot, std::vector< double > &eigenEnergy)
 
int main (int argc, char **argv)
 

Variables

namespace filesystem = std::experimental::filesystem
 

Function Documentation

◆ EnergyStddev() [1/2]

void EnergyStddev ( std::vector< double > &  res,
matrix< Complex_t > const &  Hamiltonian,
TransSector const &  Sector,
void *  GPUconf = nullptr 
)
98 {
99 Integer_t dim = SectorDimension(Sector);
100 res.resize(dim);
101 std::fill(res.begin(), res.end(), 0.0);
102
103 #pragma omp parallel for
104 for(size_t j = 0; j < dim; j++) {
105 for(size_t k = 0; k < j; k++) res.at(j) += real( conj(Hamiltonian.at(j,k))*Hamiltonian.at(j,k) );
106 for(size_t k = j+1; k < dim; k++) res.at(j) += real( conj(Hamiltonian.at(j,k))*Hamiltonian.at(j,k) );
107 res.at(j) = sqrt(res.at(j));
108 }
109 }
std::vector< TransSector > Sector(n_max+1)
MKL_INT Integer_t
Definition mytypes.hpp:359
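
For reference, the quantity this overload stores in res[j] is the energy standard deviation of the basis state j, built from the off-diagonal entries of row j. A short derivation, assuming the Hamiltonian is Hermitian (so that H_jj is real and |H_kj| = |H_jk|):

\[
\sigma_E(j)^2 = \langle j|H^2|j\rangle - \langle j|H|j\rangle^2
             = \sum_{k}\lvert H_{jk}\rvert^2 - \lvert H_{jj}\rvert^2
             = \sum_{k\neq j}\lvert H_{jk}\rvert^2,
\qquad
\mathrm{res}[j] = \sigma_E(j) = \sqrt{\sum_{k\neq j}\lvert H_{jk}\rvert^2}.
\]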

◆ EnergyStddev() [2/2]

void EnergyStddev ( std::vector< double > &  res,
matrix_gpu< Complex_t > const &  Hamiltonian_d,
TransSector const &  Sector,
GPUconfig const &  GPUconf 
)
70 {
71 Integer_t dim = SectorDimension(Sector);
72 res.resize(dim);
73
74 // magma_cprint_gpu(dim, dim, Hamiltonian_d.ptr(), Hamiltonian_d.LD(), GPUconf.queue());
75 matrix_gpu<Real_t> Intermediate_d(Hamiltonian_d.LD(), dim);
76 EnergyStddev_kernel<<<GPUconf.dimGrid(),GPUconf.dimBlock(),GPUconf.shared(),GPUconf.stream()>>>(dim, Intermediate_d.ptr(), Hamiltonian_d.ptr(), Hamiltonian_d.LD());
77
78 std::vector<Real_t> ones(dim,1.0);
79 matrix_gpu<Real_t> ones_d(dim, 1);
80 matrix_gpu<Real_t> res_d(dim, 1);
81 magma_setvector(dim, sizeof(Real_t), &*ones.begin(), 1, ones_d.ptr(), 1, GPUconf.queue());
82 gemv(MagmaNoTrans, dim, dim, Intermediate_d, ones_d, res_d, GPUconf.queue());
83 // magma_sprint_gpu(dim, dim, Intermediate_d.ptr(), Intermediate_d.LD(), GPUconf.queue());
84 magma_getvector(dim, sizeof(Real_t), res_d.ptr(), 1, &*ones.begin(), 1, GPUconf.queue());
85 #pragma omp parallel for
86 for (size_t j = 0; j < dim; j++) res[j] = (double)sqrt(ones[j]);
87 }
__global__ void EnergyStddev_kernel(Integer_t dim, Real_t *res_d, Complex_t *Hamiltonian_d, Integer_t LDT)
Definition TimeEvolution.cpp:59
void dimBlock(int x, int y, int z)
Definition mytypes.hpp:291
void queue(magma_queue_t x)
Definition mytypes.hpp:297
void shared(size_t x)
Definition mytypes.hpp:296
void dimGrid(int x, int y, int z)
Definition mytypes.hpp:286
cudaStream_t stream() const
Definition mytypes.hpp:303
GPUconfig GPUconf(dim3(nBlock, nBlock, 1), dim3(nThread, nThread, 1), 0, queue)
double Real_t
Definition mytypes.hpp:37
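
The GPU overload computes the same row norms in two stages: EnergyStddev_kernel writes |H_jk|^2 into an intermediate matrix with a zeroed diagonal, and a gemv against a vector of ones then reduces each row to the off-diagonal sum, with the square root taken back on the host. A self-contained CPU sketch of that two-stage structure, for illustration only (plain C++ instead of MAGMA; column-major storage with leading dimension ld is assumed, matching the LDT indexing in the kernel):

#include <cmath>
#include <complex>
#include <cstddef>
#include <vector>

// Illustration-only sketch of the two-stage reduction used by the GPU overload:
// stage 1 mirrors EnergyStddev_kernel (|H_jk|^2 with a zero diagonal),
// stage 2 mirrors the gemv with a vector of ones (row sums), followed by sqrt.
std::vector<double> offDiagonalRowNorms(std::vector<std::complex<double>> const& H,
                                        std::size_t dim, std::size_t ld)
{
    std::vector<double> inter(ld * dim, 0.0);
    for (std::size_t k = 0; k < dim; ++k)                 // stage 1: elementwise |H_jk|^2
        for (std::size_t j = 0; j < dim; ++j)
            inter[j + ld * k] = (j == k) ? 0.0 : std::norm(H[j + ld * k]);

    std::vector<double> const ones(dim, 1.0);             // stage 2: row sums as inter * ones
    std::vector<double> res(dim, 0.0);
    for (std::size_t k = 0; k < dim; ++k)
        for (std::size_t j = 0; j < dim; ++j)
            res[j] += inter[j + ld * k] * ones[k];

    for (std::size_t j = 0; j < dim; ++j) res[j] = std::sqrt(res[j]);
    return res;
}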

◆ EnergyStddev_kernel()

__global__ void EnergyStddev_kernel ( Integer_t  dim,
Real_t *  res_d,
Complex_t *  Hamiltonian_d,
Integer_t  LDT 
)
59 {
60 int const idx = blockIdx.x*blockDim.x +threadIdx.x;
61 int const idy = blockIdx.y*blockDim.y +threadIdx.y;
62 if(idx>=dim || idy>=dim) return;
63 if(idx == idy) {
64 res_d[idx+LDT*idy] = 0;
65 return;
66 }
67 res_d[idx+LDT*idy] = real( conj(Hamiltonian_d[idx+LDT*idy])*Hamiltonian_d[idx+LDT*idy] );
68 }
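
Each thread handles a single (idx, idy) element and returns early outside the dim x dim block, so the launch only needs a 2-D grid that covers the matrix. A minimal sketch of that launch geometry, following the GPUconf construction quoted above; the block edge of 16 threads is a placeholder chosen here, not a value taken from the source:

// Hypothetical helper mirroring the rounded-up 2-D launch geometry implied by the early-return guard.
inline int blocksPerEdge(int dim, int nThread) {
    return (dim + nThread - 1) / nThread;   // smallest nBlock with nBlock * nThread >= dim
}
// e.g. GPUconfig GPUconf(dim3(blocksPerEdge(dim, 16), blocksPerEdge(dim, 16), 1),
//                        dim3(16, 16, 1), 0, queue);   // cf. the GPUconf(...) construction shown above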

◆ main()

int main ( int  argc,
char **  argv 
)
122 {
124 double const dE = (argc>=Nargs_base+1) ? std::atof(argv[Nargs_base]) : 0.02;
125
126 Integer_t dim_sub; // dimension of the full Hilbert space
127 Integer_t info, failed=0; // counters
128 double gE, EnergyRange, OpRange, OpMin, sum;
129
130 if( !Initialize(argc, argv, Nargs_common) ) {
131 std::cerr << "Error: Initialization failed." << std::endl;
132 std::exit(EX_USAGE);
133 }
134 debug_print("# Successfully initialized.");
135
136 //******************** Check for the directory structure ********************
137 debug_print("# Checking for the directory structure.");
138 std::ofstream OutFs;
139 //******************** (END) Check for the directory structure ********************
140
141 //***************************************************************************
142 //******************** Allocation & Initialization **************************
143 //***************************************************************************
144 #ifdef GPU
145 magma_init();
146 magma_queue_t queue = NULL;
147 magma_int_t dev = 0;
148 magma_getdevice( &dev );
149 magma_queue_create( dev, &queue );
150 #else
151 void* GPUconf = nullptr;
152 #endif
153
154 //******************** Translation invariance ********************
155 debug_print("# Calculating translation-invariant sectors.");
157 //******************** (END)Translation invariance ********************
158
159 //********** Allocate CPU memories **********//
160 debug_print("# Allocating CPU memories.");
161 double time;
162 constexpr double Tmax = 10000, Emin=0.4, Emax=1-Emin;
163 constexpr Integer_t Nstep = 1000+1;
164 Integer_t const dim_max = SectorDimension(Sector[n_max]);
165 Integer_t NdataInShell, Id;
166 double MCAverage, shellWidth;
167
168 std::vector<Integer_t> Index(dim_max);
169 std::vector<double> eigenEnergy(dim_max);
170 std::vector<double> EXPvalue(dim_max);
171 std::vector<double> energyExpValue(dim_max);
172 std::vector<double> energyStddev(dim_max);
173 std::vector<Complex_t> ComplexVector_temp(dim_max);
174 matrix<Complex_t> h(dloc_h , dloc_h );
175 matrix<Complex_t> loc(dloc_op, dloc_op);
176 matrix<Complex_t> Dynamics(dim_max, Nstep);
177 #ifndef GPU
178 matrix<Complex_t> h_tot(dim_max, dim_max);
179 matrix<Complex_t> loc_tot(dim_max, dim_max);
180 matrix<Complex_t> State(dim_max, dim_max);
181 #define dh h
182 #define dloc loc
183 #define dh_tot h_tot
184 #define dloc_tot loc_tot
185 #define dState State
186 #endif
187 //********** (END) Allocate CPU memories **********//
188
189 #ifdef GPU
190 //********** Allocate GPU memories **********//
191 debug_print("# Allocating GPU memories.");
192 constexpr Integer_t GPU_UNIT = 32;
193 Integer_t const LDT = magma_roundup(dim_max, GPU_UNIT);
194 std::vector<Complex_t> tempVector(dim_max);
195 matrix_gpu<Complex_t> dh(dloc_h , dloc_h );
196 matrix_gpu<Complex_t> dloc(dloc_op, dloc_op);
197 matrix_gpu<Complex_t> dh_tot(LDT, dim_max);
198 matrix_gpu<Complex_t> dloc_tot(LDT, dim_max);
199 matrix_gpu<Complex_t> dState(LDT, dim_max);
200 matrix_gpu<Integer_t> dIndex(dim_max, 1);
201 matrix_gpu<double> dEigenEnergy(dim_max, 1);
202 matrix_gpu<Complex_t> dDynamics(LDT, Nstep);
203 matrix_gpu<Complex_t> dComplexMatrix_temp1(LDT, dim_max);
204 matrix_gpu<Complex_t> dComplexMatrix_temp2(LDT, dim_max);
205 //********** (END) Allocate GPU memories **********//
206
207 //********** Determine GPU configuration **********//
209 //********** (END) Determine GPU configuration **********//
210 #endif // #ifdef GPU
211
212 double start, t_int, end, temp_t;
213 double T_diag=0, T_post=0, T_pre=0;
214 init_genrand(SEED);
215 start = getETtime();
216 for(Integer_t repetition = 0;repetition < repMin; ++repetition) {
217 generateLocal_h( h, dloc_h, -1);
218 generateLocal_op(loc, dloc_op, -1);
219 }
220 end = getETtime();
221 std::cout << "(init_genrand): time=" << std::fixed << (end-start) << std::endl;
222 //***************************************************************************
223 //******************** (END) Allocation & Initialization ********************
224 //***************************************************************************
225
226 start = getETtime();
227 end = start;
228 for(Integer_t repetition = repMin;repetition <= repMax; ++repetition) {
229 // Randomly draw the local Hamiltonian and local observable ******************************//
230 generateLocal_h( h, dloc_h, -1);
231 generateLocal_op(loc, dloc_op, -1);
232
233 debug_print("# Setting matrix to GPU.");
234 #ifdef GPU
235 magma_setmatrix(dloc_h, dloc_h, sizeof(Complex_t), &*h.begin(), dloc_h, dh.ptr(), dloc_h, queue);
236 magma_setmatrix(dloc_op, dloc_op, sizeof(Complex_t), &*loc.begin(), dloc_op, dloc.ptr(), dloc_op, queue);
237 #endif
238
239 // magma_cprint_gpu(dloc_h, dloc_h, dh.ptr(), dh.LD(), queue);
240 // magma_cprint_gpu(dloc_op, dloc_op, dloc.ptr(), dloc.LD(), queue);
241
242 std::string outDirName(baseDirName);
243 {
244 std::stringstream buff;
245 buff << "/RawData/Sample_No" << repetition;
246 outDirName += buff.str();
247 outDirName = std::regex_replace(outDirName, std::regex("//"), "/");
248 filesystem::create_directories(outDirName);
249 }
250
251 for(size_t n = n_max;n >= n_min; --n) {
252 debug_print("# (rep,n)=(" << repetition << "," << n << ")");
253 dim_sub = SectorDimension( Sector[n] );
254 ComplexVector_temp.resize(dim_sub);
255 energyExpValue.resize(dim_sub);
257
258 debug_print("# Constructing global matrices in the sector.");
259 temp_t = getETtime();
260 {
261 dim_sub = constructGlobal_h( dh_tot, dh, num_h, Sector.at(n), GPUconf);
262 constructGlobal_op(dloc_tot, dloc, num_op, Sector.at(n), GPUconf);
263 #ifdef GPU
264 magma_queue_sync(queue);
265 #endif
266 }
267 {
268 #ifdef GPU
269 magma_getvector(dim_sub, sizeof(Complex_t), dh_tot.ptr(), dh_tot.LD()+1,&* ComplexVector_temp.begin(), 1, queue);
270 #endif
271 #pragma omp parallel for
272 for (size_t j = 0; j < dim_sub; j++) {
273 #ifdef GPU
274 energyExpValue.at(j) = real( ComplexVector_temp.at(j) );
275 #else
276 energyExpValue.at(j) = real( h_tot.at(j,j) );
277 #endif
278 }
279 EnergyStddev(energyStddev, dh_tot, Sector.at(n), GPUconf);
280 }
281 T_pre += getETtime() -temp_t;
282
283 // std::cerr << "N=" << n << ", dim_sub = " << dim_sub << ", dloc_tot.LD() = " << dloc_tot.LD() << std::endl;
284 // std::cerr << "isnan_kernel(before, dh_tot)" << std::endl;
285 // isnan_kernelMatrixElementsInSector<<<GPUconf.dimGrid(),GPUconf.dimBlock(),GPUconf.shared(),GPUconf.stream()>>>(dim_sub, dh_tot.ptr(), dh_tot.LD());
286 // std::cerr << "isnan_kernel(before, dloc_tot)" << std::endl;
287 // isnan_kernelMatrixElementsInSector<<<GPUconf.dimGrid(),GPUconf.dimBlock(),GPUconf.shared(),GPUconf.stream()>>>(dim_sub, dloc_tot.ptr(), dloc_tot.LD());
288 temp_t = getETtime();
289 {
290 debug_print("# Calculating eigenstate expectation values.");
291 info = EigenMatrixElements(eigenEnergy, dim_sub, dh_tot, dloc_tot, GPUconf);
292 if(info != 0) {
294 continue;
295 }
296 #ifdef GPU
297 magma_getvector(dim_sub, sizeof(Complex_t), dloc_tot.ptr(), dloc_tot.LD()+1, &*tempVector.begin(), 1, queue);
298 for(size_t j = 0;j < dim_sub; ++j) EXPvalue[j] = (double)real( tempVector.at(j) );
299 #else
300 for(size_t j = 0;j < dim_sub; ++j) EXPvalue[j] = real(dloc_tot.at(j,j));
301 #endif
302
303 EnergyRange = eigenEnergy[dim_sub-1] - eigenEnergy[0];
304 gE = eigenEnergy[0];
305 #pragma omp parallel for
306 for(size_t j = 0;j < dim_sub; ++j) {
307 eigenEnergy[j] = ( eigenEnergy[j] -gE )/EnergyRange;
308 energyExpValue[j] = ( energyExpValue[j] -gE )/EnergyRange;
309 energyStddev[j] = energyStddev[j] /EnergyRange;
310 }
311 }
312 T_diag += getETtime() -temp_t;
313 // std::cerr << "isnan_kernel(after, dh_tot)" << std::endl;
314 // isnan_kernelMatrixElementsInSector<<<GPUconf.dimGrid(),GPUconf.dimBlock(),GPUconf.shared(),GPUconf.stream()>>>(dim_sub, dh_tot.ptr(), dh_tot.LD());
315 // std::cerr << "isnan_kernel(after, dloc_tot)" << std::endl;
316 // isnan_kernelMatrixElementsInSector<<<GPUconf.dimGrid(),GPUconf.dimBlock(),GPUconf.shared(),GPUconf.stream()>>>(dim_sub, dloc_tot.ptr(), dloc_tot.LD());
317 // magma_queue_sync(queue);
318
319 // print(energyExpValue, dim_sub);
320 // print(energyStddev, dim_sub);
321
322 Index.resize(dim_sub);
323 std::iota(Index.begin(), Index.end(), 0);
324 std::sort(Index.begin(), Index.end(), [&energyExpValue](size_t x, size_t y) {
325 return energyExpValue[x] < energyExpValue[y];
326 });
327
328 temp_t = getETtime();
329 { debug_print("# Writing results to a file.");
330 std::stringstream buff("");
331 buff << "/FockStateEnergy" << PRECISION << "_N" << n << ".txt";
332 std::string filename(outDirName); filename += buff.str();
333 OutFs.open(filename); checkIsFileOpen(OutFs, filename);
334 OutFs << std::right << std::showpos << std::scientific << std::setprecision(6);
335 OutFs << "# EnergyRange= " << EnergyRange << "\n"
336 << "# gE= " << gE << "\n"
337 << "# 1.(State No.) 2.(Normalized energy) 3.(Energy) 4.(Normalized Energy Stddev)" << "\n\n";
338 for(size_t j = 0;j < dim_sub; ++j) {
339 info = Index.at(j);
340 OutFs << info << " "
341 << energyExpValue[info] << " "
342 << energyExpValue[info]*EnergyRange + gE << " "
343 << energyStddev[info] << std::endl;
344 }
345 OutFs.close();
346 }
347 T_post += getETtime() -temp_t;
348
349 Index.resize(0);
350 for(size_t j = 0;j < dim_sub; ++j) {
351 // if(Emin < energyExpValue[j] && energyExpValue[j] < Emax) {
352 // Index.push_back(j);
353 // }
354 if(Emin < energyExpValue[j]-energyStddev[j] && energyExpValue[j]+energyStddev[j] < Emax) {
355 Index.push_back(j);
356 }
357 }
358 print(Index, Index.size());
359
360 #ifdef GPU
361 magma_setvector(Index.size(), sizeof(Integer_t), &*Index.begin(), 1, dIndex.ptr(), 1, queue);
362 magma_setvector(dim_sub, sizeof(double), &*eigenEnergy.begin(), 1, dEigenEnergy.ptr(), 1, queue);
363 magma_queue_sync(queue);
364 #endif
365 for(size_t p = 0;p < Nstep;++p) {
366 time = (Tmax*p)/(double)(Nstep-1);
367 #ifdef GPU
368 StatePreparation<<<GPUconf.dimGrid(),GPUconf.dimBlock(),GPUconf.shared(),GPUconf.stream()>>>(time, Index.size(), dIndex.ptr(), dim_sub, dState.ptr(), dState.LD(), dh_tot.ptr(), dh_tot.LD(), dEigenEnergy.ptr());
369 magma_queue_sync(queue);
370 #else
371 StatePreparation(time, Index, dim_sub, State, h_tot, eigenEnergy);
372 #endif
373 #ifdef GPU
374 matrixProduct_hemm(MagmaLeft, MagmaUpper, dim_sub, Index.size(), dloc_tot, dState, dComplexMatrix_temp1, queue);
375 matrixProduct_gemm(MagmaConjTrans, MagmaNoTrans, Index.size(), Index.size(), dim_sub, dState, dComplexMatrix_temp1, dComplexMatrix_temp2, queue);
376 magma_copyvector(Index.size(), sizeof(Complex_t), dComplexMatrix_temp2.ptr(), dComplexMatrix_temp2.LD()+1, dDynamics.ptr()+dDynamics.LD()*p, 1, queue);
377 #else
378 for(size_t j = 0; j < Index.size(); j++) {
379 Id = Index.at(j);
380 Dynamics.at(j,p) = ComplexOne<>*QuantumExpValue_he(dim_sub, State.begin()+State.LD()*j, dloc_tot, ComplexVector_temp);
381 }
382 #endif
383 }
384 #ifdef GPU
385 // magma_cprint_gpu(Index.size(), Nstep, dDynamics.ptr(), dDynamics.LD(), queue);
386 magma_getmatrix(Index.size(), Nstep, sizeof(Complex_t), dDynamics.ptr(), dDynamics.LD(), &*Dynamics.begin(), Dynamics.LD(), queue);
387 magma_queue_sync(queue);
388 #endif
389
390 // if( n==9 ) magma_cprint_gpu(dim_sub, dim_sub, dloc_tot.ptr(), dloc_tot.LD(), queue);
391 OpRange = SpectralRange(OpMin, dim_sub, dloc_tot);
392 for(size_t j = 0; j < Index.size(); j++) {
393 Id = Index.at(j);
394 shellWidth = dE;
395 MCAverage = MicroCanonicalAverage(NdataInShell, energyExpValue[Id], shellWidth, dim_sub, eigenEnergy, EXPvalue);
396 for(size_t k = 1; isnan(MCAverage); ++k) {
397 shellWidth = k*energyStddev[Id];
398 MCAverage = MicroCanonicalAverage(NdataInShell, energyExpValue[Id], shellWidth, dim_sub, eigenEnergy, EXPvalue);
399 }
400
401
402 debug_print("# Writing results to a file.");
403 std::stringstream buff("");
404 buff << "/N" << n << "_" << PRECISION << "_StateNo" << Id << ".txt";
405 filesystem::create_directories(outDirName+"/Dynamics");
406 std::string filename(outDirName+"/Dynamics"); filename += buff.str();
407 OutFs.open(filename); checkIsFileOpen(OutFs, filename);
408 OutFs << std::right << std::showpos << std::scientific << std::setprecision(6);
409 OutFs << "# EnergyRange= " << EnergyRange << "\n"
410 << "# gE= " << gE << "\n"
411 << "# NormalizedEnergyEXPvalue= " << energyExpValue[Id] << "\n"
412 << "# NormalizedEnergyStddev= " << energyStddev[Id] << "\n"
413 << "# OpRange= " << OpRange << "\n"
414 << "# OpMin= " << OpMin << "\n"
415 << "# NormalizedMCAverage= " << (MCAverage -OpMin)/OpRange << "\n"
416 << "# shellWidth= " << shellWidth << "\n"
417 << "# NdataInShell= " << NdataInShell << "\n"
418 << "# 1.(time) 2.(EXPvalue) 3.(Normalized EXPvalue) 4.(Cumulative of Normalized EXPvalue)" << "\n\n";
419 sum = 0;
420 for(size_t p = 0;p < Nstep;++p) {
421 sum += real(Dynamics.at(j,p));
422 time = (Tmax*p)/(double)(Nstep-1);
423 OutFs << time << " "
424 << real(Dynamics.at(j,p)) << " "
425 << ( real(Dynamics.at(j,p)) -OpMin) / OpRange << " "
426 << (sum/(double)(p+1) -OpMin) / OpRange
427 << std::endl;
428 }
429 OutFs.close();
430 }
431
432
433
434 }
435 if(repetition%10 == 9) {
436 t_int = end; end = getETtime();
437 std::cerr << "(total=" << std::setw(6) << repetition+1
438 << "): timeINT=" << std::setprecision(6) << std::setw(8) << (end-t_int)
439 << ", timeTOT=" << std::setprecision(6) << std::setw(8) << (end-start)
440 << ", T_construct=" << std::setprecision(6) << std::setw(10) << T_pre << "(" << std::setprecision(1) << 100*T_pre /(end-start) << "%)"
441 << ", T_diag=" << std::setprecision(6) << std::setw(8) << T_diag << "(" << std::setprecision(1) << 100*T_diag/(end-start) << "%)"
442 << ", T_process=" << std::setprecision(6) << std::setw(8) << T_post << "(" << std::setprecision(1) << 100*T_post/(end-start) << "%)"
443 << std::endl;
444 }
445 }
446
447 Finalize(argc, argv);
448 #ifdef GPU
449 magma_finalize();
450 #endif
451 return 0;
452 }
double getETtime()
Definition EnergySpectrum.c:14
void __global__ StatePreparation(double time, Integer_t Nstates, Integer_t *dIndex, Integer_t dim_sub, Complex_t *dState, Integer_t LDS, Complex_t *dh_tot, Integer_t LDH, double *dEigenEnergy)
Definition TimeEvolution.cpp:89
void EnergyStddev(std::vector< double > &res, matrix_gpu< Complex_t > const &Hamiltonian_d, TransSector const &Sector, GPUconfig const &GPUconf)
Definition TimeEvolution.cpp:70
Definition mytypes.hpp:147
Calculate the microcanonical averages with respect to a given sorted vector 'eigVal'.
Definition MicroCanonicalAverage.hpp:25
bool checkIsFileOpen(std::ifstream &file, std::string const &filename)
Definition file_util.hpp:22
debug_print("# Determining GPU configuration.")
Integer_t const num_op
Definition setVariablesForEnsemble.cpp:39
baseDirName
Definition setVariablesForEnsemble.cpp:50
constexpr int Nargs_base
Definition setVariablesForEnsemble.cpp:13
Integer_t const num_h
Definition setVariablesForEnsemble.cpp:38
Integer_t const repMin
Definition setVariablesForEnsemble.cpp:31
Integer_t const n_min
Definition setVariablesForEnsemble.cpp:28
Integer_t const repMax
Definition setVariablesForEnsemble.cpp:32
Integer_t const dloc_op
Definition setVariablesForEnsemble.cpp:41
Integer_t const dloc_h
Definition setVariablesForEnsemble.cpp:40
Integer_t const n_max
Definition setVariablesForEnsemble.cpp:27
constexpr double Emin
Definition setVariablesForMCAverage.cpp:4
constexpr double Emax
Definition setVariablesForMCAverage.cpp:5
double const dE
Definition setVariablesForMCAverage.cpp:2
double SpectralRange(double &OpMin, Integer_t const dim, matrix< Complex_t< double > > &Operator)
Definition statmech.cpp:49
Integer_t EigenMatrixElements(std::vector< double > &Eigenvalue, Integer_t const dim, matrix< Complex_t< double > > &Hamiltonian, matrix< Complex_t< double > > &Operator, void *GPUconf)
Definition statmech.cpp:89
std::stringstream buff("")
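
In the dynamics loop above, main() first calls MicroCanonicalAverage with shellWidth = dE and, whenever the average comes back NaN (an empty shell), retries with shellWidth = k*energyStddev[Id] for growing k. The real routine is declared in MicroCanonicalAverage.hpp; the following stand-in only mimics its call shape to make that NaN fallback concrete, and its shell convention (a plain |E_k - E| <= shellWidth window over the sorted energies) is an assumption, not the documented behaviour:

#include <cmath>
#include <limits>
#include <vector>

// Illustration-only stand-in for MicroCanonicalAverage (Integer_t replaced by long for self-containment).
// Averages expValue[k] over eigenstates whose sorted, normalized energy falls inside the shell;
// returns NaN for an empty shell, which is what triggers the widening loop in main().
double microCanonicalAverageSketch(long& NdataInShell, double E, double shellWidth,
                                   long dim, std::vector<double> const& eigVal,
                                   std::vector<double> const& expValue)
{
    NdataInShell = 0;
    double sum = 0.0;
    for (long k = 0; k < dim; ++k) {
        if (std::abs(eigVal[k] - E) <= shellWidth) {   // shell convention assumed here
            sum += expValue[k];
            ++NdataInShell;
        }
    }
    if (NdataInShell == 0) return std::numeric_limits<double>::quiet_NaN();
    return sum / static_cast<double>(NdataInShell);
}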

◆ StatePreparation() [1/2]

void __global__ StatePreparation ( double  time,
Integer_t  Nstates,
Integer_t *  dIndex,
Integer_t  dim_sub,
Complex_t *  dState,
Integer_t  LDS,
Complex_t *  dh_tot,
Integer_t  LDH,
double *  dEigenEnergy 
)
89 {
90 int const idx = blockIdx.x*blockDim.x +threadIdx.x;
91 int const idy = blockIdx.y*blockDim.y +threadIdx.y;
92 if(idx>=dim_sub || idy>=Nstates) return;
93 Integer_t stateId = dIndex[idy];
94 Real_t phase = -dEigenEnergy[idx]*time;
95 dState[idx+LDS*idy] = conj(dh_tot[stateId+LDH*idx])*MAGMA_CEXP(phase);
96 }
__device__ Complex_t< double > MAGMA_CEXP(Real_t theta)
Definition generateRM.cuh:132

◆ StatePreparation() [2/2]

void StatePreparation ( double  time,
std::vector< Integer_t > &  Index,
Integer_t  dim_sub,
matrix< Complex_t > &  State,
matrix< Complex_t > &  h_tot,
std::vector< double > &  eigenEnergy 
)
111 {
112 #pragma omp parallel for
113 for(size_t k = 0; k < dim_sub; k++) {
114 Complex_t phase = -ComplexI<>*eigenEnergy[k]*time;
115 for(size_t j = 0; j < Index.size(); j++) {
116 State.at(k,j) = conj(h_tot.at( Index[j] ,k))*std::exp(phase);
117 }
118 }
119 }
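
Both overloads fill column j of State with the eigenbasis coefficients of the time-evolved basis state selected by Index[j]. Assuming that, after diagonalization, column k of h_tot holds the k-th eigenvector, so that h_tot(Index[j], k) = <Index[j]|E_k> (which is what the CPU overload and the expectation values computed later in main() suggest), the prepared entries are

\[
\mathrm{State}(k,j) = e^{-i E_k t}\,\overline{h_{\mathrm{tot}}\!\left(\mathrm{Index}[j],\,k\right)}
                    = e^{-i E_k t}\,\langle E_k\,|\,\mathrm{Index}[j]\rangle,
\qquad
|\psi_j(t)\rangle = \sum_k \mathrm{State}(k,j)\,|E_k\rangle,
\]

i.e. each column is the initial basis state Index[j] evolved to time t, written in the energy eigenbasis.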

Variable Documentation

◆ filesystem

namespace filesystem = std::experimental::filesystem