// NOTE(review): this fragment is a Doxygen/HTML scrape of a CUDA kernel body
// (ETHmeasure.hpp per the cross-references below the listing). The leading
// integers on each line (46, 48, ...) are original source line numbers baked
// into the scrape, and several statements are split across physical lines.
// The kernel's template header and parameter list lie above this excerpt, so
// every name not declared below (resPtr, dEigValPtr, dEigVecPtr, subSpacePtr,
// dmBodyOpSpace, adjointBasis, transEqClassRep, transPeriod, transEqDim,
// dWorkPtr, dE, id1, id2, Real, Scalar, Derived1..3) comes from that unseen
// header — recover the original file before editing this code.
46 {
48
// Reattach mutability to the output accumulator; the eigen-system inputs
// (eigenvalue vector dEigVal, eigenvector matrix dEigVec) stay const.
49 Eigen::DenseBase<Derived1>& res = *const_cast< Eigen::DenseBase<Derived1>* >(resPtr);
50 Eigen::DenseBase<Derived2> const& dEigVal = *dEigValPtr;
51 Eigen::DenseBase<Derived3> const& dEigVec = *dEigVecPtr;
// Dimension of the (sub)space being measured.
54 int const dimSpace = subSpacePtr->
dim();
55
// One operator per block (opOrdinal indexes transEqClassRep/transPeriod);
// the 2-D grid is flattened, and excess blocks exit immediately.
56 int const opOrdinal = blockIdx.x + gridDim.x * blockIdx.y;
57 if(opOrdinal >= transEqDim) return;
58 #ifndef NDEBUG
// Debug trace from a single thread of the first operator's block only.
59 if(opOrdinal == 0 && threadIdx.x == 0)
60 printf("%s\n\t transEqDim=%d, dimSpace=%d, dWorkPtr->rows()=%d\n", __PRETTY_FUNCTION__,
61 int(transEqDim), int(dimSpace), int(dWorkPtr->rows()));
62 #endif
63
// Dynamic shared-memory layout:
//   [0, dimSpace)   : expVal (Real) — per-eigenstate expectation accumulator
//   [dimSpace, ...) : scratch region that is deliberately reused:
//     phase 1: outState (blockDim.x ints) then coeff (blockDim.x Scalars)
//     phase 2: nEigVals back-to-back copies of the eigenvalue vector
// Note eigValPtr (orig. 67) and outStatePtr (orig. 74) alias the same offset
// (expValPtr + expVal.size()); the __syncthreads() at orig. line 98 separates
// the two uses.
64 extern __shared__ int sharedMem[];
65 Real* expValPtr = reinterpret_cast<Real*>(sharedMem);
66 Eigen::Map< Eigen::VectorX<Real> > expVal(expValPtr, dimSpace);
67 Real* eigValPtr = expValPtr + expVal.size();
// Per-thread configuration buffer with fixed capacity 14 — assumes
// dmBodyOpSpace.sysSize() <= 14; TODO confirm against the call sites
// (a static_assert or runtime assert would make this explicit).
68 int configStorage[14];
69 Eigen::Map< Eigen::VectorX<int> > config(&configStorage[0], dmBodyOpSpace.
sysSize());
70
// Cooperatively zero the accumulator before any atomicAdd into it.
71 for(int j = threadIdx.x; j < dimSpace; j += blockDim.x) { expVal[j] = 0; }
72 __syncthreads();
73
// Phase 1: sweep all basis states of the total space in blockDim.x-sized tiles.
74 int* outStatePtr = reinterpret_cast<int*>(expValPtr + expVal.size());
75 Scalar* coeffPtr =
reinterpret_cast<Scalar*
>(outStatePtr + blockDim.x);
76 for(
int j = 0; j != round_up(subSpacePtr->
totalSpace().dim(), blockDim.x) / blockDim.x; ++j) {
// Each thread applies this block's operator to one input basis state, then
// sandwiches the result between adjoint-basis amplitudes:
//   coeff <- value[outState] * coeff * conj(value[inState]).
77 int const inState = threadIdx.x + j * blockDim.x;
78 if(inState < subSpacePtr->totalSpace().dim()) {
79 dmBodyOpSpace.
action(outStatePtr[threadIdx.x], coeffPtr[threadIdx.x],
80 transEqClassRep[opOrdinal], inState, config);
81 coeffPtr[threadIdx.x] = adjointBasis.
valuePtr()[outStatePtr[threadIdx.x]]
82 * coeffPtr[threadIdx.x]
83 * conj(adjointBasis.
valuePtr()[inState]);
84 }
// Barrier sits outside the divergent if above, so all threads reach it.
85 __syncthreads();
86
// All threads walk the whole tile of coefficients and scatter each one's
// contribution real(conj(dEigVec(id2,e)) * coeff * dEigVec(id1,e)) into
// expVal[e] via shared-memory atomics (several threads may hit one slot).
87 for(int k = 0; k != blockDim.x; ++k) {
88 int const inState = k + j * blockDim.x;
89 if(inState >= subSpacePtr->
totalSpace().dim())
continue;
92 Scalar const coeff = coeffPtr[k];
93 for(int expvalId = threadIdx.x; expvalId < dimSpace; expvalId += blockDim.x) {
94 atomicAdd(&expVal[expvalId],
95 real(conj(dEigVec(id2, expvalId)) * coeff * dEigVec(id1, expvalId)));
96 }
97 }
// Tile fully consumed; the outState/coeff scratch may be overwritten next.
98 __syncthreads();
99 }
100
// NOTE(review): the next statement is truncated by the scrape — original line
// 102 (the initializer of nEigVals) is missing, so this fragment does not
// compile as-is. Recover it from the original ETHmeasure.hpp before use.
101 int const nEigVals
// Phase 2: fill nEigVals back-to-back copies of the eigenvalue vector into
// the reused scratch region (each copy is dimSpace entries long).
103 for(int j = threadIdx.x; j < nEigVals * dimSpace; j += blockDim.x) {
104 *(eigValPtr + j) = dEigVal(j % dimSpace);
105 }
106 __syncthreads();
107
// Each thread reads its own replica (threadIdx.x mod nEigVals) — presumably
// to spread shared-memory reads across replicas/banks; confirm intent in the
// original source.
108 int const k = threadIdx.x % nEigVals;
109 Eigen::Map< Eigen::VectorX<Real> > eigVal(eigValPtr + k * dimSpace, dimSpace);
110 Real const opTransPeriod = transPeriod[opOrdinal];
// For every eigenstate j: find the window [idMin, idMax] of eigenstates whose
// energy lies within dE of eigVal(j), average expVal over that window, and
// accumulate the weighted squared deviation into the output.
111 for(int j = threadIdx.x; j < dimSpace; j += blockDim.x) {
112 int idMin = j, idMax = j;
// Scan downward while still inside the energy shell, then step back in.
113 for(idMin = j; idMin >= 0 && eigVal(j) - eigVal(idMin) <=
dE; --idMin) {};
114 ++idMin;
// Scan upward symmetrically.
115 for(idMax = j; idMax < dimSpace && eigVal(idMax) - eigVal(j) <=
dE; ++idMax) {};
116 --idMax;
117 assert(idMin >= 0);
118 assert(idMax < dimSpace);
119 assert(idMin <= idMax);
120
// Window average of the expectation value. (The loop variable k below
// shadows the outer `k` declared at orig. line 108 — legal but confusing.)
121 Real MCave = 0;
122 for(int k = idMin; k != idMax + 1; ++k) MCave += expVal[k];
123 MCave /= static_cast<Real>(idMax - idMin + 1);
// Squared deviation scaled by this operator's transPeriod entry; result is
// accumulated into a per-SM column (get_smid()) — presumably to reduce
// global-memory atomic contention, with columns summed later by the caller.
124 Real const diff = opTransPeriod * (expVal[j] - MCave) * (expVal[j] - MCave);
125 atomicAdd(&res(j,
get_smid()), diff);
126 }
127}
__device__ unsigned dynamic_smem_size()
Definition ETHmeasure.hpp:25
__device__ unsigned get_smid(void)
Definition ETHmeasure.hpp:30
Definition mytypes.hpp:147
__host__ __device__ int dim() const
Definition HilbertSpace.hpp:34
Definition OperatorSpace.hpp:213
__host__ __device__ int sysSize() const
Definition HilbertSpace.hpp:265
__host__ __device__ void action(int &resBasisNum, Complex_t< RealType > &coeff, int opNum, int basisNum) const
Definition OperatorSpace.hpp:83
Definition MatrixUtils.cuh:280
__host__ __device__ int * innerIndexPtr() const
Definition MatrixUtils.cuh:421
__host__ __device__ Scalar_t * valuePtr() const
Definition MatrixUtils.cuh:422
typename Eigen::NumTraits< ScalarType_ >::Real Real
Definition HilbertSpace.hpp:572
__host__ __device__ TotalSpace const & totalSpace() const
Definition HilbertSpace.hpp:671
double const dE
Definition setVariablesForMCAverage.cpp:2