/* Source: bvorthogcuda.cu (slepc-3.18.3, 2023-03-24) */
  1: /*
  2:    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  3:    SLEPc - Scalable Library for Eigenvalue Problem Computations
  4:    Copyright (c) 2002-, Universitat Politecnica de Valencia, Spain

  6:    This file is part of SLEPc.
  7:    SLEPc is distributed under a 2-clause BSD license (see LICENSE).
  8:    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  9: */
 10: /*
 11:    BV orthogonalization routines (CUDA)
 12: */

 14: #include <slepc/private/bvimpl.h>
 15: #include <slepcblaslapack.h>
 16: #include <slepccublas.h>

 18: /*
 19:    BV_CleanCoefficients_CUDA - Sets to zero all entries of column j of the bv buffer
 20: */
 21: PetscErrorCode BV_CleanCoefficients_CUDA(BV bv,PetscInt j,PetscScalar *h)
 22: {
 23:   PetscScalar    *d_hh,*d_a;
 24:   PetscInt       i;

 26:   if (!h) {
 27:     VecCUDAGetArray(bv->buffer,&d_a);
 28:     PetscLogGpuTimeBegin();
 29:     d_hh = d_a + j*(bv->nc+bv->m);
 30:     cudaMemset(d_hh,0,(bv->nc+j)*sizeof(PetscScalar));
 31:     PetscLogGpuTimeEnd();
 32:     VecCUDARestoreArray(bv->buffer,&d_a);
 33:   } else { /* cpu memory */
 34:     for (i=0;i<bv->nc+j;i++) h[i] = 0.0;
 35:   }
 36:   return 0;
 37: }

 39: /*
 40:    BV_AddCoefficients_CUDA - Add the contents of the scratch (0-th column) of the bv buffer
 41:    into column j of the bv buffer
 42:  */
 43: PetscErrorCode BV_AddCoefficients_CUDA(BV bv,PetscInt j,PetscScalar *h,PetscScalar *c)
 44: {
 45:   PetscScalar    *d_h,*d_c,sone=1.0;
 46:   PetscInt       i;
 47:   PetscCuBLASInt idx=0,one=1;
 48:   cublasHandle_t cublasv2handle;

 50:   if (!h) {
 51:     PetscCUBLASGetHandle(&cublasv2handle);
 52:     VecCUDAGetArray(bv->buffer,&d_c);
 53:     d_h = d_c + j*(bv->nc+bv->m);
 54:     PetscCuBLASIntCast(bv->nc+j,&idx);
 55:     PetscLogGpuTimeBegin();
 56:     cublasXaxpy(cublasv2handle,idx,&sone,d_c,one,d_h,one);
 57:     PetscLogGpuTimeEnd();
 58:     PetscLogGpuFlops(1.0*(bv->nc+j));
 59:     VecCUDARestoreArray(bv->buffer,&d_c);
 60:   } else { /* cpu memory */
 61:     for (i=0;i<bv->nc+j;i++) h[i] += c[i];
 62:     PetscLogFlops(1.0*(bv->nc+j));
 63:   }
 64:   return 0;
 65: }

 67: /*
 68:    BV_SetValue_CUDA - Sets value in row j (counted after the constraints) of column k
 69:    of the coefficients array
 70: */
 71: PetscErrorCode BV_SetValue_CUDA(BV bv,PetscInt j,PetscInt k,PetscScalar *h,PetscScalar value)
 72: {
 73:   PetscScalar    *d_h,*a;

 75:   if (!h) {
 76:     VecCUDAGetArray(bv->buffer,&a);
 77:     PetscLogGpuTimeBegin();
 78:     d_h = a + k*(bv->nc+bv->m) + bv->nc+j;
 79:     cudaMemcpy(d_h,&value,sizeof(PetscScalar),cudaMemcpyHostToDevice);
 80:     PetscLogCpuToGpu(sizeof(PetscScalar));
 81:     PetscLogGpuTimeEnd();
 82:     VecCUDARestoreArray(bv->buffer,&a);
 83:   } else { /* cpu memory */
 84:     h[bv->nc+j] = value;
 85:   }
 86:   return 0;
 87: }

 89: /*
 90:    BV_SquareSum_CUDA - Returns the value h'*h, where h represents the contents of the
 91:    coefficients array (up to position j)
 92: */
 93: PetscErrorCode BV_SquareSum_CUDA(BV bv,PetscInt j,PetscScalar *h,PetscReal *sum)
 94: {
 95:   const PetscScalar *d_h;
 96:   PetscScalar       dot;
 97:   PetscInt          i;
 98:   PetscCuBLASInt    idx=0,one=1;
 99:   cublasHandle_t    cublasv2handle;

101:   if (!h) {
102:     PetscCUBLASGetHandle(&cublasv2handle);
103:     VecCUDAGetArrayRead(bv->buffer,&d_h);
104:     PetscCuBLASIntCast(bv->nc+j,&idx);
105:     PetscLogGpuTimeBegin();
106:     cublasXdotc(cublasv2handle,idx,d_h,one,d_h,one,&dot);
107:     PetscLogGpuTimeEnd();
108:     PetscLogGpuFlops(2.0*(bv->nc+j));
109:     *sum = PetscRealPart(dot);
110:     VecCUDARestoreArrayRead(bv->buffer,&d_h);
111:   } else { /* cpu memory */
112:     *sum = 0.0;
113:     for (i=0;i<bv->nc+j;i++) *sum += PetscRealPart(h[i]*PetscConj(h[i]));
114:     PetscLogFlops(2.0*(bv->nc+j));
115:   }
116:   return 0;
117: }

/*
   Pointwise multiplication: a[i] *= Re(b[i]) for i < n.
   xcount offsets the flat index so the caller can cover arrays larger than
   one grid by launching the kernel repeatedly with xcount = 0,1,2,...
   1D grid/block layout; bounds-checked against n.
*/
__global__ void PointwiseMult_kernel(PetscInt xcount,PetscScalar *a,const PetscScalar *b,PetscInt n)
{
  const PetscInt idx = (xcount*gridDim.x+blockIdx.x)*blockDim.x + threadIdx.x;

  if (idx < n) a[idx] *= PetscRealPart(b[idx]);
}

/*
   Pointwise division: a[i] /= Re(b[i]) for i < n.
   xcount offsets the flat index so the caller can cover arrays larger than
   one grid by launching the kernel repeatedly with xcount = 0,1,2,...
   1D grid/block layout; bounds-checked against n.
*/
__global__ void PointwiseDiv_kernel(PetscInt xcount,PetscScalar *a,const PetscScalar *b,PetscInt n)
{
  const PetscInt idx = (xcount*gridDim.x+blockIdx.x)*blockDim.x + threadIdx.x;

  if (idx < n) a[idx] /= PetscRealPart(b[idx]);
}

/*
   BV_ApplySignature_CUDA - Computes the pointwise product h*omega, where h represents
   the contents of the coefficients array (up to position j) and omega is the signature;
   if inverse=TRUE then the operation is h/omega.

   If h is NULL the operation runs on the device via the PointwiseMult/Div
   kernels, launched xcount times to cover all bv->nc+j entries; otherwise it
   runs on the host arrays. Only the real part of omega is used.
*/
PetscErrorCode BV_ApplySignature_CUDA(BV bv,PetscInt j,PetscScalar *h,PetscBool inverse)
{
  PetscScalar       *d_h;
  const PetscScalar *d_omega,*omega;
  PetscInt          i,xcount;
  dim3              blocks3d, threads3d;

  PetscFunctionBegin;
  if (!(bv->nc+j)) PetscFunctionReturn(0); /* nothing to do */
  if (!h) { /* device memory */
    PetscCall(VecCUDAGetArray(bv->buffer,&d_h));
    PetscCall(VecCUDAGetArrayRead(bv->omega,&d_omega));
    PetscCall(SlepcKernelSetGrid1D(bv->nc+j,&blocks3d,&threads3d,&xcount));
    PetscCall(PetscLogGpuTimeBegin());
    if (inverse) {
      for (i=0;i<xcount;i++) PointwiseDiv_kernel<<<blocks3d,threads3d>>>(i,d_h,d_omega,bv->nc+j);
    } else {
      for (i=0;i<xcount;i++) PointwiseMult_kernel<<<blocks3d,threads3d>>>(i,d_h,d_omega,bv->nc+j);
    }
    PetscCallCUDA(cudaGetLastError()); /* catch kernel launch errors */
    PetscCall(PetscLogGpuTimeEnd());
    PetscCall(PetscLogGpuFlops(1.0*(bv->nc+j)));
    PetscCall(VecCUDARestoreArrayRead(bv->omega,&d_omega));
    PetscCall(VecCUDARestoreArray(bv->buffer,&d_h));
  } else { /* cpu memory */
    PetscCall(VecGetArrayRead(bv->omega,&omega));
    if (inverse) for (i=0;i<bv->nc+j;i++) h[i] /= PetscRealPart(omega[i]);
    else for (i=0;i<bv->nc+j;i++) h[i] *= PetscRealPart(omega[i]);
    PetscCall(VecRestoreArrayRead(bv->omega,&omega));
    PetscCall(PetscLogFlops(1.0*(bv->nc+j)));
  }
  PetscFunctionReturn(0);
}

/*
   BV_SquareRoot_CUDA - Returns the square root of position j (counted after the constraints)
   of the coefficients array.

   If h is NULL the scalar is first copied device-to-host from bv->buffer;
   BV_SafeSqrt performs the actual (sign-aware) square root into *beta.
*/
PetscErrorCode BV_SquareRoot_CUDA(BV bv,PetscInt j,PetscScalar *h,PetscReal *beta)
{
  const PetscScalar *d_h;
  PetscScalar       hh;

  PetscFunctionBegin;
  if (!h) { /* device memory */
    PetscCall(VecCUDAGetArrayRead(bv->buffer,&d_h));
    PetscCall(PetscLogGpuTimeBegin());
    PetscCallCUDA(cudaMemcpy(&hh,d_h+bv->nc+j,sizeof(PetscScalar),cudaMemcpyDeviceToHost));
    PetscCall(PetscLogGpuToCpu(sizeof(PetscScalar)));
    PetscCall(PetscLogGpuTimeEnd());
    PetscCall(BV_SafeSqrt(bv,hh,beta));
    PetscCall(VecCUDARestoreArrayRead(bv->buffer,&d_h));
  } else PetscCall(BV_SafeSqrt(bv,h[bv->nc+j],beta));
  PetscFunctionReturn(0);
}

/*
   BV_StoreCoefficients_CUDA - Copy the contents of the coefficients array to an array dest
   provided by the caller (only values from l to j are copied).

   If h is NULL the j-bv->l values are copied device-to-host from column j of
   bv->buffer (skipping the bv->nc constraint entries); otherwise they are
   copied from the host array h. dest[0] receives entry l.
*/
PetscErrorCode BV_StoreCoefficients_CUDA(BV bv,PetscInt j,PetscScalar *h,PetscScalar *dest)
{
  const PetscScalar *d_h,*d_a;
  PetscInt          i;

  PetscFunctionBegin;
  if (!h) { /* device memory */
    PetscCall(VecCUDAGetArrayRead(bv->buffer,&d_a));
    PetscCall(PetscLogGpuTimeBegin());
    d_h = d_a + j*(bv->nc+bv->m)+bv->nc;
    /* dest-bv->l+bv->l == dest: the copy starts at source offset bv->l via pointer bias */
    PetscCallCUDA(cudaMemcpy(dest-bv->l,d_h,(j-bv->l)*sizeof(PetscScalar),cudaMemcpyDeviceToHost));
    PetscCall(PetscLogGpuToCpu((j-bv->l)*sizeof(PetscScalar)));
    PetscCall(PetscLogGpuTimeEnd());
    PetscCall(VecCUDARestoreArrayRead(bv->buffer,&d_a));
  } else { /* cpu memory */
    for (i=bv->l;i<j;i++) dest[i-bv->l] = h[bv->nc+i];
  }
  PetscFunctionReturn(0);
}