OpenCL: GPU Execution Time is always Zero

I am trying to print the execution time of some functions on the GPU, but the measured time on the GPU always comes out as 0. However, when I choose CL_DEVICE_TYPE_CPU in the call below, everything works fine.

 errcode = clGetDeviceIDs( platform_id, CL_DEVICE_TYPE_CPU, 1, &device_id, &ret_num_devices);

This works fine and shows a non-zero execution time, but if I choose CL_DEVICE_TYPE_GPU it always shows 0, irrespective of the total number of data points and threads. Note that in both cases (CL_DEVICE_TYPE_CPU and CL_DEVICE_TYPE_GPU) I print the execution time in the same way, and my host code and kernel code are the same in both cases (that is what OpenCL is about!). Here are the code sections:

  // openCL code to get platform and device ids
errcode = clGetPlatformIDs(1, &platform_id, &ret_num_platforms);
  errcode = clGetDeviceIDs( platform_id, CL_DEVICE_TYPE_GPU, 1, &device_id, &ret_num_devices);

// to create context

   clGPUContext = clCreateContext( NULL, 1, &device_id, NULL, NULL, &errcode);
   //Create a command-queue
   clCommandQue = clCreateCommandQueue(clGPUContext, 
              device_id, CL_QUEUE_PROFILING_ENABLE, &errcode);

// Setup device memory
   d_instances= clCreateBuffer(clGPUContext,CL_MEM_READ_ONLY |    
  CL_MEM_COPY_HOST_PTR,mem_size_i,instances->data, &errcode);
  d_centroids = clCreateBuffer(clGPUContext,CL_MEM_READ_WRITE,mem_size_c, NULL, &errcode);
  d_distance = clCreateBuffer(clGPUContext,CL_MEM_READ_WRITE,mem_size_d,NULL, &errcode);
// d_dist_X = clCreateBuffer(clGPUContext,CL_MEM_READ_WRITE,mem_size4,NULL, &errcode);
//d_dist_Y = clCreateBuffer(clGPUContext,CL_MEM_READ_WRITE,mem_size4,NULL, &errcode);

//to build program
clProgram = clCreateProgramWithSource(clGPUContext, 1, (const char **)&source_str, (const size_t*)&source_size, &errcode);

  errcode = clBuildProgram(clProgram, 0,NULL, NULL, NULL, NULL);

  if (errcode == CL_BUILD_PROGRAM_FAILURE) 
{
    // Determine the size of the log
    size_t log_size;
    clGetProgramBuildInfo(clProgram, device_id, CL_PROGRAM_BUILD_LOG, 0, NULL, 
  &log_size);

    // Allocate memory for the log
    char *log = (char *) malloc(log_size);

    // Get the log
    clGetProgramBuildInfo(clProgram, device_id, CL_PROGRAM_BUILD_LOG, log_size, log, NULL);

    // Print the log
    printf("%s\n", log);
}
clKernel = clCreateKernel(clProgram,"distance_finding", &errcode);

// Launch OpenCL kernel
size_t localWorkSize[1], globalWorkSize[1];
if(num_instances >= 500)
{
    localWorkSize[0] = 500;
    float block1=num_instances/localWorkSize[0];
    int block= (int)(ceil(block1));
    globalWorkSize[0] = block*localWorkSize[0];
}
else
{
    localWorkSize[0]=num_instances;
    globalWorkSize[0]=num_instances;
}

int iteration=1;
while(iteration < MAX_ITERATIONS)
{
    errcode = clEnqueueWriteBuffer(clCommandQue,d_centroids , CL_TRUE, 0, 
 mem_size_c, (void*)centroids->data, 0, NULL, NULL);
    errcode = clEnqueueWriteBuffer(clCommandQue, d_distance, CL_TRUE, 0, mem_size_d, (void*)distance->data, 0, NULL, NULL);

    //set kernel arguments
    errcode = clSetKernelArg(clKernel, 0,sizeof(cl_mem), (void *)&d_instances);
    errcode = clSetKernelArg(clKernel, 1,sizeof(cl_mem), (void *)&d_centroids);
    errcode = clSetKernelArg(clKernel, 2,sizeof(cl_mem), (void *)&d_distance);
    errcode = clSetKernelArg(clKernel, 3, sizeof(unsigned int), (void *)&num_instances);
    errcode = clSetKernelArg(clKernel,4,sizeof(unsigned int),(void *)&clusters);
    errcode = clSetKernelArg(clKernel,5,sizeof(unsigned int),(void *)&dimensions);

    errcode = clEnqueueNDRangeKernel(clCommandQue,clKernel, 1, NULL, 
  globalWorkSize,localWorkSize, 0, NULL, &myEvent);

    clFinish(clCommandQue); // wait for all events to finish
    clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_START, sizeof(cl_ulong), &startTime, NULL);
    clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &endTime, NULL);
    kernelExecTimeNs = endTime-startTime;
    gpu_time+= kernelExecTimeNs;

    // Retrieve result from device
    errcode = clEnqueueReadBuffer(clCommandQue,d_distance, CL_TRUE, 0, 
 mem_size_d,distance->data, 0, NULL, NULL);

Printing the time in ms:

printf("

 Time taken by GPU is %llu ms",gpu_time/1000000);

If the way I am calculating the GPU time is wrong, why does it work when I use the CPU (by changing to CL_DEVICE_TYPE_CPU)? What is going wrong here?

Edited:

System Information

AMD APP SDK 2.4 AMD ATI FirePro GL 3D, having 800 cores

Kernel

 #pragma OPENCL EXTENSION cl_khr_fp64:enable
double distance_cal(__local float* cent,float* data,int dimensions)
{
float dist1=0.00;
for(int i=0;i<dimensions;i++)
    dist1 += ((data[i]-cent[i]) * (data[i]-cent[i]));
double sq_dist=sqrt(dist1);
return sq_dist;
}
void fetch_col(float* data,__constant float* x,int col,int dimension,int len)
{
//hari[i]=8;
for(int i=0;i<dimension;i++)
{
data[i]=x[col];
    col=col+len;
}
}
void fetch_col_cen(__local float* data,__global float* x,int col,int dimension,int len)
{
//hari[i]=8;
for(int i=0;i<dimension;i++)
{
data[i]=x[col];
    col=col+len;
}
}


 __kernel void distance_finding(__constant float* data, __global float* cen, __global float* dist, int inst, int clus, const int dimensions)
 {
int idx=get_global_id(0);
float data_col[4];
fetch_col(  data_col,data,idx,dimensions,inst);

for(int i=0;i<clus;i++)
{
    int k=i*inst; // take each dimension value for each cluster data

    __local float cent[4];
    barrier(CLK_LOCAL_MEM_FENCE | CLK_GLOBAL_MEM_FENCE);
    fetch_col_cen(cent,cen,i,dimensions,clus);

    dist[idx+k]=distance_cal(cent,data_col,dimensions); // calculate distance wrt each data point and each centroid

}

}
Best Answer

clEnqueueNDRangeKernel() is asynchronous when it targets a GPU, so you only see the time it took to enqueue the request, not the time it took to execute it.
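
For context, here is a minimal sketch of event-based profiling that waits on the event and checks the return codes of the profiling queries. This is not the asker's exact code: it reuses the names from the question's host code and assumes the command queue was created with CL_QUEUE_PROFILING_ENABLE.

 cl_event myEvent;
 cl_ulong startTime = 0, endTime = 0;
 cl_int perr;

 errcode = clEnqueueNDRangeKernel(clCommandQue, clKernel, 1, NULL,
                                  globalWorkSize, localWorkSize, 0, NULL, &myEvent);

 // Block until the kernel has actually finished executing.
 clWaitForEvents(1, &myEvent);    // or clFinish(clCommandQue);

 // The counters are only guaranteed to be valid once the event has completed.
 perr = clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_START,
                                sizeof(cl_ulong), &startTime, NULL);
 if (perr != CL_SUCCESS)          // e.g. CL_PROFILING_INFO_NOT_AVAILABLE
     printf("start time not available: %d\n", perr);

 perr = clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_END,
                                sizeof(cl_ulong), &endTime, NULL);
 if (perr != CL_SUCCESS)
     printf("end time not available: %d\n", perr);

 printf("Kernel time: %llu ns\n", (unsigned long long)(endTime - startTime));
 clReleaseEvent(myEvent);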

That said, I would normally write the timing code on the host in C++ anyway, putting start_time before the call and end_time after

clFinish(cmd_queue); 

just as you would with ordinary C++ timing code. That is a good test if you are sure your GPU should not be finishing in 0 seconds.
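
For instance, a minimal sketch of that wall-clock check on the host, assuming POSIX clock_gettime is available and reusing the names from the question's host code (the timing variables themselves are illustrative):

 #include <stdio.h>
 #include <time.h>

 struct timespec t0, t1;
 clock_gettime(CLOCK_MONOTONIC, &t0);

 errcode = clEnqueueNDRangeKernel(clCommandQue, clKernel, 1, NULL,
                                  globalWorkSize, localWorkSize, 0, NULL, NULL);
 clFinish(clCommandQue);   // block until the kernel has really finished

 clock_gettime(CLOCK_MONOTONIC, &t1);
 double wall_ms = (t1.tv_sec - t0.tv_sec) * 1e3 +
                  (t1.tv_nsec - t0.tv_nsec) / 1e6;
 printf("Wall-clock kernel time: %.3f ms\n", wall_ms);

If this wall-clock figure is clearly non-zero while the event-based counters still read 0, the problem lies in how the profiling info is collected; if both are effectively zero, the kernel really is finishing that quickly.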

Other Answers

An easy way to check is to introduce an abnormally long operation inside the kernel. If the measurement still shows zero even though there is a noticeable lag in the actual execution, then you have your answer.
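
A hedged sketch of what that check could look like, based on the kernel from the question (the busy-loop, its bound, and the guard are purely illustrative):

 __kernel void distance_finding(__constant float* data, __global float* cen,
                                __global float* dist, int inst, int clus,
                                const int dimensions)
 {
     int idx = get_global_id(0);

     // Artificial delay: if the reported time is still zero even though the
     // call now takes visibly longer, the timing code is at fault, not the kernel.
     float waste = 0.0f;
     for (int i = 0; i < 1000000; i++)
         waste += 0.000001f;
     if (waste < 0.0f)             // never true; keeps the loop from being optimized away
         dist[idx] = waste;

     // ... original kernel body unchanged ...
 }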

That said, I