
CUDA Practice (2): Grayscale Histogram

Writing the code

First, the RGB-to-grayscale program from the previous post is copied over to generate the grayscale image.

Four implementations were written: cpu, gpu_wrong_naive, gpu_naive, and gpu_usesharemem.

CPU version

The CPU version is straightforward:

void getGrayHistincpu(unsigned char * const grayData, 
                    unsigned int * const hist,
                    uint imgheight,
                    uint imgwidth)
{
    // hist[256] must be zero-initialized by the caller; this function only increments
    for(uint i = 0; i < imgheight; i++)
    {
        for (uint j = 0; j < imgwidth; j++)
        {
            hist[grayData[i*imgwidth+j]]++;
        }
    }
}
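A minimal host-side usage sketch (assuming the grayscale cv::Mat grayImage from the main() in the source listing below); note that hist must start at zero, since the function only increments:

unsigned int hist[256] = {0};   // must start at zero
getGrayHistincpu(grayImage.data, hist, grayImage.rows, grayImage.cols);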

GPU version 1: direct port (gpu_wrong_naive)

__global__ void getGrayHistincuda_wrong_naive(unsigned char * const grayData, 
                                unsigned int * const hist,
                                uint imgheight,
                                uint imgwidth)  // data race: the counts change from run to run
{
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    const unsigned int idy = blockDim.y * blockIdx.y + threadIdx.y;    

    if(idx < imgwidth && idy < imgheight)
    {
        const unsigned long pid = imgwidth * idy + idx;
        const unsigned char value = grayData[pid];
        hist[value]++;
    }
}

This code is broken: many threads read-modify-write the same global-memory counters concurrently, so increments get lost and the counts change from run to run.
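The root cause is that hist[value]++ is not one operation; it compiles to a separate load, add, and store. A sketch of the lost-update interleaving:

// what hist[value]++ actually does, in three non-atomic steps:
unsigned int tmp = hist[value];  // 1. load the current count
tmp = tmp + 1;                   // 2. increment the local copy
hist[value] = tmp;               // 3. store it back
// If threads A and B both load the same old count before either stores,
// both write back old+1 and one increment is lost.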

GPU version 2: atomic operations (gpu_naive)

__global__ void getGrayHistincuda_naive(unsigned char * const grayData, 
                                            unsigned int * const hist,
                                            uint imgheight,
                                            uint imgwidth)  // atomic adds keep the counts correct
{
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    const unsigned int idy = blockDim.y * blockIdx.y + threadIdx.y;    

    if(idx < imgwidth && idy < imgheight)
    {
        const unsigned long pid = imgwidth * idy + idx;
        const unsigned char value = grayData[pid];
        atomicAdd(&(hist[value]), 1);
    }
}

This version produces correct results, but it is not fast: with one pixel per thread (as configured here), all threads compete for the same 256 counters in global memory, and in the worst case (a constant-gray image) every update serializes on a single bin.
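atomicAdd makes the whole read-modify-write indivisible. Conceptually it behaves like the following, executed as one uninterruptible step, and it returns the old value:

// conceptual semantics of atomicAdd(address, val), performed indivisibly:
//     old = *address; *address = old + val; return old;
unsigned int old = atomicAdd(&(hist[value]), 1);   // the returned old count is unused here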

GPU version 3: shared memory (gpu_usesharemem)

__global__ void getGrayHistincuda_usesharemem(unsigned char * const grayData, 
                                            unsigned int * const hist,
                                            uint imgheight,
                                            uint imgwidth)  // accelerated with shared memory
{
    __shared__ unsigned int hist_shared[256];   // shared memory is visible only within a thread block
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    const unsigned int idy = blockDim.y * blockIdx.y + threadIdx.y;
    const unsigned int inner_idx = threadIdx.y * blockDim.x + threadIdx.x;  // 0..1023 within a 32x32 block

    hist_shared[inner_idx%256] = 0;   // zero the block-local histogram; inner_idx runs past 255, so modulo 256 every bin is covered (four times for a 32x32 block)

    __syncthreads();    // wait until the whole block has finished zeroing

    if(idx < imgwidth && idy < imgheight)
    {
        const unsigned long pid = imgwidth * idy + idx;
        const unsigned char value = grayData[pid];
        atomicAdd(&(hist_shared[value]), 1);    // contention stays within the block, in fast shared memory
    }

    __syncthreads();

    if(threadIdx.y < 8) // with a 32x32 block, the 8*32 = 256 threads with threadIdx.y < 8 have inner_idx 0..255: one thread per bin merges into global memory
    {
        atomicAdd(&(hist[inner_idx]), hist_shared[inner_idx]);
    }

}

This version is faster than the previous one, for two reasons (roughly quantified after the list):

  • Shared memory is much faster than global memory.
  • Shared memory is private to each thread block, so blocks never contend with one another while accumulating; the final merge also sees far less contention, since each block issues only 256 global atomic adds.
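As a rough count, take a 1920x1080 image (an assumed size, for illustration only) with 32x32 blocks, i.e. 60 x 34 = 2040 blocks:

gpu_naive:        1920 * 1080 = 2,073,600 atomicAdds on global memory
gpu_usesharemem:  2,073,600 block-local atomicAdds on shared memory
                  + 2040 blocks * 256 bins = 522,240 atomicAdds on global memory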

Testing

Correctness

Using the CPU code as the reference, only gpu_wrong_naive produces wrong results; the cause is its non-atomic additions.
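The comparison itself can be a simple element-wise check; a minimal sketch (the helper name is made up for illustration):

// returns true if two 256-bin histograms match exactly
bool histEqual(const unsigned int *a, const unsigned int *b)
{
    for (int i = 0; i < 256; i++)
        if (a[i] != b[i]) return false;
    return true;
}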

Speed

Method             Time (s)
cpu                0.00069200
gpu_wrong_naive    0.00013200
gpu_naive          0.00021600
gpu_usesharemem    0.00011300

As the table shows, shared memory does deliver a speedup. Note that gpu_wrong_naive appears even faster than gpu_naive only because it skips atomic serialization; its counts are wrong.
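The times above come from clock() on the host. For device-side timing, CUDA events are the usual alternative; a minimal sketch using the same kernel and launch configuration as in the source below:

cudaEvent_t t0, t1;
cudaEventCreate(&t0);
cudaEventCreate(&t1);
cudaEventRecord(t0);
getGrayHistincuda_usesharemem<<<blocksPerGrid, threadsPerBlock>>>(gpuGrayData, gpuGrayHist, imgheight, imgwidth);
cudaEventRecord(t1);
cudaEventSynchronize(t1);               // block until the kernel and t1 have completed
float ms = 0.0f;
cudaEventElapsedTime(&ms, t0, t1);      // elapsed time in milliseconds
printf("kernel time %.5f ms\n", ms);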

Source code

#include <iostream>
#include <time.h>
#include "cuda_runtime.h"
#include "opencv2/highgui.hpp"
#include "opencv2/opencv.hpp"
using namespace cv;
using namespace std;

Mat rgb2gray(Mat& srcImage);    // RGB-to-grayscale conversion, defined in the previous post of this series

void getGrayHistincpu(unsigned char * const grayData, 
                    unsigned int * const hist,
                    uint imgheight,
                    uint imgwidth)
{
    // hist[256] must be zero-initialized by the caller; this function only increments
    for(uint i = 0; i < imgheight; i++)
    {
        for (uint j = 0; j < imgwidth; j++)
        {
            hist[grayData[i*imgwidth+j]]++;
        }
    }
}

__global__ void getGrayHistincuda_wrong_naive(unsigned char * const grayData, 
                                unsigned int * const hist,
                                uint imgheight,
                                uint imgwidth)  // data race: the counts change from run to run
{
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    const unsigned int idy = blockDim.y * blockIdx.y + threadIdx.y;    

    if(idx < imgwidth && idy < imgheight)
    {
        const unsigned long pid = imgwidth * idy + idx;
        const unsigned char value = grayData[pid];
        hist[value]++;
    }
}

__global__ void getGrayHistincuda_naive(unsigned char * const grayData, 
                                            unsigned int * const hist,
                                            uint imgheight,
                                            uint imgwidth)  // atomic adds keep the counts correct
{
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    const unsigned int idy = blockDim.y * blockIdx.y + threadIdx.y;    

    if(idx < imgwidth && idy < imgheight)
    {
        const unsigned long pid = imgwidth * idy + idx;
        const unsigned char value = grayData[pid];
        atomicAdd(&(hist[value]), 1);
    }
}

__global__ void getGrayHistincuda_usesharemem(unsigned char * const grayData, 
                                            unsigned int * const hist,
                                            uint imgheight,
                                            uint imgwidth)  // accelerated with shared memory
{
    __shared__ unsigned int hist_shared[256];   // shared memory is visible only within a thread block
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    const unsigned int idy = blockDim.y * blockIdx.y + threadIdx.y;
    const unsigned int inner_idx = threadIdx.y * blockDim.x + threadIdx.x;  // 0..1023 within a 32x32 block

    hist_shared[inner_idx%256] = 0;   // zero the block-local histogram; inner_idx runs past 255, so modulo 256 every bin is covered (four times for a 32x32 block)

    __syncthreads();    // wait until the whole block has finished zeroing

    if(idx < imgwidth && idy < imgheight)
    {
        const unsigned long pid = imgwidth * idy + idx;
        const unsigned char value = grayData[pid];
        atomicAdd(&(hist_shared[value]), 1);    // contention stays within the block, in fast shared memory
    }

    __syncthreads();

    if(threadIdx.y < 8) // with a 32x32 block, the 8*32 = 256 threads with threadIdx.y < 8 have inner_idx 0..255: one thread per bin merges into global memory
    {
        atomicAdd(&(hist[inner_idx]), hist_shared[inner_idx]);
    }

}

#define PHASE 3
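// PHASE selects which implementation to compile and run:
//   0 = cpu, 1 = gpu_wrong_naive, 2 = gpu_naive, 3 = gpu_usesharemem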

int main(void)
{
    Mat srcImage = imread("./test.jpg");
    Mat grayImage = rgb2gray(srcImage);

    const uint imgheight = grayImage.rows;
    const uint imgwidth = grayImage.cols;

    dim3 threadsPerBlock(32, 32);
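    // one thread per pixel: 32x32 = 1024 threads per block; the grid below is
    // rounded up (ceiling division) so the blocks cover the whole image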
    dim3 blocksPerGrid((imgwidth + threadsPerBlock.x - 1)/threadsPerBlock.x, 
                        (imgheight + threadsPerBlock.y - 1)/threadsPerBlock.y);

    unsigned char *gpuGrayData;
    unsigned int *gpuGrayHist;

    unsigned int grayHist[256] = {0};

    cudaMalloc((void**)&gpuGrayData, imgwidth*imgheight*sizeof(unsigned char));
    cudaMalloc((void**)&gpuGrayHist, 256*(sizeof(unsigned int)));

    cudaMemcpy(gpuGrayData, grayImage.data, imgwidth*imgheight*sizeof(unsigned char), cudaMemcpyHostToDevice);

    clock_t start, end;

#if PHASE == 0

    start = clock();
    getGrayHistincpu(grayImage.data, grayHist, imgheight, imgwidth);
    end = clock();
    for(int i = 0; i < 256; i++) printf("(%d, %d)\n", i, grayHist[i]);
    printf("cuda_wrong_naive run time %.8f\n", (float)(end-start)/CLOCKS_PER_SEC);


#elif PHASE == 1

    memset(grayHist, 0, 256*sizeof(unsigned int));
    cudaMemcpy(gpuGrayHist, grayHist, 256*sizeof(unsigned int), cudaMemcpyHostToDevice);   // zero the histogram on the device
    start = clock();
    getGrayHistincuda_wrong_naive<<<blocksPerGrid, threadsPerBlock>>>(gpuGrayData, gpuGrayHist,
                                                                        imgheight, imgwidth);
    cudaDeviceSynchronize();
    end = clock();
    cudaMemcpy(grayHist, gpuGrayHist, 256*sizeof(unsigned int), cudaMemcpyDeviceToHost);
    for(int i = 0; i < 256; i++) printf("(%d, %d)\n", i, grayHist[i]);
    printf("cuda_wrong_naive run time %.8f\n", (float)(end-start)/CLOCKS_PER_SEC);

#elif PHASE == 2

    memset(grayHist, 0, 256*sizeof(unsigned int));
    cudaMemcpy(gpuGrayHist, grayHist, 256*sizeof(unsigned int), cudaMemcpyHostToDevice);   // zero the histogram on the device
    start = clock();
    getGrayHistincuda_naive<<<blocksPerGrid, threadsPerBlock>>>(gpuGrayData, gpuGrayHist, 
                                                                        imgheight, imgwidth);
    cudaDeviceSynchronize();
    end = clock();
    cudaMemcpy(grayHist, gpuGrayHist, 256*sizeof(unsigned int), cudaMemcpyDeviceToHost);
    for(int i = 0; i < 256; i++) printf("(%d, %d)\n", i, grayHist[i]);
    printf("cuda_naive run time %.8f\n", (float)(end-start)/CLOCKS_PER_SEC);

#elif PHASE == 3

    memset(grayHist, 0, 256*sizeof(unsigned int));
    cudaMemcpy(gpuGrayHist, grayHist, 256*sizeof(unsigned int), cudaMemcpyHostToDevice);   // zero the histogram on the device
    start = clock();
    getGrayHistincuda_usesharemem<<<blocksPerGrid, threadsPerBlock>>>(gpuGrayData, gpuGrayHist, 
                                                                        imgheight, imgwidth);
    cudaDeviceSynchronize();
    end = clock();
    cudaMemcpy(grayHist, gpuGrayHist, 256*sizeof(unsigned int), cudaMemcpyDeviceToHost);
    for(int i = 0; i < 256; i++) printf("(%d, %d)\n", i, grayHist[i]);
    printf("cuda_naive run time %.8f\n", (float)(end-start)/CLOCKS_PER_SEC);
    
#endif

    cudaFree(gpuGrayData);
    cudaFree(gpuGrayHist);

    return 0;

}