CUDA: cascaded summation of all vector elements

I have implemented a cascaded addition function for a large vector of float values, on both my GPU and my CPU. That simply means that all elements of the vector are summed up into one result. The CPU algorithm is trivial and works fine, but the GPU algorithm is always off from the desired result by exactly 35200.

Minimal working code for the algorithm, together with the comparison against the CPU version, follows below.

The output is always this:

CPU Time: 22.760059 ms, bandwidth: 3.514929 GB/s
GPU Time (improved): 12.077088 ms, bandwidth: 6.624114 GB/s
- CPU result does not match GPU result in improved atomic add.
   CPU: 10000000.000000, GPU: 10035200.000000, diff:-35200.000000

I checked it with cuda-memcheck, and no errors occurred during that run. I have tried many different things, but none of them worked. It is not due to inaccuracy of the float data type, because I changed all floats to integers and still got exactly the same result.

This is my code:

#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <chrono>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
void reductionWithCudaImproved(float *result, const float *input);
__global__ void reductionKernelImproved(float *result, const float *input);
void reductionCPU(float *result, const float *input);
#define SIZE 10000000
#define TILE 32
#define ILP 8
#define BLOCK_X_IMPR (TILE / ILP)
#define BLOCK_Y_IMPR 32
#define BLOCK_COUNT_X_IMPR 100
int main()
{
    int i;
    float *input;
    float resultCPU, resultGPU;
    double cpuTime, cpuBandwidth;
    input = (float*)malloc(SIZE * sizeof(float));
    resultCPU = 0.0;
    resultGPU = 0.0;
    srand((int)time(NULL));
    auto start = std::chrono::high_resolution_clock::now();
    auto end = std::chrono::high_resolution_clock::now();
    for (i = 0; i < SIZE; i++)
        input[i] = 1.0;
    start = std::chrono::high_resolution_clock::now();
    reductionCPU(&resultCPU, input);
    end = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> diff = end - start;
    cpuTime = (diff.count() * 1000);
    cpuBandwidth = (sizeof(float) * SIZE * 2) / (cpuTime * 1000000);
    printf("CPU Time: %f ms, bandwidth: %f GB/snn", cpuTime, cpuBandwidth);
    reductionWithCudaImproved(&resultGPU, input);
    if (resultCPU != resultGPU)
        printf("- CPU result does not match GPU result in improved atomic add. CPU: %f, GPU: %f, diff:%fnn", resultCPU, resultGPU, (resultCPU - resultGPU));
    else
        printf("+ CPU result matches GPU result in improved atomic add. CPU: %f, GPU: %fnn", resultCPU, resultGPU);
    return 0;
}
void reductionCPU(float *result, const float *input)
{
    for (int i = 0; i < SIZE; i++)
        *result += input[i];
}
__global__ void reductionKernelImproved(float *result, const float *input)
{
    int i;
    int col = (blockDim.x * blockIdx.x + threadIdx.x) * ILP;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int index = row * blockDim.x * BLOCK_COUNT_X_IMPR + col;
    __shared__ float interResult;
    if (threadIdx.x == 0 && threadIdx.y == 0)
        interResult = 0.0;
    __syncthreads();
#pragma unroll ILP
    for (i = 0; i < ILP; i++)
    {
        if (index < SIZE)
        {
            atomicAdd(&interResult, input[index]);
            index++;
        }
    }
    __syncthreads();
    if (threadIdx.x == 0 && threadIdx.y == 0)
        atomicAdd(result, interResult);
}
void reductionWithCudaImproved(float *result, const float *input)
{
    dim3 dim_grid, dim_block;
    float *dev_input = 0;
    float *dev_result = 0;
    cudaEvent_t start, stop;
    float elapsed = 0;
    double gpuBandwidth;
    dim_block.x = BLOCK_X_IMPR;
    dim_block.y = BLOCK_Y_IMPR;
    dim_block.z = 1;
    dim_grid.x = BLOCK_COUNT_X_IMPR;
    dim_grid.y = (int)ceil((float)SIZE / (float)(TILE * dim_block.y* BLOCK_COUNT_X_IMPR));
    dim_grid.z = 1;
    cudaSetDevice(0);
    cudaMalloc((void**)&dev_input, SIZE * sizeof(float));
    cudaMalloc((void**)&dev_result, sizeof(float));
    cudaMemcpy(dev_input, input, SIZE * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_result, result, sizeof(float), cudaMemcpyHostToDevice);
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    reductionKernelImproved<<<dim_grid, dim_block>>>(dev_result, dev_input);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed, start, stop);
    gpuBandwidth = (sizeof(float) * SIZE * 2) / (elapsed * 1000000);
    printf("GPU Time (improved): %f ms, bandwidth: %f GB/sn", elapsed, gpuBandwidth);
    cudaDeviceSynchronize();
    cudaMemcpy(result, dev_result, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(dev_input);
    cudaFree(dev_result);
    return;
}

I think you have overlapping indices in your kernel call:

int col = (blockDim.x * blockIdx.x + threadIdx.x) * ILP;
int row = blockDim.y * blockIdx.y + threadIdx.y;
int index = row * blockDim.x * BLOCK_COUNT_X_IMPR + col;

If I am not mistaken, your blockDim.x = 4 and BLOCK_COUNT_X_IMPR = 100, so each row jumps by 400 indices. However, your col can go as high as 400 * 8.

Consider:

blockIdx = (12, 0)
threadIdx = (3, 0)
=> col = (12*4 + 3) * 8 = 408
   row = 0
   index = 408
blockIdx = (0, 0)
threadIdx = (1, 1)
=> col = (0*4 + 1) * 8 = 8
   row = 1
   index = 1 * 400 + 8 = 408
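
Both of these threads end up at index 408. Because the largest index any thread can reach with this formula is still well below SIZE, every thread performs all ILP = 8 additions, so for an all-ones input the GPU result is simply the total number of additions: 100 * 98 blocks * 128 threads * 8 = 10,035,200, which is exactly the value you see. Here is a small host-side sketch (plain C++; the grid height of 98 is the ceil(SIZE / (TILE * BLOCK_Y_IMPR * BLOCK_COUNT_X_IMPR)) from your launch code) that replays the index computation and counts the overlap:

#include <cstdio>
#include <vector>
// Same constants as in the question's code.
#define SIZE 10000000
#define ILP 8
#define BLOCK_X_IMPR 4          // TILE / ILP
#define BLOCK_Y_IMPR 32
#define BLOCK_COUNT_X_IMPR 100
#define BLOCK_COUNT_Y_IMPR 98   // ceil(SIZE / (TILE * BLOCK_Y_IMPR * BLOCK_COUNT_X_IMPR))
int main()
{
    std::vector<int> hits(SIZE, 0);
    long long additions = 0;
    for (int by = 0; by < BLOCK_COUNT_Y_IMPR; by++)
        for (int bx = 0; bx < BLOCK_COUNT_X_IMPR; bx++)
            for (int ty = 0; ty < BLOCK_Y_IMPR; ty++)
                for (int tx = 0; tx < BLOCK_X_IMPR; tx++)
                {
                    // Same index computation as in reductionKernelImproved
                    int col = (BLOCK_X_IMPR * bx + tx) * ILP;
                    int row = BLOCK_Y_IMPR * by + ty;
                    int index = row * BLOCK_X_IMPR * BLOCK_COUNT_X_IMPR + col;
                    for (int i = 0; i < ILP; i++)
                        if (index < SIZE) { hits[index]++; additions++; index++; }
                }
    long long touched = 0, duplicates = 0;
    for (int k = 0; k < SIZE; k++)
    {
        if (hits[k] > 0) touched++;
        if (hits[k] > 1) duplicates++;
    }
    printf("additions performed: %lld (== GPU result for an all-ones input)\n", additions);
    printf("distinct elements touched: %lld of %d\n", touched, SIZE);
    printf("elements added more than once: %lld\n", duplicates);
    return 0;
}

No thread can ever reach beyond index 1,257,199 with this indexing, so most of the vector is never read at all; the sum only lands near 10,000,000 because the low elements are counted many times over.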

So I think you should rewrite your indexing:

// gridDim.x = BLOCK_COUNT_X_IMPR
int index = row * blockDim.x * gridDim.x * ILP + col;
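
For illustration, here is a minimal sketch of the kernel with only the index computation changed (kernel renamed to reductionKernelFixed here; the body otherwise follows the question's kernel). With this stride, consecutive rows are blockDim.x * gridDim.x * ILP = 3200 elements apart, which matches the furthest element a row can reach (largest col is 3192, plus ILP consecutive elements), so rows no longer overlap:

__global__ void reductionKernelFixed(float *result, const float *input)
{
    int i;
    int col = (blockDim.x * blockIdx.x + threadIdx.x) * ILP;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    // One grid row is blockDim.x * gridDim.x threads wide and each thread
    // covers ILP consecutive elements, so rows must be spaced that far apart.
    int index = row * blockDim.x * gridDim.x * ILP + col;
    __shared__ float interResult;
    if (threadIdx.x == 0 && threadIdx.y == 0)
        interResult = 0.0;
    __syncthreads();
    for (i = 0; i < ILP; i++)
    {
        if (index < SIZE)
        {
            atomicAdd(&interResult, input[index]);
            index++;
        }
    }
    __syncthreads();
    if (threadIdx.x == 0 && threadIdx.y == 0)
        atomicAdd(result, interResult);
}

Only the indexing is fixed in this sketch; the reduction strategy itself (atomicAdd into shared memory, then one atomicAdd per block into the global result) is unchanged, and with the all-ones input it should now reproduce the CPU result of 10,000,000.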