Heisenbug in a CUDA kernel: global memory access

About two years ago, I wrote a kernel that works on several numerical grids simultaneously. Some very strange behaviour emerged, which led to incorrect results. When I hunted for the bug using printf() statements inside the kernel, the bug vanished.

Due to deadline constraints, I kept it that way, though I recently decided that this was not an acceptable coding style. So I revisited my kernel and boiled it down to the minimal reproducer you see below.

__launch_bounds__(672, 2)
__global__ void heisenkernel(float *d_u, float *d_r, float *d_du, int radius,
        int numNodesPerGrid, int numBlocksPerSM, int numGridsPerSM, int numGrids)
{
    __syncthreads();
    int id_sm           = blockIdx.x /   numBlocksPerSM;                                    // (arbitrary) ID of Streaming Multiprocessor (SM) this thread works upon           - (constant over lifetime of thread)
    int id_blockOnSM    = blockIdx.x % numBlocksPerSM;                                      // Block number on this specific SM                                                 - (constant over lifetime of thread)
    int id_r            = id_blockOnSM  * (blockDim.x - 2*radius) + threadIdx.x - radius;   // Grid point number this thread is to work upon                                    - (constant over lifetime of thread)
    int id_grid         = id_sm         * numGridsPerSM;                                    // Grid ID this thread is to work upon                                              - (not constant over lifetime of thread)

    while(id_grid < numGridsPerSM * (id_sm + 1))    // this loops over numGridsPerSM grids
    {
        __syncthreads();
        int id_numInArray       = id_grid * numNodesPerGrid + id_r;     // Entry in array this thread is responsible for (read and possibly write)  - (not constant over lifetime of thread)
        float uchange           = 0.0f;
        //uchange                   = 1.0f;                                 // if this line is uncommented, results will be computed correctly ("Solution 1")
        float du                = 0.0f;

        if((threadIdx.x > radius-1) && (threadIdx.x < blockDim.x - radius) && (id_r < numNodesPerGrid) && (id_grid < numGrids))
        {
            if (id_r == 0)  // FO-forward difference
                du = (d_u[id_numInArray+1] - d_u[id_numInArray])/(d_r[id_numInArray+1] - d_r[id_numInArray]);
            else if (id_r == numNodesPerGrid - 1)  // FO-rearward difference
                du = (d_u[id_numInArray] - d_u[id_numInArray-1])/(d_r[id_numInArray] - d_r[id_numInArray-1]);
            else if (id_r == 1 || id_r == numNodesPerGrid - 2) //SO-central difference
                du = (d_u[id_numInArray+1] - d_u[id_numInArray-1])/(d_r[id_numInArray+1] - d_r[id_numInArray-1]);
            else if(id_r > 1 && id_r < numNodesPerGrid - 2)
                du = d_fourpoint_constant * ((d_u[id_numInArray+1] - d_u[id_numInArray-1])/(d_r[id_numInArray+1] - d_r[id_numInArray-1])) + (1-d_fourpoint_constant) * ((d_u[id_numInArray+2] - d_u[id_numInArray-2])/(d_r[id_numInArray+2] - d_r[id_numInArray-2]));
            else
                du = 0;
        }

        __syncthreads();
        if((threadIdx.x > radius-1 && threadIdx.x < blockDim.x - radius) && (id_r < numNodesPerGrid) && (id_grid < numGrids))
        {
            d_u[    id_numInArray] = d_u[id_numInArray] * uchange;          // if this line is commented out, results will be computed correctly ("Solution 2")
            d_du[   id_numInArray] = du;
        }

        __syncthreads();
        ++id_grid;
    }
}

This kernel calculates the derivative of some quantity at all grid points for a number of numerical 1D grids.
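Spelled out, with u = d_u, r = d_r, i the grid-point index and N = numNodesPerGrid, the kernel applies (this is just the code above in formula form):

du/dr at i ≈ (u[i+1] - u[i])   / (r[i+1] - r[i])                           i = 0       (forward)
du/dr at i ≈ (u[i]   - u[i-1]) / (r[i]   - r[i-1])                         i = N-1     (rearward)
du/dr at i ≈ (u[i+1] - u[i-1]) / (r[i+1] - r[i-1])                         i = 1, N-2  (central)
du/dr at i ≈ c*(u[i+1] - u[i-1])/(r[i+1] - r[i-1])
           + (1-c)*(u[i+2] - u[i-2])/(r[i+2] - r[i-2]),   c = 0.2          otherwise   (four-point blend)

so an interior thread touches up to the four neighbouring points i-2 .. i+2.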

Things to consider: (see the full code base below)

  • one grid consists of 1300 grid points
  • each grid is worked on by two blocks (due to memory/register limitations)
  • each block successively works on 37 grids (or fewer; the while-loop takes care of that)
  • each thread is responsible for the same grid point in every grid it loops over
  • for the derivative to be computed, a thread needs access to the data of the four neighbouring grid points
  • in order to keep the blocks independent of each other, a small overlap on the grid is introduced (grid points 666, 667, 668, 669 of each grid are read by threads from both blocks, though only one thread writes to them; it is at this overlap that the problems occur)
  • due to the boiling-down process, the two blocks here each work on all the grids

The data lives in the host arrays u_arr, du_arr and r_arr (and the corresponding device arrays d_u, d_du and d_r). Each grid occupies 1300 consecutive entries in each of these arrays, and the while-loop in the kernel iterates over 37 grids per block.
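The indexing arithmetic, made explicit: with TPB = 672 threads per block and radius = 2, each block writes blockDim.x - 2*radius = 668 grid points, so block 0 of a grid writes points 0..667 and block 1 writes points 668..1299, while each thread's read stencil reaches up to two points beyond its own point. A small host-side sketch of the same mapping (the helper name flatIndex is mine):

// Mirrors the kernel: grid k occupies entries [k*numNodesPerGrid, (k+1)*numNodesPerGrid)
// of u_arr/du_arr/r_arr, and id_numInArray = id_grid * numNodesPerGrid + id_r.
inline int flatIndex(int k, int i, int numNodesPerGrid)
{
    return k * numNodesPerGrid + i;
}

// Mirrors id_r = id_blockOnSM * (blockDim.x - 2*radius) + threadIdx.x - radius:
//   block 0, threads 2..669 -> id_r =   0..667
//   block 1, threads 2..669 -> id_r = 668..1335 (capped to 668..1299 by id_r < numNodesPerGrid)
// Reads at id_r +/- 2 make points 666..669 visible to both blocks.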

All grids are initialized with identical values, so every grid must also yield identical results. After the kernel has finished, the host code checks that this is the case.

The strange behaviour:

Comparing grid 0 against all other grids shows that, at the overlap points (grid points 666-669), some grids end up with values that differ from those of grid 0. Which grids are affected, and by how much, changes from run to run, and some runs even finish without any error at all. The failure is thus non-deterministic, and it disappears under the slightest change to the code (see the two "solutions" marked in the kernel).

Debugging with printf(): as soon as a printf() statement is placed anywhere inside the kernel, the "Heisenbug" vanishes and all results come out correct, regardless of where the statement sits or what it prints. The same happens when the program is run under the debugger (Nsight Eclipse).

Memcheck/Racecheck:

cuda-memcheck (both the memcheck and the racecheck tool) reports no errors or hazards. Valgrind reports a number of leaks, but these originate inside the CUDA API libraries, which I cannot influence.
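If the valgrind noise from the CUDA libraries gets in the way, the usual remedy is a suppression file for the driver/runtime frames; a minimal sketch (the file name cuda.supp is my choice):

{
   cuda_driver_leaks
   Memcheck:Leak
   ...
   obj:*libcuda.so*
}
{
   cuda_runtime_leaks
   Memcheck:Leak
   ...
   obj:*libcudart.so*
}

Run it as valgrind --suppressions=cuda.supp ./heisenbug; the reports that remain are then from your own code.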

(Note: as it turned out, cuda-memcheck --tool racecheck could not have found this race anyway; the race is on d_u, which resides in global memory, while racecheck only covers shared memory.)

Different CUDA devices:

Running the code on different CUDA-capable devices (compute capabilities 2.0, 3.0 and 3.5) showed that the bug does not show up on every device, and that it appears with varying frequency.

My (main) test system:

  • 2 x GTX 460; the bug shows up both on the GPU driving the X server and on the second card
  • Driver version: 340.46
  • CUDA Toolkit 6.5
  • Linux kernel 3.11.0-12-generic (Linux Mint 16, Xfce)

The question:

After all this testing, my best guess is some kind of race condition, but I cannot see where it could come from, which is why I am turning to the people here who know CUDA better than I do. In particular, I cannot explain why an in-kernel printf() makes the error vanish (a timing effect?), nor why neither of the memory checkers (cuda-memcheck and valgrind) reports anything.

The error appears and disappears with the smallest, seemingly unrelated changes to the code, which makes it very hard to narrow down: every attempt to observe it alters the outcome. The two "solutions" marked in the kernel are no real solutions either, since both of them change what the kernel actually computes.

"Solution 1" puzzles me the most: with the default uchange = 0.0f, the write-back stores d_u * 0.0f, i.e. zero; uncommenting uchange = 1.0f means d_u is simply multiplied by 1.0 and written back unchanged, yet that alone makes all results come out correct.

Also note that the wrong values show up in d_du, not in d_u itself, even though "Solution 2" only removes the write to d_u. The code is compiled with -arch=sm_20 for the GTX 460 (see the Makefile); for the compute-capability 3.5 device, -arch=sm_35 was used.

heisenbug.cu:

#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>

const float r_sol = 6.955E8f;
__constant__ float d_fourpoint_constant = 0.2f;

__launch_bounds__(672, 2)
__global__ void heisenkernel(float *d_u, float *d_r, float *d_du, int radius,
        int numNodesPerGrid, int numBlocksPerSM, int numGridsPerSM, int numGrids)
{
    __syncthreads();
    int id_sm           = blockIdx.x / numBlocksPerSM;                                      // (arbitrary) ID of Streaming Multiprocessor (SM) this thread works upon           - (constant over lifetime of thread)
    int id_blockOnSM    = blockIdx.x % numBlocksPerSM;                                      // Block number on this specific SM                                                 - (constant over lifetime of thread)
    int id_r            = id_blockOnSM  * (blockDim.x - 2*radius) + threadIdx.x - radius;   // Grid point number this thread is to work upon                                    - (constant over lifetime of thread)
    int id_grid         = id_sm         * numGridsPerSM;                                    // Grid ID this thread is to work upon                                              - (not constant over lifetime of thread)

    while(id_grid < numGridsPerSM * (id_sm + 1))    // this loops over numGridsPerSM grids
    {
        __syncthreads();
        int id_numInArray       = id_grid * numNodesPerGrid + id_r;     // Entry in array this thread is responsible for (read and possibly write)  - (not constant over lifetime of thread)
        float uchange           = 0.0f;
        //uchange                   = 1.0f;                                 // if this line is uncommented, results will be computed correctly ("Solution 1")
        float du                = 0.0f;

        if((threadIdx.x > radius-1) && (threadIdx.x < blockDim.x - radius) && (id_r < numNodesPerGrid) && (id_grid < numGrids))
        {
            if (id_r == 0)  // FO-forward difference
                du = (d_u[id_numInArray+1] - d_u[id_numInArray])/(d_r[id_numInArray+1] - d_r[id_numInArray]);
            else if (id_r == numNodesPerGrid - 1)  // FO-rearward difference
                du = (d_u[id_numInArray] - d_u[id_numInArray-1])/(d_r[id_numInArray] - d_r[id_numInArray-1]);
            else if (id_r == 1 || id_r == numNodesPerGrid - 2) //SO-central difference
                du = (d_u[id_numInArray+1] - d_u[id_numInArray-1])/(d_r[id_numInArray+1] - d_r[id_numInArray-1]);
            else if(id_r > 1 && id_r < numNodesPerGrid - 2)
                du = d_fourpoint_constant * ((d_u[id_numInArray+1] - d_u[id_numInArray-1])/(d_r[id_numInArray+1] - d_r[id_numInArray-1])) + (1-d_fourpoint_constant) * ((d_u[id_numInArray+2] - d_u[id_numInArray-2])/(d_r[id_numInArray+2] - d_r[id_numInArray-2]));
            else
                du = 0;
        }

        __syncthreads();
        if((threadIdx.x > radius-1 && threadIdx.x < blockDim.x - radius) && (id_r < numNodesPerGrid) && (id_grid < numGrids))
        {
            d_u[    id_numInArray] = d_u[id_numInArray] * uchange;          // if this line is commented out, results will be computed correctly ("Solution 2")
            d_du[   id_numInArray] = du;
        }

        __syncthreads();
        ++id_grid;
    }
}

bool gridValuesEqual(float *matarray, uint id0, uint id1, const char *label, int numNodesPerGrid){

    bool retval = true;
    for(uint i=0; i<numNodesPerGrid; ++i)
        if(matarray[id0 * numNodesPerGrid + i] != matarray[id1 * numNodesPerGrid + i])
        {
            printf("value %s at position %u of grid %u not equal that of grid %u: %E != %E, diff: %E\n",
                    label, i, id0, id1, matarray[id0 * numNodesPerGrid + i], matarray[id1 * numNodesPerGrid + i],
                    matarray[id0 * numNodesPerGrid + i] - matarray[id1 * numNodesPerGrid + i]);
            retval = false;
        }
    return retval;
}

int main(int argc, const char* argv[])
{
    float *d_u;
    float *d_du;
    float *d_r;

    float *u_arr;
    float *du_arr;
    float *r_arr;

    int numNodesPerGrid = 1300;
    int numBlocksPerSM  = 2;
    int numGridsPerSM   = 37;
    int numSM           = 7;
    int TPB             = 672;
    int radius          = 2;
    int numGrids        = 259;
    int memsize_grid    = sizeof(float) * numNodesPerGrid;

    int numBlocksPerGrid    = numNodesPerGrid / (TPB - 2 * radius) + (numNodesPerGrid%(TPB - 2 * radius) == 0 ? 0 : 1);

    printf("---------------------------------------------------------------------------\n");
    printf("--- Heisenbug Extermination Tracker ---------------------------------------\n");
    printf("---------------------------------------------------------------------------\n\n");

    cudaSetDevice(0);
    cudaDeviceReset();

    cudaMalloc((void **) &d_u,      memsize_grid * numGrids);
    cudaMalloc((void **) &d_du,     memsize_grid * numGrids);
    cudaMalloc((void **) &d_r,      memsize_grid * numGrids);

    u_arr   = new float[numGrids * numNodesPerGrid];
    du_arr  = new float[numGrids * numNodesPerGrid];
    r_arr   = new float[numGrids * numNodesPerGrid];

    for(uint k=0; k<numGrids; ++k)
        for(uint i=0; i<numNodesPerGrid; ++i)
        {
            uint index  = k * numNodesPerGrid + i;

            if (i < 585)
                r_arr[index] = i * (6000.0f);
            else
            {
                if (i == 585)
                    r_arr[index] = r_arr[index - 1] + 8.576E-6f * r_sol;
                else
                    r_arr[index] = r_arr[index - 1] + 1.02102f  * ( r_arr[index - 1] - r_arr[index - 2] );
            }

            u_arr[index]    = 1E-10f * (i+1);
            du_arr[index]   = 0.0f;
        }

    /*
    printf("\n\nbefore kernel start\n\n");
    for(uint k=0; k<numGrids; ++k)
        printf("matrix->du_arr[k*paramH.numNodes + 668]:\t%E\n", du_arr[k*numNodesPerGrid + 668]);//*/

    bool equal = true;
    for(int k=1; k<numGrids; ++k)
    {
        equal &= gridValuesEqual(u_arr, 0, k, "u", numNodesPerGrid);
        equal &= gridValuesEqual(du_arr, 0, k, "du", numNodesPerGrid);
        equal &= gridValuesEqual(r_arr, 0, k, "r", numNodesPerGrid);
    }

    if(!equal)
        printf("Input values are not identical for different grids!\n\n");
    else
        printf("All grids contain the same values at same grid points.!\n\n");

    cudaMemcpy(d_u, u_arr,      memsize_grid * numGrids, cudaMemcpyHostToDevice);
    cudaMemcpy(d_du, du_arr,    memsize_grid * numGrids, cudaMemcpyHostToDevice);
    cudaMemcpy(d_r, r_arr,      memsize_grid * numGrids, cudaMemcpyHostToDevice);

    printf("Configuration:\n\n");
    printf("numNodesPerGrid:\t%i\nnumBlocksPerSM:\t\t%i\nnumGridsPerSM:\t\t%i\n", numNodesPerGrid, numBlocksPerSM, numGridsPerSM);
    printf("numSM:\t\t\t\t%i\nTPB:\t\t\t\t%i\nradius:\t\t\t\t%i\nnumGrids:\t\t\t%i\nmemsize_grid:\t\t%i\n", numSM, TPB, radius, numGrids, memsize_grid);
    printf("numBlocksPerGrid:\t%i\n\n", numBlocksPerGrid);
    printf("Kernel launch parameters:\n\n");
    printf("moduleA2_3<<<%i, %i, %i>>>(...)\n\n", numBlocksPerSM * numSM, TPB, 0);
    printf("Launching Kernel...\n\n");

    heisenkernel<<<numBlocksPerSM * numSM, TPB, 0>>>(d_u, d_r, d_du, radius, numNodesPerGrid, numBlocksPerSM, numGridsPerSM, numGrids);
    cudaDeviceSynchronize();

    cudaMemcpy(u_arr, d_u,      memsize_grid * numGrids, cudaMemcpyDeviceToHost);
    cudaMemcpy(du_arr, d_du,    memsize_grid * numGrids, cudaMemcpyDeviceToHost);
    cudaMemcpy(r_arr, d_r,      memsize_grid * numGrids, cudaMemcpyDeviceToHost);

    /*
    printf("\n\nafter kernel finished\n\n");
    for(uint k=0; k<numGrids; ++k)
        printf("matrix->du_arr[k*paramH.numNodes + 668]:\t%E\n", du_arr[k*numNodesPerGrid + 668]);//*/

    equal = true;
    for(int k=1; k<numGrids; ++k)
    {
        equal &= gridValuesEqual(u_arr, 0, k, "u", numNodesPerGrid);
        equal &= gridValuesEqual(du_arr, 0, k, "du", numNodesPerGrid);
        equal &= gridValuesEqual(r_arr, 0, k, "r", numNodesPerGrid);
    }

    if(!equal)
        printf("Results are wrong!!\n");
    else
        printf("All went well!\n");

    cudaFree(d_u);
    cudaFree(d_du);
    cudaFree(d_r);

    delete [] u_arr;
    delete [] du_arr;
    delete [] r_arr;

    return 0;
}

Makefile:

CUDA            = 1
DEFINES         = 

ifeq ($(CUDA), 1)
    DEFINES     += -DCUDA
    CUDAPATH    = /usr/local/cuda-6.5
    CUDAINCPATH = -I$(CUDAPATH)/include
    CUDAARCH    = -arch=sm_20
endif

CXX             = g++
CXXFLAGS        = -pipe -g -std=c++0x -fPIE -O0 $(DEFINES)
VALGRIND        = valgrind
VALGRIND_FLAGS  = -v --leak-check=yes --log-file=out.memcheck
CUDAMEMCHECK    = cuda-memcheck
CUDAMC_FLAGS    = --tool memcheck
RACECHECK       = $(CUDAMEMCHECK)
RACECHECK_FLAGS = --tool racecheck  
INCPATH         = -I. $(CUDAINCPATH)
LINK            = g++
LFLAGS          = -O0
LIBS            = 

ifeq ($(CUDA), 1)
    NVCC        = $(CUDAPATH)/bin/nvcc
    LIBS        += -L$(CUDAPATH)/lib64/ 
    LIBS        += -lcuda -lcudart -lcudadevrt
    NVCCFLAGS   = -g -G -O0 --ptxas-options=-v
    NVCCFLAGS   += -lcuda -lcudart -lcudadevrt -lineinfo --machine 64 -x cu $(CUDAARCH) $(DEFINES)
endif 

all: 
    $(NVCC) $(NVCCFLAGS) $(INCPATH) -c -o $(DST_DIR)heisenbug.o $(SRC_DIR)heisenbug.cu
    $(LINK) $(LFLAGS) -o heisenbug heisenbug.o $(LIBS)

clean:
    rm heisenbug.o
    rm heisenbug

memrace: all
    ./heisenbug > out
    $(VALGRIND) $(VALGRIND_FLAGS) ./heisenbug > out.memcheck.log
    $(CUDAMEMCHECK) $(CUDAMC_FLAGS) ./heisenbug > out.cudamemcheck
    $(RACECHECK) $(RACECHECK_FLAGS) ./heisenbug > out.racecheck

Answer:

All the clues you collected point the same way: every change that makes the bug vanish ("Solution 1", "Solution 2", the in-kernel printf()) either removes the write to d_u, makes it harmless, or changes the timing around it.

What you have is a race condition between blocks, and the contested memory is d_u.

You state in your list of things to consider:

  • in order to keep the blocks independent of each other, a small overlap on the grid is introduced (grid points 666, 667, 668, 669 of each grid are read by threads from both blocks, though only one thread writes to them; it is at this overlap that the problems occur)

Combined with the fact that your kernel both reads from and writes to d_u, this overlap is a recipe for trouble.

CUDA gives no guarantees about the order in which blocks execute. You have (at least) two blocks that read grid points 666, 667, 668, 669 of each grid. The result will differ depending on which case actually occurs:

  • both blocks read those values before either block writes them back, or
  • one block writes them back before the other block reads them.

Blocks are not independent of each other if one block reads a value that another block may write, as happens here with d_u. In that case the order of block execution determines the result, and CUDA does not specify that order. This also explains why the failure varies from run to run and from machine to machine: block scheduling varies, too.
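The usual cure is to make sure that, within one kernel launch, no block ever reads memory that another block may write. Below is a minimal sketch of a double-buffered variant (my illustration, not code from the question; d_u_in/d_u_out are hypothetical names, and only the plain central difference is shown in place of the full case analysis):

// Sketch: reads come from d_u_in, writes go to d_u_out, so the inter-block
// overlap at points 666..669 is harmless within a single launch.
__global__ void heisenkernel_fixed(const float *d_u_in, float *d_u_out,
        const float *d_r, float *d_du, int radius, int numNodesPerGrid,
        int numBlocksPerSM, int numGridsPerSM, int numGrids)
{
    int id_sm        = blockIdx.x / numBlocksPerSM;
    int id_blockOnSM = blockIdx.x % numBlocksPerSM;
    int id_r         = id_blockOnSM * (blockDim.x - 2*radius) + threadIdx.x - radius;

    for(int id_grid = id_sm * numGridsPerSM; id_grid < numGridsPerSM * (id_sm + 1); ++id_grid)
    {
        int i = id_grid * numNodesPerGrid + id_r;
        if((threadIdx.x > radius-1) && (threadIdx.x < blockDim.x - radius)
                && (id_r > 0) && (id_r < numNodesPerGrid - 1) && (id_grid < numGrids))
        {
            float du   = (d_u_in[i+1] - d_u_in[i-1]) / (d_r[i+1] - d_r[i-1]);
            d_u_out[i] = d_u_in[i];   // all writes go to the other buffer: no read/write hazard
            d_du[i]    = du;
        }
    }
}

The host swaps d_u_in and d_u_out between launches; the kernel launch boundary is the only device-wide synchronization point available here, which is exactly what the block-local __syncthreads() in the original kernel could not provide.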

Note that cuda-memcheck with the --tool racecheck option only detects race conditions involving __shared__ memory. Your kernel uses no __shared__ memory at all, so cuda-memcheck is unable to report anything here.
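For contrast, here is the kind of hazard racecheck does catch (a deliberately broken toy kernel of my own, unrelated to the question's code):

__global__ void sharedRace(int *out)
{
    __shared__ int s[32];
    s[threadIdx.x % 32] = threadIdx.x;        // with 64 threads, warp 1 overwrites the slots warp 0 wrote
    // a __syncthreads() is deliberately missing here
    out[threadIdx.x] = s[(threadIdx.x + 1) % 32];
}

// launched e.g. as sharedRace<<<1, 64>>>(d_out);
// cuda-memcheck --tool racecheck flags the cross-warp read-after-write hazard on s[]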

cuda-memcheck itself also influences the order in which blocks are scheduled (it has to, in order to collect its data), so it is not surprising that running under it hides the bug as well.

An in-kernel printf() is a costly function call that writes to a global memory buffer. It therefore changes the timing and scheduling behaviour of the kernel; the same goes for running under a debugger. If the racing blocks are delayed relative to each other in the right way, the hazardous interleaving simply no longer occurs, and the results come out correct.

One last remark: Linux Mint is not a supported distribution for CUDA, so the toolchain is slightly off the beaten path. I doubt that this matters here, though; everything observed is fully explained by the race condition described above.
