Can't access Unified Memory after running a kernel that uses it


I am calling cudaMallocManaged for two buffers in my code, and everything works fine for the first kernel (backwardMask()): after it runs I can easily access the data from the host. My problem is with the kernel seriesLengths(). I call cudaMallocManaged for indexmask, and before launching seriesLengths() I can access and modify indexmask on the host without any problem. The kernel itself modifies indexmask and has no trouble accessing it, but after the kernel returns I can no longer read indexmask on the host and I get an exception (status code 0xC0000022).

This is a very strange error, because I am doing everything analogously to the first kernel (backwardMask()), which works correctly.

Any ideas/explanations will be highly appreciated.
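
A minimal error-check sketch around the seriesLengths() launch (standard CUDA runtime calls only; the check is illustrative and not part of the code below) would report the underlying failure as a CUDA error instead of a host exception:

seriesLengths<<<numBlocks, blockSize>>>(scannedbwmask, indexmask, &numOfSeries, N);
cudaError_t err = cudaGetLastError();          // errors from the launch itself
if (err == cudaSuccess)
    err = cudaDeviceSynchronize();             // errors raised while the kernel ran
if (err != cudaSuccess)
    std::cout << "CUDA error: " << cudaGetErrorString(err) << std::endl;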

Here is the seriesLengths kernel code:

__global__ void seriesLengths(int* scannedbw, int* indexmask, int* numOfSeries, int n)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride)
    {
        if (i == (n - 1))
        {
            *numOfSeries = scannedbw[i];
            indexmask[scannedbw[i]] = n;
        }
        if (i == 0)
        {
            indexmask[0] = 0;
        }
        else if (scannedbw[i] != scannedbw[i - 1])
        {
            indexmask[scannedbw[i] - 1] = i;
        }
    }
}

Code of the backwardMask kernel:

__global__ void backwardMask(const char* in, int* bwMask, int n)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride)
    {
        if (i == 0)
            bwMask[i] = 1;
        else
        {
            bwMask[i] = (in[i] != in[i - 1]);
        }
    }
}

Main function:

int main()
{
    int N=1024;
    srand(time(0));
    char* t;
    int* bwmask;
    cudaMallocManaged(&t, N*sizeof(char));
    cudaMallocManaged(&bwmask, N*sizeof(int));
    for (int i = 0; i < N; i++)
    {
        if (i < 300)
            t[i] = 'a' + rand() % 2;
        else
            t[i] = 'a' + rand() % 20;
    }

    for (int j = 0; j < 60; j++)
        std::cout << t[j];
    std::cout << std::endl;
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;
    backwardMask<<<numBlocks, blockSize>>>(t, bwmask, N);
    cudaDeviceSynchronize();
    for (int j = 0; j < 60; j++)
        std::cout << bwmask[j];
    std::cout << std::endl;
    //now inclusive prefix sum for bwmask
    int* scannedbwmask;
    cudaMallocManaged(&scannedbwmask, N*sizeof(int));

    thrust::inclusive_scan(bwmask, bwmask + N, scannedbwmask);
    cudaDeviceSynchronize();

    int numOfSeries;
    //seriesLengths stores the starting index of each series in indexmask; the length of a series is the difference between consecutive entries
    int* indexmask;
    cudaMallocManaged(&indexmask, (N+1)*sizeof(int));
    seriesLengths<<<numBlocks, blockSize>>>(scannedbwmask, indexmask, &numOfSeries, N);
    cudaDeviceSynchronize();

// accessing indexmask here raises the exception (status code 0xC0000022)
    std::cout << indexmask[3];
    /*for (int j = 0; j < 60; j++)
        std::cout << indexmask[j];
    std::cout << std::endl;*/
    std::cout << "numseries " << numOfSeries;


    getch();
    return 0;
}
c++, parallel-processing, cuda, gpu
asked on Stack Overflow Apr 28, 2017 by user3112193

1 Answer


You are passing the address of a host stack variable (&numOfSeries) to the kernel. Device code cannot dereference that host address, so the kernel hits an illegal memory access, and once the kernel has faulted the host can no longer access the managed allocations either, which is the exception you see on indexmask. Change numOfSeries to be a pointer to int:

int* numOfSeries;

then allocate managed memory for it:

cudaMallocManaged(&numOfSeries, sizeof(int));

and pass it to the kernel like this:

seriesLengths<<<numBlocks, blockSize>>>(scannedbwmask, indexmask, numOfSeries, N);
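
With that change, a minimal sketch of the corrected call site might look like the following (the error check and the cudaFree call are illustrative additions, not part of the original code); note that the result now has to be dereferenced on the host as *numOfSeries:

int* numOfSeries;
cudaMallocManaged(&numOfSeries, sizeof(int));

seriesLengths<<<numBlocks, blockSize>>>(scannedbwmask, indexmask, numOfSeries, N);
cudaError_t err = cudaDeviceSynchronize();     // also reports errors from the kernel
if (err != cudaSuccess)
    std::cout << "CUDA error: " << cudaGetErrorString(err) << std::endl;

std::cout << indexmask[3] << std::endl;        // host access to managed memory works again
std::cout << "numseries " << *numOfSeries << std::endl;

cudaFree(numOfSeries);                         // same cleanup applies to t, bwmask, etc.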
answered on Stack Overflow Apr 28, 2017 by Matthew Grossman

User contributions licensed under CC BY-SA 3.0