Coursera MP3: Tiled Matrix Multiplication with Shared Memory

// MP 3: Due Sunday, Dec 30, 2012 at 11:59 p.m. PST
#include <wb.h>

#define wbCheck(stmt) do {                                 \
        cudaError_t err = stmt;                            \
        if (err != cudaSuccess) {                          \
            wbLog(ERROR, "Failed to run stmt ", #stmt);    \
            return -1;                                     \
        }                                                  \
    } while(0)
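
The wbCheck macro above wraps any CUDA runtime call that returns a cudaError_t and bails out of main with a log entry on failure. It is used below to guard the allocations, the copies, and the post-launch synchronization, e.g. wbCheck(cudaMalloc((void**)&deviceA, ...)).
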
// Tile dimensions; the indexing in the kernel assumes square tiles,
// so keep TILE_WIDTH == TILE_HEIGHT
#define TILE_WIDTH 16
#define TILE_HEIGHT 16
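
With 16 x 16 tiles, each block stages one tile of A and one tile of B in shared memory: 2 * 16 * 16 * 4 bytes = 2 KB per block, well under the 48 KB of shared memory per multiprocessor on the Fermi-class GPUs of that era, so shared memory does not limit occupancy here.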
// Compute C = A * B
__global__ void matrixMultiplyShared(float * A, float * B, float * C,
                                     int numARows, int numAColumns,
                                     int numBRows, int numBColumns,
                                     int numCRows, int numCColumns) {
    //@@ Insert code to implement matrix multiplication here
    //@@ You have to use shared memory for this MP
    // Shared-memory tiles of A and B (rows indexed by ty, columns by tx)
    __shared__ float Ads[TILE_HEIGHT][TILE_WIDTH];
    __shared__ float Bds[TILE_HEIGHT][TILE_WIDTH];

    int bx = blockIdx.x;  int by = blockIdx.y;
    int tx = threadIdx.x; int ty = threadIdx.y;

    // Global row and column of the C element this thread computes
    int Row = by * TILE_HEIGHT + ty;
    int Col = bx * TILE_WIDTH + tx;

    float Pvalue = 0;
    // Loop over all the tiles of A and B needed for this C element
    for (int m = 0; m < (numAColumns + TILE_WIDTH - 1) / TILE_WIDTH; ++m) {
        // Load one tile of A and one tile of B into shared memory,
        // zero-padding entries that fall outside the matrices
        if (Row < numARows && m * TILE_WIDTH + tx < numAColumns) {
            Ads[ty][tx] = A[Row * numAColumns + m * TILE_WIDTH + tx];
        } else {
            Ads[ty][tx] = 0;
        }
        if (m * TILE_WIDTH + ty < numBRows && Col < numBColumns) {
            Bds[ty][tx] = B[(m * TILE_WIDTH + ty) * numBColumns + Col];
        } else {
            Bds[ty][tx] = 0;
        }

        __syncthreads();

        // Accumulate this tile's contribution to the dot product;
        // the zero padding makes the unguarded loop safe at the edges
        for (int k = 0; k < TILE_WIDTH; ++k) {
            Pvalue += Ads[ty][k] * Bds[k][tx];
        }

        __syncthreads();
    }

    // Write the result once, after all tiles have been accumulated
    if (Row < numCRows && Col < numCColumns) {
        C[Row * numCColumns + Col] = Pvalue;
    }

}
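
A quick sanity check of the tiling arithmetic, with matrix sizes made up for illustration: multiplying a 100 x 300 A by a 300 x 50 B takes ceil(300 / 16) = 19 phases. In the last phase only 300 - 18 * 16 = 12 of the 16 tile columns land inside the matrices; the rest are zero-filled, and those zeros contribute nothing to the dot product, which is why the inner k loop needs no bounds check.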

int main(int argc, char ** argv) {
    wbArg_t args;
    float * hostA; // The A matrix
    float * hostB; // The B matrix
    float * hostC; // The output C matrix
    float * deviceA;
    float * deviceB;
    float * deviceC;
    int numARows; // number of rows in the matrix A
    int numAColumns; // number of columns in the matrix A
    int numBRows; // number of rows in the matrix B
    int numBColumns; // number of columns in the matrix B
    int numCRows; // number of rows in the matrix C (you have to set this)
    int numCColumns; // number of columns in the matrix C (you have to set this)

    args = wbArg_read(argc, argv);

    wbTime_start(Generic, "Importing data and creating memory on host");
    hostA = (float *) wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns);
    hostB = (float *) wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns);
    //@@ Set numCRows and numCColumns
    numCRows = numARows;       // C = A * B inherits A's row count
    numCColumns = numBColumns; // and B's column count
    //@@ Allocate the hostC matrix
    hostC=(float*)malloc(numCRows*numCColumns*sizeof(float));
    wbTime_stop(Generic, "Importing data and creating memory on host");

    wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
    wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);

    wbTime_start(GPU, "Allocating GPU memory.");
    //@@ Allocate GPU memory here
    wbCheck(cudaMalloc((void**)&deviceA, numARows*numAColumns*sizeof(float)));
    wbCheck(cudaMalloc((void**)&deviceB, numBRows*numBColumns*sizeof(float)));
    wbCheck(cudaMalloc((void**)&deviceC, numCRows*numCColumns*sizeof(float)));
    wbTime_stop(GPU, "Allocating GPU memory.");

    wbTime_start(GPU, "Copying input memory to the GPU.");
    //@@ Copy memory to the GPU here
    wbCheck(cudaMemcpy(deviceA, hostA, numARows*numAColumns*sizeof(float), cudaMemcpyHostToDevice));
    wbCheck(cudaMemcpy(deviceB, hostB, numBRows*numBColumns*sizeof(float), cudaMemcpyHostToDevice));
    wbTime_stop(GPU, "Copying input memory to the GPU.");
    
    //@@ Initialize the grid and block dimensions here
    dim3 dimBlock(TILE_WIDTH, TILE_HEIGHT, 1); // x indexes columns, y indexes rows
    dim3 dimGrid(ceil(numCColumns/(float)TILE_WIDTH), ceil(numCRows/(float)TILE_HEIGHT), 1);
    wbTime_start(Compute, "Performing CUDA computation");
    //@@ Launch the GPU Kernel here
    matrixMultiplyShared<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC,
                                                numARows, numAColumns,
                                                numBRows, numBColumns,
                                                numCRows, numCColumns);
    wbCheck(cudaDeviceSynchronize()); // wait for the kernel; also surfaces launch errors
    wbTime_stop(Compute, "Performing CUDA computation");
    
    wbTime_start(Copy, "Copying output memory to the CPU");
    //@@ Copy the GPU memory back to the CPU here
    wbCheck(cudaMemcpy(hostC, deviceC, numCRows*numCColumns*sizeof(float), cudaMemcpyDeviceToHost));
    wbTime_stop(Copy, "Copying output memory to the CPU");

    wbTime_start(GPU, "Freeing GPU Memory");
    //@@ Free the GPU memory here
    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);
    wbTime_stop(GPU, "Freeing GPU Memory");

    wbSolution(args, hostC, numCRows, numCColumns);

    free(hostA);
    free(hostB);
    free(hostC);

    return 0;
}
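
To build and run this outside the course's web submission system you need the libwb support library. A minimal sketch, assuming libwb has been cloned and built under $LIBWB; the dataset file names and the -i/-o flags are assumptions based on common libwb usage and may differ by version:

nvcc -I"$LIBWB" mp3.cu -L"$LIBWB/build" -lwb -o mp3
./mp3 -i input0.raw,input1.raw -o output.raw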
