Coursera MP 3: Tiled Matrix Multiplication with Shared Memory
// MP 3: Due Sunday, Dec 30, 2012 at 11:59 p.m. PST
#include <wb.h>
#define wbCheck(stmt) do {                              \
        cudaError_t err = stmt;                         \
        if (err != cudaSuccess) {                       \
            wbLog(ERROR, "Failed to run stmt ", #stmt); \
            return -1;                                  \
        }                                               \
    } while(0)
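// Square 16x16 tiles: 256 threads per block, and TILE_WIDTH == TILE_HEIGHT
// keeps the shared-memory tile indexing in the kernel consistent.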
#define TILE_WIDTH 16
#define TILE_HEIGHT 16
// Compute C = A * B
__global__ void matrixMultiplyShared(float * A, float * B, float * C,
                                     int numARows, int numAColumns,
                                     int numBRows, int numBColumns,
                                     int numCRows, int numCColumns) {
    //@@ Insert code to implement matrix multiplication here
    //@@ You have to use shared memory for this MP
    __shared__ float Ads[TILE_HEIGHT][TILE_WIDTH];
    __shared__ float Bds[TILE_HEIGHT][TILE_WIDTH];
    int bx = blockIdx.x;  int by = blockIdx.y;
    int tx = threadIdx.x; int ty = threadIdx.y;
    int Row = by * TILE_HEIGHT + ty; // row of C this thread computes
    int Col = bx * TILE_WIDTH + tx;  // column of C this thread computes
    float Pvalue = 0;
    // Walk the tiles across A's columns / B's rows; tiles are square,
    // so TILE_WIDTH is used for the shared k-extent throughout.
    int numTiles = (numAColumns + TILE_WIDTH - 1) / TILE_WIDTH;
    for (int m = 0; m < numTiles; ++m) {
        // Stage one element of the A tile, zero-padding out-of-range reads.
        if (Row < numARows && m * TILE_WIDTH + tx < numAColumns) {
            Ads[ty][tx] = A[Row * numAColumns + (m * TILE_WIDTH + tx)];
        } else {
            Ads[ty][tx] = 0;
        }
        // Stage one element of the B tile, zero-padding out-of-range reads.
        if (m * TILE_WIDTH + ty < numBRows && Col < numBColumns) {
            Bds[ty][tx] = B[(m * TILE_WIDTH + ty) * numBColumns + Col];
        } else {
            Bds[ty][tx] = 0;
        }
        __syncthreads(); // both tiles fully loaded before any thread reads
        if (Row < numCRows && Col < numCColumns) {
            for (int k = 0; k < TILE_WIDTH; ++k) {
                Pvalue += Ads[ty][k] * Bds[k][tx];
            }
        }
        __syncthreads(); // all reads done before the next iteration reloads
    }
    // Write the finished dot product once, after the tile loop.
    if (Row < numCRows && Col < numCColumns) {
        C[Row * numCColumns + Col] = Pvalue;
    }
}
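/*
 * Each block computes one TILE_WIDTH x TILE_HEIGHT tile of C, so the m loop
 * above runs ceil(numAColumns / TILE_WIDTH) times; e.g. for numAColumns = 100
 * and TILE_WIDTH = 16 that is 7 iterations, with the boundary guards
 * zero-filling the partial final tile.
 */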
int main(int argc, char ** argv) {
    wbArg_t args;
    float * hostA; // The A matrix
    float * hostB; // The B matrix
    float * hostC; // The output C matrix
    float * deviceA;
    float * deviceB;
    float * deviceC;
    int numARows;    // number of rows in the matrix A
    int numAColumns; // number of columns in the matrix A
    int numBRows;    // number of rows in the matrix B
    int numBColumns; // number of columns in the matrix B
    int numCRows;    // number of rows in the matrix C (you have to set this)
    int numCColumns; // number of columns in the matrix C (you have to set this)
    args = wbArg_read(argc, argv);
    wbTime_start(Generic, "Importing data and creating memory on host");
    hostA = (float *) wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns);
    hostB = (float *) wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns);
    //@@ Set numCRows and numCColumns
    // C inherits A's rows and B's columns: (m x k) * (k x n) = (m x n).
    numCRows = numARows;
    numCColumns = numBColumns;
    //@@ Allocate the hostC matrix
    hostC = (float *) malloc(numCRows * numCColumns * sizeof(float));
    wbTime_stop(Generic, "Importing data and creating memory on host");
    wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
    wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
cudaMalloc((void**)&deviceA,numARows*numAColumns*sizeof(float));
cudaMalloc((void**)&deviceB,numBRows*numBColumns*sizeof(float));
cudaMalloc((void**)&deviceC,numCRows*numCColumns*sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
cudaMemcpy(deviceA,hostA,numARows*numAColumns*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(deviceB,hostB,numBRows*numBColumns*sizeof(float),cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
    //@@ Initialize the grid and block dimensions here
    // Block x spans columns (TILE_WIDTH), block y spans rows (TILE_HEIGHT);
    // the grid is rounded up so partial tiles at the edges are covered.
    dim3 dimBlock(TILE_WIDTH, TILE_HEIGHT, 1);
    dim3 dimGrid((numCColumns + TILE_WIDTH - 1) / TILE_WIDTH,
                 (numCRows + TILE_HEIGHT - 1) / TILE_HEIGHT, 1);
    wbTime_start(Compute, "Performing CUDA computation");
    //@@ Launch the GPU Kernel here
    matrixMultiplyShared<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC,
                                                numARows, numAColumns,
                                                numBRows, numBColumns,
                                                numCRows, numCColumns);
    cudaDeviceSynchronize(); // cudaThreadSynchronize() is deprecated
    wbTime_stop(Compute, "Performing CUDA computation");
    wbTime_start(Copy, "Copying output memory to the CPU");
    //@@ Copy the GPU memory back to the CPU here
    wbCheck(cudaMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(float), cudaMemcpyDeviceToHost));
    wbTime_stop(Copy, "Copying output memory to the CPU");
    wbTime_start(GPU, "Freeing GPU Memory");
    //@@ Free the GPU memory here
    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);
    wbTime_stop(GPU, "Freeing GPU Memory");
    wbSolution(args, hostC, numCRows, numCColumns);
    free(hostA);
    free(hostB);
    free(hostC);
    return 0;
}
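As a sanity check outside the wb grader, the device result can be compared against a naive CPU multiply on small inputs. The helper below is a sketch of my own (the name matrixMultiplyHost is not part of the assignment), assuming the same row-major layout used above.

// Hypothetical helper (not part of the MP): naive O(m*n*k) CPU reference,
// handy for spot-checking the GPU output on small matrices.
void matrixMultiplyHost(const float * A, const float * B, float * C,
                        int numARows, int numAColumns, int numBColumns) {
    for (int row = 0; row < numARows; ++row) {
        for (int col = 0; col < numBColumns; ++col) {
            float sum = 0;
            for (int k = 0; k < numAColumns; ++k) {
                sum += A[row * numAColumns + k] * B[k * numBColumns + col];
            }
            C[row * numBColumns + col] = sum;
        }
    }
}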