
《GPU高性能编程CUDA实战》 (CUDA by Example) Study Notes (10)


Chapter 10: Streams

Executing large-scale data-parallel computations on the GPU outperforms executing them on the CPU. In addition, NVIDIA GPUs support another kind of parallelism, similar to the task parallelism found in multithreaded CPU applications. Task parallelism means executing two or more different tasks in parallel, rather than executing the same task over a large amount of data.

In a parallel environment, a task can be any operation; for example, one thread draws the GUI while another downloads an update package over the network. This chapter introduces CUDA streams and shows how to use them to execute multiple tasks on the GPU at the same time.

10.1 Chapter Objectives

  • Learn how to allocate page-locked host memory.
  • Learn what CUDA streams are.
  • Learn how to use CUDA streams to accelerate applications.

10.2 Page-Locked Host Memory

The C library allocates host memory with malloc(); CUDA can also allocate host memory, using cudaHostAlloc().

The difference: malloc() allocates standard, pageable host memory, while cudaHostAlloc() allocates page-locked host memory.
Page-locked memory, also called pinned memory or non-pageable memory, has an important property: the operating system will never page this memory out to disk, which guarantees that it always resides in physical memory. As a result, the OS can safely allow an application to access the physical address of this memory, because the memory will never be destroyed or relocated.

Since the GPU knows the physical address of the memory, it can use direct memory access (DMA) to copy data between the GPU and the host. DMA copies proceed without CPU involvement, which means the CPU could be paging the memory out to disk, or relocating its physical address by updating the OS page tables, while the DMA is in flight. The CPU may move pageable data at any time, and that can delay DMA operations. This is why pinned memory matters.

Pinned memory is a double-edged sword, so the recommendation is: use page-locked memory only for buffers that serve as the source or destination of a cudaMemcpy(), and free it as soon as it is no longer needed.

#include "../common/book.h" #define SIZE (64*1024*1024) float cuda_malloc_test( int size, bool up ) { cudaEvent_t start, stop; int *a, *dev_a; float elapsedTime; HANDLE_ERROR( cudaEventCreate( &start ) ); HANDLE_ERROR( cudaEventCreate( &stop ) ); a = (int*)malloc( size * sizeof( *a ) ); HANDLE_NULL( a ); HANDLE_ERROR( cudaMalloc( (void**)&dev_a, size * sizeof( *dev_a ) ) ); HANDLE_ERROR( cudaEventRecord( start, 0 ) ); for (int i=0; i<100; i++) { if (up) HANDLE_ERROR( cudaMemcpy( dev_a, a, size * sizeof( *dev_a ), cudaMemcpyHostToDevice ) ); else HANDLE_ERROR( cudaMemcpy( a, dev_a, size * sizeof( *dev_a ), cudaMemcpyDeviceToHost ) ); } HANDLE_ERROR( cudaEventRecord( stop, 0 ) ); HANDLE_ERROR( cudaEventSynchronize( stop ) ); HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) ); free( a ); HANDLE_ERROR( cudaFree( dev_a ) ); HANDLE_ERROR( cudaEventDestroy( start ) ); HANDLE_ERROR( cudaEventDestroy( stop ) ); return elapsedTime; } float cuda_host_alloc_test( int size, bool up ) { cudaEvent_t start, stop; int *a, *dev_a; float elapsedTime; HANDLE_ERROR( cudaEventCreate( &start ) ); HANDLE_ERROR( cudaEventCreate( &stop ) ); HANDLE_ERROR( cudaHostAlloc( (void**)&a, size * sizeof( *a ), cudaHostAllocDefault ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_a, size * sizeof( *dev_a ) ) ); HANDLE_ERROR( cudaEventRecord( start, 0 ) ); for (int i=0; i<100; i++) { if (up) HANDLE_ERROR( cudaMemcpy( dev_a, a, size * sizeof( *a ), cudaMemcpyHostToDevice ) ); else HANDLE_ERROR( cudaMemcpy( a, dev_a, size * sizeof( *a ), cudaMemcpyDeviceToHost ) ); } HANDLE_ERROR( cudaEventRecord( stop, 0 ) ); HANDLE_ERROR( cudaEventSynchronize( stop ) ); HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) ); HANDLE_ERROR( cudaFreeHost( a ) ); HANDLE_ERROR( cudaFree( dev_a ) ); HANDLE_ERROR( cudaEventDestroy( start ) ); HANDLE_ERROR( cudaEventDestroy( stop ) ); return elapsedTime; } int main( void ) { float elapsedTime; float MB = (float)100*SIZE*sizeof(int)/1024/1024; // try it with cudaMalloc elapsedTime = cuda_malloc_test( SIZE, true ); printf( "Time using cudaMalloc: %3.1f ms\n", elapsedTime ); printf( "\tMB/s during copy up: %3.1f\n", MB/(elapsedTime/1000) ); elapsedTime = cuda_malloc_test( SIZE, false ); printf( "Time using cudaMalloc: %3.1f ms\n", elapsedTime ); printf( "\tMB/s during copy down: %3.1f\n", MB/(elapsedTime/1000) ); // now try it with cudaHostAlloc elapsedTime = cuda_host_alloc_test( SIZE, true ); printf( "Time using cudaHostAlloc: %3.1f ms\n", elapsedTime ); printf( "\tMB/s during copy up: %3.1f\n", MB/(elapsedTime/1000) ); elapsedTime = cuda_host_alloc_test( SIZE, false ); printf( "Time using cudaHostAlloc: %3.1f ms\n", elapsedTime ); printf( "\tMB/s during copy down: %3.1f\n", MB/(elapsedTime/1000) ); }

cudaHostAlloc() is paired with cudaFreeHost(): memory allocated with cudaHostAlloc() must be released with cudaFreeHost(), not free().
Running the code above shows the performance improvement of pinned memory.
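A minimal sketch of the allocate/free pairing (HANDLE_ERROR is the book's error-checking macro from ../common/book.h; the 1024-element size is arbitrary):

int *a;
HANDLE_ERROR( cudaHostAlloc( (void**)&a, 1024 * sizeof(int), cudaHostAllocDefault ) );
// ... use a as the source or destination of cudaMemcpy() ...
HANDLE_ERROR( cudaFreeHost( a ) );   // pinned memory must not be released with free()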

10.3 CUDA Streams

The second argument of cudaEventRecord() specifies the stream into which the event is inserted:

cudaEvent_t start;
HANDLE_ERROR( cudaEventCreate( &start ) );
HANDLE_ERROR( cudaEventRecord( start, 0 ) );   // 0 = the default stream
CUDA streams play an important role in accelerating applications. A CUDA stream represents a queue of GPU operations that execute in a specified order. We can add operations to a stream, such as kernel launches, memory copies, and event starts and stops; the order in which operations are added to the stream is the order in which they execute. Each stream can be viewed as a task on the GPU, and these tasks can execute in parallel with one another. The following sections show how to use streams to accelerate an application.
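As a minimal sketch (again assuming the book's HANDLE_ERROR macro), an event can be recorded into a user-created stream rather than the default stream 0:

cudaStream_t stream;
cudaEvent_t evt;
HANDLE_ERROR( cudaStreamCreate( &stream ) );
HANDLE_ERROR( cudaEventCreate( &evt ) );
HANDLE_ERROR( cudaEventRecord( evt, stream ) );   // enqueued into 'stream', not stream 0
HANDLE_ERROR( cudaEventSynchronize( evt ) );      // host blocks until all work before 'evt' in 'stream' completes
HANDLE_ERROR( cudaEventDestroy( evt ) );
HANDLE_ERROR( cudaStreamDestroy( stream ) );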

10.4 Using a Single CUDA Stream

#include "../common/book.h" #define N (1024*1024) #define FULL_DATA_SIZE (N*20) __global__ void kernel( int *a, int *b, int *c ) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < N) { int idx1 = (idx + 1) % 256; int idx2 = (idx + 2) % 256; float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f; float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f; c[idx] = (as + bs) / 2; } } int main( void ) { cudaDeviceProp prop; int whichDevice; HANDLE_ERROR( cudaGetDevice( &whichDevice ) ); HANDLE_ERROR( cudaGetDeviceProperties( &prop, whichDevice ) ); if (!prop.deviceOverlap) { printf( "Device will not handle overlaps, so no speed up from streams\n" ); return 0; } cudaEvent_t start, stop; float elapsedTime; cudaStream_t stream; int *host_a, *host_b, *host_c; int *dev_a, *dev_b, *dev_c; // start the timers HANDLE_ERROR( cudaEventCreate( &start ) ); HANDLE_ERROR( cudaEventCreate( &stop ) ); // initialize the stream HANDLE_ERROR( cudaStreamCreate( &stream ) ); // allocate the memory on the GPU HANDLE_ERROR( cudaMalloc( (void**)&dev_a, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_b, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_c, N * sizeof(int) ) ); // allocate host locked memory, used to stream HANDLE_ERROR( cudaHostAlloc( (void**)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) ); HANDLE_ERROR( cudaHostAlloc( (void**)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) ); HANDLE_ERROR( cudaHostAlloc( (void**)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) ); for (int i=0; i<FULL_DATA_SIZE; i++) { host_a[i] = rand(); host_b[i] = rand(); } HANDLE_ERROR( cudaEventRecord( start, 0 ) ); // now loop over full data, in bite-sized chunks for (int i=0; i<FULL_DATA_SIZE; i+= N) { // copy the locked memory to the device, async HANDLE_ERROR( cudaMemcpyAsync( dev_a, host_a+i, N * sizeof(int), cudaMemcpyHostToDevice, stream ) ); HANDLE_ERROR( cudaMemcpyAsync( dev_b, host_b+i, N * sizeof(int), cudaMemcpyHostToDevice, stream ) ); kernel<<<N/256,256,0,stream>>>( dev_a, dev_b, dev_c ); // copy the data from device to locked memory HANDLE_ERROR( cudaMemcpyAsync( host_c+i, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost, stream ) ); } // copy result chunk from locked to full buffer HANDLE_ERROR( cudaStreamSynchronize( stream ) ); HANDLE_ERROR( cudaEventRecord( stop, 0 ) ); HANDLE_ERROR( cudaEventSynchronize( stop ) ); HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) ); printf( "Time taken: %3.1f ms\n", elapsedTime ); // cleanup the streams and memory HANDLE_ERROR( cudaFreeHost( host_a ) ); HANDLE_ERROR( cudaFreeHost( host_b ) ); HANDLE_ERROR( cudaFreeHost( host_c ) ); HANDLE_ERROR( cudaFree( dev_a ) ); HANDLE_ERROR( cudaFree( dev_b ) ); HANDLE_ERROR( cudaFree( dev_c ) ); HANDLE_ERROR( cudaStreamDestroy( stream ) ); return 0; }
This example uses a single stream to illustrate how streams work; the interesting part is main().
The first step is to choose a device that supports the device overlap feature. A GPU with device-overlap support can execute a CUDA C kernel while simultaneously performing a copy between the device and the host. As mentioned earlier, we will use multiple streams to overlap computation with data transfer in exactly this way.
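The check, excerpted from the listing above:

cudaDeviceProp prop;
int whichDevice;
HANDLE_ERROR( cudaGetDevice( &whichDevice ) );
HANDLE_ERROR( cudaGetDeviceProperties( &prop, whichDevice ) );
if (!prop.deviceOverlap) {
    printf( "Device will not handle overlaps, so no speed up from streams\n" );
    return 0;
}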

(1) Create the stream:
cudaStream_t stream;
// initialize the stream
HANDLE_ERROR( cudaStreamCreate( &stream ) );
(2) Allocate the data:
int *host_a, *host_b, *host_c;
int *dev_a, *dev_b, *dev_c;

// allocate the memory on the GPU
HANDLE_ERROR( cudaMalloc( (void**)&dev_a, N * sizeof(int) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_b, N * sizeof(int) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_c, N * sizeof(int) ) );

// allocate host locked memory, used to stream
HANDLE_ERROR( cudaHostAlloc( (void**)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) );
HANDLE_ERROR( cudaHostAlloc( (void**)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) );
HANDLE_ERROR( cudaHostAlloc( (void**)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) );

for (int i=0; i<FULL_DATA_SIZE; i++) {
    host_a[i] = rand();
    host_b[i] = rand();
}
cudaHostAlloc() gives us pinned host memory; besides faster copies, its other benefits will become clear in a moment.
Normally we would copy the entire input buffers to the GPU, launch the kernel, and then copy the entire output buffer back to the host. This time we change the approach: we split the input buffers into smaller chunks and perform a three-step process on each chunk:
1. copy that portion of the input buffers to the GPU;
2. run the kernel on that portion of the buffers;
3. copy that portion of the results in the output buffer back to the host.
Usage scenario: the GPU's memory is far smaller than the host's, so the entire buffer cannot fit on the GPU at once and the computation must proceed chunk by chunk.
// now loop over full data, in bite-sized chunks
for (int i=0; i<FULL_DATA_SIZE; i+= N) {
    // copy the locked memory to the device, async
    HANDLE_ERROR( cudaMemcpyAsync( dev_a, host_a+i, N * sizeof(int), cudaMemcpyHostToDevice, stream ) );
    HANDLE_ERROR( cudaMemcpyAsync( dev_b, host_b+i, N * sizeof(int), cudaMemcpyHostToDevice, stream ) );

    kernel<<<N/256,256,0,stream>>>( dev_a, dev_b, dev_c );

    // copy the data from device to locked memory
    HANDLE_ERROR( cudaMemcpyAsync( host_c+i, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost, stream ) );
}
cudaMemcpy() executes synchronously: when the function returns, the copy has completed and the destination buffer contains the copied contents.
cudaMemcpyAsync() is asynchronous: it only places a request to perform a memory copy into the stream specified by the stream argument. Any host memory pointer passed to cudaMemcpyAsync() must have been allocated with cudaHostAlloc(); only page-locked memory can be copied asynchronously.
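A minimal sketch of the contrast (dev, pinned_host, bytes, and stream are placeholder names; pinned_host is assumed to have been allocated with cudaHostAlloc()):

// synchronous: does not return until the copy has completed
HANDLE_ERROR( cudaMemcpy( dev, pinned_host, bytes, cudaMemcpyHostToDevice ) );

// asynchronous: returns immediately; the copy is merely enqueued into 'stream'
HANDLE_ERROR( cudaMemcpyAsync( dev, pinned_host, bytes, cudaMemcpyHostToDevice, stream ) );

// before the host reads results produced by async operations in 'stream',
// it must wait for the stream to drain
HANDLE_ERROR( cudaStreamSynchronize( stream ) );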

10.5 Using Multiple CUDA Streams

On any device that supports overlapping memory copies with kernel execution, using multiple streams can improve the overall performance of the application.
#include "../common/book.h" #define N (1024*1024) #define FULL_DATA_SIZE (N*20) __global__ void kernel( int *a, int *b, int *c ) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < N) { int idx1 = (idx + 1) % 256; int idx2 = (idx + 2) % 256; float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f; float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f; c[idx] = (as + bs) / 2; } } int main( void ) { cudaDeviceProp prop; int whichDevice; HANDLE_ERROR( cudaGetDevice( &whichDevice ) ); HANDLE_ERROR( cudaGetDeviceProperties( &prop, whichDevice ) ); if (!prop.deviceOverlap) { printf( "Device will not handle overlaps, so no speed up from streams\n" ); return 0; } cudaEvent_t start, stop; float elapsedTime; cudaStream_t stream0, stream1; int *host_a, *host_b, *host_c; int *dev_a0, *dev_b0, *dev_c0; int *dev_a1, *dev_b1, *dev_c1; // start the timers HANDLE_ERROR( cudaEventCreate( &start ) ); HANDLE_ERROR( cudaEventCreate( &stop ) ); // initialize the streams HANDLE_ERROR( cudaStreamCreate( &stream0 ) ); HANDLE_ERROR( cudaStreamCreate( &stream1 ) ); // allocate the memory on the GPU HANDLE_ERROR( cudaMalloc( (void**)&dev_a0, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_b0, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_c0, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_a1, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_b1, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_c1, N * sizeof(int) ) ); // allocate host locked memory, used to stream HANDLE_ERROR( cudaHostAlloc( (void**)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) ); HANDLE_ERROR( cudaHostAlloc( (void**)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) ); HANDLE_ERROR( cudaHostAlloc( (void**)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) ); for (int i=0; i<FULL_DATA_SIZE; i++) { host_a[i] = rand(); host_b[i] = rand(); } HANDLE_ERROR( cudaEventRecord( start, 0 ) ); // now loop over full data, in bite-sized chunks for (int i=0; i<FULL_DATA_SIZE; i+= N*2) { // copy the locked memory to the device, async HANDLE_ERROR( cudaMemcpyAsync( dev_a0, host_a+i, N * sizeof(int), cudaMemcpyHostToDevice, stream0 ) ); HANDLE_ERROR( cudaMemcpyAsync( dev_b0, host_b+i, N * sizeof(int), cudaMemcpyHostToDevice, stream0 ) ); kernel<<<N/256,256,0,stream0>>>( dev_a0, dev_b0, dev_c0 ); // copy the data from device to locked memory HANDLE_ERROR( cudaMemcpyAsync( host_c+i, dev_c0, N * sizeof(int), cudaMemcpyDeviceToHost, stream0 ) ); // copy the locked memory to the device, async HANDLE_ERROR( cudaMemcpyAsync( dev_a1, host_a+i+N, N * sizeof(int), cudaMemcpyHostToDevice, stream1 ) ); HANDLE_ERROR( cudaMemcpyAsync( dev_b1, host_b+i+N, N * sizeof(int), cudaMemcpyHostToDevice, stream1 ) ); kernel<<<N/256,256,0,stream1>>>( dev_a1, dev_b1, dev_c1 ); // copy the data from device to locked memory HANDLE_ERROR( cudaMemcpyAsync( host_c+i+N, dev_c1, N * sizeof(int), cudaMemcpyDeviceToHost, stream1 ) ); } HANDLE_ERROR( cudaStreamSynchronize( stream0 ) ); HANDLE_ERROR( cudaStreamSynchronize( stream1 ) ); HANDLE_ERROR( cudaEventRecord( stop, 0 ) ); HANDLE_ERROR( cudaEventSynchronize( stop ) ); HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) ); printf( "Time taken: %3.1f ms\n", elapsedTime ); // cleanup the streams and memory HANDLE_ERROR( cudaFreeHost( host_a ) ); HANDLE_ERROR( cudaFreeHost( host_b ) ); HANDLE_ERROR( cudaFreeHost( host_c ) ); HANDLE_ERROR( cudaFree( dev_a0 ) ); HANDLE_ERROR( cudaFree( dev_b0 ) ); HANDLE_ERROR( cudaFree( dev_c0 ) ); HANDLE_ERROR( 
cudaFree( dev_a1 ) ); HANDLE_ERROR( cudaFree( dev_b1 ) ); HANDLE_ERROR( cudaFree( dev_c1 ) ); HANDLE_ERROR( cudaStreamDestroy( stream0 ) ); HANDLE_ERROR( cudaStreamDestroy( stream1 ) ); return 0; }

10.6 The GPU's Work Scheduling Mechanism

A stream can be thought of as an ordered sequence of operations, including memory copies and kernel invocations. The hardware, however, has no notion of streams; instead, it has one or more engines that perform memory copies and an engine that executes kernels, and each of these engines queues up operations independently of the others.
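To illustrate why this matters, here is a sketch, under the simplifying assumption of a single copy engine and a single kernel engine, of how the depth-first program from 10.5 lands in the hardware queues:

// copy engine queue:                  kernel engine queue:
//   1. copy a0 H2D (stream0)            1. kernel (stream0)
//   2. copy b0 H2D (stream0)            2. kernel (stream1)
//   3. copy c0 D2H (stream0)
//   4. copy a1 H2D (stream1)
//   5. copy b1 H2D (stream1)
//   6. copy c1 D2H (stream1)
//
// The D2H copy of c0 cannot begin until stream0's kernel has finished; since
// it sits ahead of stream1's H2D copies in the copy engine's queue, it holds
// them up as well, so copies and kernels never actually overlap.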

10.7 Using Multiple CUDA Streams Efficiently

If all of one stream's operations are scheduled at once, it is easy to inadvertently block the copies or kernel executions of another stream. To solve this, enqueue operations into the streams breadth-first rather than depth-first; that is, interleave the operations of the two streams, as the full program below does.
#include "../common/book.h" #define N (1024*1024) #define FULL_DATA_SIZE (N*20) __global__ void kernel( int *a, int *b, int *c ) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < N) { int idx1 = (idx + 1) % 256; int idx2 = (idx + 2) % 256; float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f; float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f; c[idx] = (as + bs) / 2; } } int main( void ) { cudaDeviceProp prop; int whichDevice; HANDLE_ERROR( cudaGetDevice( &whichDevice ) ); HANDLE_ERROR( cudaGetDeviceProperties( &prop, whichDevice ) ); if (!prop.deviceOverlap) { printf( "Device will not handle overlaps, so no speed up from streams\n" ); return 0; } cudaEvent_t start, stop; float elapsedTime; cudaStream_t stream0, stream1; int *host_a, *host_b, *host_c; int *dev_a0, *dev_b0, *dev_c0; int *dev_a1, *dev_b1, *dev_c1; // start the timers HANDLE_ERROR( cudaEventCreate( &start ) ); HANDLE_ERROR( cudaEventCreate( &stop ) ); // initialize the streams HANDLE_ERROR( cudaStreamCreate( &stream0 ) ); HANDLE_ERROR( cudaStreamCreate( &stream1 ) ); // allocate the memory on the GPU HANDLE_ERROR( cudaMalloc( (void**)&dev_a0, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_b0, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_c0, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_a1, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_b1, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_c1, N * sizeof(int) ) ); // allocate host locked memory, used to stream HANDLE_ERROR( cudaHostAlloc( (void**)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) ); HANDLE_ERROR( cudaHostAlloc( (void**)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) ); HANDLE_ERROR( cudaHostAlloc( (void**)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) ); for (int i=0; i<FULL_DATA_SIZE; i++) { host_a[i] = rand(); host_b[i] = rand(); } HANDLE_ERROR( cudaEventRecord( start, 0 ) ); // now loop over full data, in bite-sized chunks for (int i=0; i<FULL_DATA_SIZE; i+= N*2) { // enqueue copies of a in stream0 and stream1 HANDLE_ERROR( cudaMemcpyAsync( dev_a0, host_a+i, N * sizeof(int), cudaMemcpyHostToDevice, stream0 ) ); HANDLE_ERROR( cudaMemcpyAsync( dev_a1, host_a+i+N, N * sizeof(int), cudaMemcpyHostToDevice, stream1 ) ); // enqueue copies of b in stream0 and stream1 HANDLE_ERROR( cudaMemcpyAsync( dev_b0, host_b+i, N * sizeof(int), cudaMemcpyHostToDevice, stream0 ) ); HANDLE_ERROR( cudaMemcpyAsync( dev_b1, host_b+i+N, N * sizeof(int), cudaMemcpyHostToDevice, stream1 ) ); // enqueue kernels in stream0 and stream1 kernel<<<N/256,256,0,stream0>>>( dev_a0, dev_b0, dev_c0 ); kernel<<<N/256,256,0,stream1>>>( dev_a1, dev_b1, dev_c1 ); // enqueue copies of c from device to locked memory HANDLE_ERROR( cudaMemcpyAsync( host_c+i, dev_c0, N * sizeof(int), cudaMemcpyDeviceToHost, stream0 ) ); HANDLE_ERROR( cudaMemcpyAsync( host_c+i+N, dev_c1, N * sizeof(int), cudaMemcpyDeviceToHost, stream1 ) ); } HANDLE_ERROR( cudaStreamSynchronize( stream0 ) ); HANDLE_ERROR( cudaStreamSynchronize( stream1 ) ); HANDLE_ERROR( cudaEventRecord( stop, 0 ) ); HANDLE_ERROR( cudaEventSynchronize( stop ) ); HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) ); printf( "Time taken: %3.1f ms\n", elapsedTime ); // cleanup the streams and memory HANDLE_ERROR( cudaFreeHost( host_a ) ); HANDLE_ERROR( cudaFreeHost( host_b ) ); HANDLE_ERROR( cudaFreeHost( host_c ) ); HANDLE_ERROR( cudaFree( dev_a0 ) ); HANDLE_ERROR( cudaFree( dev_b0 ) ); HANDLE_ERROR( cudaFree( dev_c0 ) ); HANDLE_ERROR( 
cudaFree( dev_a1 ) ); HANDLE_ERROR( cudaFree( dev_b1 ) ); HANDLE_ERROR( cudaFree( dev_c1 ) ); HANDLE_ERROR( cudaStreamDestroy( stream0 ) ); HANDLE_ERROR( cudaStreamDestroy( stream1 ) ); return 0; }
This version runs faster.