GPU CUDA编程中threadIdx, blockIdx, blockDim, gridDim之间的区别与联系

本文主要是介绍GPU CUDA编程中threadIdx, blockIdx, blockDim, gridDim之间的区别与联系,希望对大家解决编程问题提供一定的参考价值,需要的开发者们随着小编来一起学习吧!

在启动kernel的时候,要通过指定gridsize和blocksize才行,举下面的例子说说:
dim3 gridsize(2,2);
dim3 blocksize(4,4);
gridsize相当于定义了一个2*2的grid(网格),即网格中含有2*2个block;gridDim.x、gridDim.y、gridDim.z相当于这个dim3在x、y、z方向上的维度,这里是2*2*1。block序号从0到3,且按列从上到下编号,就是说是下面的情况:
grid中的blockidx序号标注情况为:

0	2
1	3

blocksize则是指里面的thread的情况,blockDim.x,blockDim.y,blockDim.z相当于这个dim3的x,y,z方向的维度,这里是4*4*1.序号是0-15,也是从上到下的标注:
block中的threadidx序号标注情况:

0	4	8	12
1	5	9	13
2	6	10	14
3	7	11	15

具体:
threadIdx是一个uint3类型,表示一个线程的索引。
blockIdx是一个uint3类型,表示一个线程块的索引,一个线程块中通常有多个线程。
blockDim是一个dim3类型,表示线程块的大小。
gridDim是一个dim3类型,表示网格的大小,一个网格中通常有多个线程块。
下面这张图比较清晰的表示的几个概念的关系:
在这里插入图片描述
CUDA 通过<<< >>>符号指定kernel启动时的线程组织方式(grid与block各可为1~3维),两两组合后,下面的示例一共演示了15种索引方式。
在这里插入图片描述

#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <stdlib.h>

#include <iostream>

using namespace std;
// thread 1D: index each element by threadIdx.x within a single block.
// Expected launch: <<<1, N>>> with N == element count. There is no bounds
// guard, so the launch configuration must match the array size exactly.
__global__ void testThread1(int * __restrict__ c, const int * __restrict__ a, const int * __restrict__ b)
{
    int i = threadIdx.x;
    c[i] = b[i] - a[i];
}
// thread 2D: flatten a 2-D thread index (x fastest) within a single block.
// Expected launch: <<<1, dim3(sx, sy)>>> with sx*sy == element count; no bounds guard.
__global__ void testThread2(int * __restrict__ c, const int * __restrict__ a, const int * __restrict__ b)
{
    int i = threadIdx.x + threadIdx.y * blockDim.x;
    c[i] = b[i] - a[i];
}
// thread 3D: flatten a 3-D thread index (x fastest, then y, then z) within one block.
// Expected launch: <<<1, dim3(sx, sy, sz)>>> with sx*sy*sz == element count; no bounds guard.
__global__ void testThread3(int * __restrict__ c, const int * __restrict__ a, const int * __restrict__ b)
{
    int i = threadIdx.x + threadIdx.y * blockDim.x
          + threadIdx.z * blockDim.x * blockDim.y;
    c[i] = b[i] - a[i];
}
// block 1D: one thread per block, index each element by blockIdx.x.
// Expected launch: <<<N, 1>>> with N == element count; no bounds guard.
__global__ void testBlock1(int * __restrict__ c, const int * __restrict__ a, const int * __restrict__ b)
{
    int i = blockIdx.x;
    c[i] = b[i] - a[i];
}
// block 2D: one thread per block; flatten a 2-D grid index (x fastest).
// Expected launch: <<<dim3(gx, gy), 1>>> with gx*gy == element count; no bounds guard.
__global__ void testBlock2(int * __restrict__ c, const int * __restrict__ a, const int * __restrict__ b)
{
    int i = blockIdx.x + blockIdx.y * gridDim.x;
    c[i] = b[i] - a[i];
}
// block 3D: one thread per block; flatten a 3-D grid index (x fastest, then y, then z).
// Expected launch: <<<dim3(gx, gy, gz), 1>>> with gx*gy*gz == element count; no bounds guard.
__global__ void testBlock3(int * __restrict__ c, const int * __restrict__ a, const int * __restrict__ b)
{
    int i = blockIdx.x + blockIdx.y * gridDim.x
          + blockIdx.z * gridDim.x * gridDim.y;
    c[i] = b[i] - a[i];
}
// block-thread 1D-1D: the classic flat global index.
// Expected launch: <<<G, B>>> with G*B == element count; no bounds guard.
__global__ void testBlockThread1(int * __restrict__ c, const int * __restrict__ a, const int * __restrict__ b)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    c[i] = b[i] - a[i];
}
// block-thread 1D-2D: 1-D grid of 2-D blocks.
// Flatten the 2-D thread index first, then offset by whole blocks.
// Expected launch: <<<G, dim3(bx, by)>>> with G*bx*by == element count; no bounds guard.
__global__ void testBlockThread2(int * __restrict__ c, const int * __restrict__ a, const int * __restrict__ b)
{
    int threadId_2D = threadIdx.x + threadIdx.y * blockDim.x;
    int i = threadId_2D + (blockDim.x * blockDim.y) * blockIdx.x;
    c[i] = b[i] - a[i];
}
// block-thread 1D-3D: 1-D grid of 3-D blocks.
// Flatten the 3-D thread index first, then offset by whole blocks.
// Expected launch: <<<G, dim3(bx, by, bz)>>> with G*bx*by*bz == element count; no bounds guard.
__global__ void testBlockThread3(int * __restrict__ c, const int * __restrict__ a, const int * __restrict__ b)
{
    int threadId_3D = threadIdx.x + threadIdx.y * blockDim.x
                    + threadIdx.z * blockDim.x * blockDim.y;
    int i = threadId_3D + (blockDim.x * blockDim.y * blockDim.z) * blockIdx.x;
    c[i] = b[i] - a[i];
}
// block-thread 2D-1D: 2-D grid of 1-D blocks.
// Flatten the 2-D block index first, then add the thread offset.
// Expected launch: <<<dim3(gx, gy), B>>> with gx*gy*B == element count; no bounds guard.
__global__ void testBlockThread4(int * __restrict__ c, const int * __restrict__ a, const int * __restrict__ b)
{
    int blockId_2D = blockIdx.x + blockIdx.y * gridDim.x;
    int i = threadIdx.x + blockDim.x * blockId_2D;
    c[i] = b[i] - a[i];
}
// block-thread 3D-1D: 3-D grid of 1-D blocks.
// Flatten the 3-D block index first, then add the thread offset.
// Expected launch: <<<dim3(gx, gy, gz), B>>> with gx*gy*gz*B == element count; no bounds guard.
__global__ void testBlockThread5(int * __restrict__ c, const int * __restrict__ a, const int * __restrict__ b)
{
    int blockId_3D = blockIdx.x + blockIdx.y * gridDim.x
                   + blockIdx.z * gridDim.x * gridDim.y;
    int i = threadIdx.x + blockDim.x * blockId_3D;
    c[i] = b[i] - a[i];
}
// block-thread 2D-2D: 2-D grid of 2-D blocks.
// Flatten thread and block indices independently, then combine.
// Expected launch: total threads == element count; no bounds guard.
__global__ void testBlockThread6(int * __restrict__ c, const int * __restrict__ a, const int * __restrict__ b)
{
    int threadId_2D = threadIdx.x + threadIdx.y * blockDim.x;
    int blockId_2D = blockIdx.x + blockIdx.y * gridDim.x;
    int i = threadId_2D + (blockDim.x * blockDim.y) * blockId_2D;
    c[i] = b[i] - a[i];
}
// block-thread 2D-3D: 2-D grid of 3-D blocks.
// Flatten thread and block indices independently, then combine.
// Expected launch: total threads == element count; no bounds guard.
__global__ void testBlockThread7(int * __restrict__ c, const int * __restrict__ a, const int * __restrict__ b)
{
    int threadId_3D = threadIdx.x + threadIdx.y * blockDim.x
                    + threadIdx.z * blockDim.x * blockDim.y;
    int blockId_2D = blockIdx.x + blockIdx.y * gridDim.x;
    int i = threadId_3D + (blockDim.x * blockDim.y * blockDim.z) * blockId_2D;
    c[i] = b[i] - a[i];
}
// block-thread 3D-2D: 3-D grid of 2-D blocks.
// Flatten thread and block indices independently, then combine.
// Expected launch: total threads == element count; no bounds guard.
__global__ void testBlockThread8(int * __restrict__ c, const int * __restrict__ a, const int * __restrict__ b)
{
    int threadId_2D = threadIdx.x + threadIdx.y * blockDim.x;
    int blockId_3D = blockIdx.x + blockIdx.y * gridDim.x
                   + blockIdx.z * gridDim.x * gridDim.y;
    int i = threadId_2D + (blockDim.x * blockDim.y) * blockId_3D;
    c[i] = b[i] - a[i];
}
// block-thread 3D-3D: 3-D grid of 3-D blocks.
// Flatten thread and block indices independently, then combine.
// Expected launch: total threads == element count; no bounds guard.
__global__ void testBlockThread9(int * __restrict__ c, const int * __restrict__ a, const int * __restrict__ b)
{
    int threadId_3D = threadIdx.x + threadIdx.y * blockDim.x
                    + threadIdx.z * blockDim.x * blockDim.y;
    int blockId_3D = blockIdx.x + blockIdx.y * gridDim.x
                   + blockIdx.z * gridDim.x * gridDim.y;
    int i = threadId_3D + (blockDim.x * blockDim.y * blockDim.z) * blockId_3D;
    c[i] = b[i] - a[i];
}

// Report any CUDA runtime error with file/line context. Kernel launches do
// not return an error directly, so the launch is checked via cudaGetLastError().
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                \
        }                                                                     \
    } while (0)

// Computes c[i] = b[i] - a[i] on the GPU for i in [0, size).
// Host buffers a and b are copied to the device, one of the demo kernels is
// launched (uncomment an alternative to try another indexing scheme), and the
// result is copied back into c. Note: size must not exceed the per-block
// thread limit for the default testThread1 launch.
void addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;

    CUDA_CHECK(cudaSetDevice(0));
    CUDA_CHECK(cudaMalloc((void**)&dev_c, size * sizeof(int)));
    CUDA_CHECK(cudaMalloc((void**)&dev_a, size * sizeof(int)));
    CUDA_CHECK(cudaMalloc((void**)&dev_b, size * sizeof(int)));
    CUDA_CHECK(cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice));

    testThread1<<<1, size>>>(dev_c, dev_a, dev_b);

    // Alternative launches exercising the other 14 indexing schemes
    // (each configuration covers exactly `size` threads):
    //   uint3 s; s.x = size / 5; s.y = 5; s.z = 1;   testThread2<<<1, s>>>(dev_c, dev_a, dev_b);
    //   uint3 s; s.x = size / 10; s.y = 5; s.z = 2;  testThread3<<<1, s>>>(dev_c, dev_a, dev_b);
    //   testBlock1<<<size, 1>>>(dev_c, dev_a, dev_b);
    //   uint3 s; s.x = size / 5; s.y = 5; s.z = 1;   testBlock2<<<s, 1>>>(dev_c, dev_a, dev_b);
    //   uint3 s; s.x = size / 10; s.y = 5; s.z = 2;  testBlock3<<<s, 1>>>(dev_c, dev_a, dev_b);
    //   testBlockThread1<<<size / 10, 10>>>(dev_c, dev_a, dev_b);
    //   uint3 s1; s1.x = size / 100; s1.y = 1; s1.z = 1;
    //   uint3 s2; s2.x = 10; s2.y = 10; s2.z = 1;    testBlockThread2<<<s1, s2>>>(dev_c, dev_a, dev_b);
    //   uint3 s1; s1.x = size / 100; s1.y = 1; s1.z = 1;
    //   uint3 s2; s2.x = 10; s2.y = 5; s2.z = 2;     testBlockThread3<<<s1, s2>>>(dev_c, dev_a, dev_b);
    //   uint3 s1; s1.x = 10; s1.y = 10; s1.z = 1;
    //   uint3 s2; s2.x = size / 100; s2.y = 1; s2.z = 1;  testBlockThread4<<<s1, s2>>>(dev_c, dev_a, dev_b);
    //   uint3 s1; s1.x = 10; s1.y = 5; s1.z = 2;
    //   uint3 s2; s2.x = size / 100; s2.y = 1; s2.z = 1;  testBlockThread5<<<s1, s2>>>(dev_c, dev_a, dev_b);
    //   uint3 s1; s1.x = size / 100; s1.y = 10; s1.z = 1;
    //   uint3 s2; s2.x = 5; s2.y = 2; s2.z = 1;      testBlockThread6<<<s1, s2>>>(dev_c, dev_a, dev_b);
    //   uint3 s1; s1.x = size / 100; s1.y = 5; s1.z = 1;
    //   uint3 s2; s2.x = 5; s2.y = 2; s2.z = 2;      testBlockThread7<<<s1, s2>>>(dev_c, dev_a, dev_b);
    //   uint3 s1; s1.x = 5; s1.y = 2; s1.z = 2;
    //   uint3 s2; s2.x = size / 100; s2.y = 5; s2.z = 1;  testBlockThread8<<<s1, s2>>>(dev_c, dev_a, dev_b);
    //   uint3 s1; s1.x = 5; s1.y = 2; s1.z = 2;
    //   uint3 s2; s2.x = size / 200; s2.y = 5; s2.z = 2;  testBlockThread9<<<s1, s2>>>(dev_c, dev_a, dev_b);

    CUDA_CHECK(cudaGetLastError());  // catch launch-configuration errors

    // Blocking copy-back also synchronizes with the kernel before reading results.
    CUDA_CHECK(cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost));

    CUDA_CHECK(cudaFree(dev_a));
    CUDA_CHECK(cudaFree(dev_b));
    CUDA_CHECK(cudaFree(dev_c));
}
// Host-side driver: fills two random input arrays, computes a CPU reference
// (c = b - a), runs the GPU version into cc, and prints each reference/GPU
// pair so the two columns can be compared by eye.
int main()
{
    const int n = 1000;
    int *a = new int[n];
    int *b = new int[n];
    int *c = new int[n];   // CPU reference result
    int *cc = new int[n];  // GPU result

    for (int idx = 0; idx < n; ++idx) {
        a[idx] = rand() % 100;
        b[idx] = rand() % 100;
        c[idx] = b[idx] - a[idx];
    }

    addWithCuda(cc, a, b, n);

    for (int idx = 0; idx < n; ++idx) {
        printf("%d %d\n", c[idx], cc[idx]);
    }

    delete[] a;
    delete[] b;
    delete[] c;
    delete[] cc;
    return 0;
}

参考:https://www.cnblogs.com/rainbow70626/p/6498738.html?utm_source=itdadao&utm_medium=referral

https://www.cnblogs.com/tiandsp/p/9458734.html

这篇关于GPU CUDA编程中threadIdx, blockIdx, blockDim, gridDim之间的区别与联系的文章就介绍到这儿,希望我们推荐的文章对编程师们有所帮助!



http://www.chinasem.cn/article/870048

相关文章

Vue和React受控组件的区别小结

《Vue和React受控组件的区别小结》本文主要介绍了Vue和React受控组件的区别小结,文中通过示例代码介绍的非常详细,对大家的学习或者工作具有一定的参考学习价值,需要的朋友们下面随着小编来一起学... 目录背景React 的实现vue3 的实现写法一:直接修改事件参数写法二:通过ref引用 DOMVu

Go之errors.New和fmt.Errorf 的区别小结

《Go之errors.New和fmt.Errorf的区别小结》本文主要介绍了Go之errors.New和fmt.Errorf的区别,文中通过示例代码介绍的非常详细,对大家的学习或者工作具有一定的参考... 目录error的基本用法1. 获取错误信息2. 在条件判断中使用基本区别1.函数签名2.使用场景详细对

Python异步编程之await与asyncio基本用法详解

《Python异步编程之await与asyncio基本用法详解》在Python中,await和asyncio是异步编程的核心工具,用于高效处理I/O密集型任务(如网络请求、文件读写、数据库操作等),接... 目录一、核心概念二、使用场景三、基本用法1. 定义协程2. 运行协程3. 并发执行多个任务四、关键

Redis中哨兵机制和集群的区别及说明

《Redis中哨兵机制和集群的区别及说明》Redis哨兵通过主从复制实现高可用,适用于中小规模数据;集群采用分布式分片,支持动态扩展,适合大规模数据,哨兵管理简单但扩展性弱,集群性能更强但架构复杂,根... 目录一、架构设计与节点角色1. 哨兵机制(Sentinel)2. 集群(Cluster)二、数据分片

AOP编程的基本概念与idea编辑器的配合体验过程

《AOP编程的基本概念与idea编辑器的配合体验过程》文章简要介绍了AOP基础概念,包括Before/Around通知、PointCut切入点、Advice通知体、JoinPoint连接点等,说明它们... 目录BeforeAroundAdvise — 通知PointCut — 切入点Acpect — 切面

一文带你迅速搞懂路由器/交换机/光猫三者概念区别

《一文带你迅速搞懂路由器/交换机/光猫三者概念区别》讨论网络设备时,常提及路由器、交换机及光猫等词汇,日常生活、工作中,这些设备至关重要,居家上网、企业内部沟通乃至互联网冲浪皆无法脱离其影响力,本文将... 当谈论网络设备时,我们常常会听到路由器、交换机和光猫这几个名词。它们是构建现代网络基础设施的关键组成

redis和redission分布式锁原理及区别说明

《redis和redission分布式锁原理及区别说明》文章对比了synchronized、乐观锁、Redis分布式锁及Redission锁的原理与区别,指出在集群环境下synchronized失效,... 目录Redis和redission分布式锁原理及区别1、有的同伴想到了synchronized关键字

C#异步编程ConfigureAwait的使用小结

《C#异步编程ConfigureAwait的使用小结》本文介绍了异步编程在GUI和服务器端应用的优势,详细的介绍了async和await的关键作用,通过实例解析了在UI线程正确使用await.Conf... 异步编程是并发的一种形式,它有两大好处:对于面向终端用户的GUI程序,提高了响应能力对于服务器端应

Java中数组与栈和堆之间的关系说明

《Java中数组与栈和堆之间的关系说明》文章讲解了Java数组的初始化方式、内存存储机制、引用传递特性及遍历、排序、拷贝技巧,强调引用数据类型方法调用时形参可能修改实参,但需注意引用指向单一对象的特性... 目录Java中数组与栈和堆的关系遍历数组接下来是一些编程小技巧总结Java中数组与栈和堆的关系关于

C# async await 异步编程实现机制详解

《C#asyncawait异步编程实现机制详解》async/await是C#5.0引入的语法糖,它基于**状态机(StateMachine)**模式实现,将异步方法转换为编译器生成的状态机类,本... 目录一、async/await 异步编程实现机制1.1 核心概念1.2 编译器转换过程1.3 关键组件解析