commit 72dbae1ebe
parent 83704dd303

    Done. The Boost version is much faster, alas.
sortbench.cu (67 lines changed)
--- a/sortbench.cu
+++ b/sortbench.cu
@@ -36,8 +36,8 @@ void prefix_scan_8_0_shmem(unsigned char *keys, int nitems, int *pfxs) {
 #define BLKSZ 512
 
 __global__
-void prefix_scan(unsigned short *offsets, int *pfxs,
-                 const unsigned short *keys, const int shift) {
+void prefix_scan_8_0(unsigned short *offsets, int *pfxs,
+                     const unsigned short *keys) {
     const int tid = threadIdx.x;
     __shared__ int shr_pfxs[RDXSZ];
 
@@ -49,7 +49,7 @@ void prefix_scan(unsigned short *offsets, int *pfxs,
         // TODO: compiler smart enough to turn this into a BFE?
         // TODO: should this just be two functions with fixed shifts?
         // TODO: separate or integrated loop vars? unrolling?
-        int value = (keys[i] >> shift) & 0xff;
+        int value = keys[i] & 0xff;
         offsets[i] = atomicAdd(shr_pfxs + value, 1);
         i += BLKSZ;
     }
@@ -58,6 +58,61 @@ void prefix_scan(unsigned short *offsets, int *pfxs,
     if (tid < RDXSZ) pfxs[tid + RDXSZ * blockIdx.x] = shr_pfxs[tid];
 }
 
+__global__
+void prefix_scan_8_8(unsigned short *offsets, int *pfxs,
+                     const unsigned short *keys) {
+    const int tid = threadIdx.x;
+    const int blk_offset = GRPSZ * blockIdx.x;
+    __shared__ int shr_pfxs[RDXSZ];
+    __shared__ int shr_lo_radix;
+    __shared__ int shr_rerun;
+
+    if (tid < RDXSZ) {
+        shr_pfxs[tid] = 0;
+        if (tid == 0) {
+            shr_lo_radix = keys[GRPSZ * blockIdx.x] & 0xff;
+            shr_rerun = 0;
+        }
+    }
+    __syncthreads();
+
+    int ran = 0;
+
+    int i = tid;
+    while (i < GRPSZ) {
+        int value = keys[i + blk_offset];
+        int lo_radix = value & 0xff;
+        if (shr_lo_radix < lo_radix) {
+            shr_rerun = 1;
+        } else if (shr_lo_radix == lo_radix) {
+            int radix = (value >> 8) & 0xff;
+            offsets[i + blk_offset] = atomicAdd(shr_pfxs + radix, 1);
+            ran = 1;
+        } else if (shr_lo_radix > lo_radix && !ran) {
+            // For reasons I have yet to bother assessing, the optimizer
+            // mangles this function unless it also includes code that runs on
+            // this case. This code should never actually run, though. In
+            // fact, 'ran' could be eliminated entirely, but for this.
+            offsets[i] = offsets[i];
+        }
+
+        __syncthreads();
+        if (shr_rerun) {
+            if (tid == 0) {
+                shr_lo_radix += 1;
+                shr_rerun = 0;
+            }
+            __syncthreads();
+        } else {
+            i += blockDim.x;
+            ran = 0;
+        }
+    }
+
+    __syncthreads();
+    if (tid < RDXSZ) pfxs[tid + RDXSZ * blockIdx.x] = shr_pfxs[tid];
+}
+
 __global__
 void prefix_scan_8_0_shmem_shortseg(unsigned char *keys, int *pfxs) {
     const int tid = threadIdx.y * 32 + threadIdx.x;
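(Reading aid, not part of the commit.) The new prefix_scan_8_8 kernel assumes each 8192-key block arrives already sorted by its low byte from the first pass; the shr_rerun loop then walks the block one low-byte segment at a time, histogramming the high byte into shr_pfxs and handing each key its running count as its offset. Below is a minimal NumPy sketch of that contract, with GRPSZ/RDXSZ mirroring the CUDA defines; within a segment the device atomics assign ranks in arbitrary order, where this model simply goes sequentially, so it yields one valid assignment rather than the exact one.

    import numpy as np

    GRPSZ, RDXSZ = 8192, 256

    def prefix_scan_8_8_ref(keys):
        # keys: uint16 array, length a multiple of GRPSZ; each GRPSZ-key block
        # is assumed already sorted by its low byte, as the first pass leaves it.
        blocks = keys.reshape(-1, GRPSZ)
        offsets = np.empty(blocks.shape, dtype=np.uint16)
        pfxs = np.zeros((blocks.shape[0], RDXSZ), dtype=np.int32)
        for b, blk in enumerate(blocks):
            counts = np.zeros(RDXSZ, dtype=np.int32)
            for i, k in enumerate(blk):
                hi = (int(k) >> 8) & 0xff
                offsets[b, i] = counts[hi]   # rank among equal high bytes so far
                counts[hi] += 1
            pfxs[b] = counts                 # per-block high-byte histogram
        return offsets, pfxs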
@@ -263,7 +318,11 @@ void convert_offsets(
     for (int i = tid; i < GRPSZ; i += BLKSZ) {
         int r = (keys[blk_offset + i] >> shift) & 0xff;
         int o = shr_split[r] + offsets[blk_offset + i];
-        shr_offsets[o] = i;
+        if (o < GRPSZ)
+            shr_offsets[o] = i;
+        else
+            printf("\nWTF b:%4x i:%4x r:%2x o:%4x s:%4x og:%4x",
+                   blockIdx.x, i, r, o, shr_split[r], offsets[blk_offset+i]);
     }
     __syncthreads();
 
sortbench.py (35 lines changed)
--- a/sortbench.py
+++ b/sortbench.py
@@ -14,7 +14,7 @@ os.environ['PATH'] = ('/usr/x86_64-pc-linux-gnu/gcc-bin/4.4.6:'
 i32 = np.int32
 
 with open('sortbench.cu') as f: src = f.read()
-mod = pycuda.compiler.SourceModule(src, keep=True)
+mod = pycuda.compiler.SourceModule(src, keep=True, options=[])
 
 def launch(name, *args, **kwargs):
     fun = mod.get_function(name)
@@ -103,9 +103,9 @@ def py_radix_sort_maybe(keys, offsets, pfxs, split, shift):
 def go_sort(count, stream=None):
     grids = count / 8192
 
-    #keys = np.fromstring(np.random.bytes(count*2), dtype=np.uint16)
-    keys = np.arange(count, dtype=np.uint16)
-    np.random.shuffle(keys)
+    keys = np.fromstring(np.random.bytes(count*2), dtype=np.uint16)
+    #keys = np.arange(count, dtype=np.uint16)
+    #np.random.shuffle(keys)
     mkeys = np.reshape(keys, (grids, 8192))
     vals = np.arange(count, dtype=np.uint32)
     dkeys = cuda.to_device(keys)
@@ -114,9 +114,8 @@ def go_sort(count, stream=None):
 
     dpfxs = cuda.mem_alloc(grids * 256 * 4)
     doffsets = cuda.mem_alloc(count * 2)
-    launch('prefix_scan', doffsets, dpfxs, dkeys, i32(0),
+    launch('prefix_scan_8_0', doffsets, dpfxs, dkeys,
            block=(512, 1, 1), grid=(grids, 1), stream=stream, l1=1)
-    print cuda.from_device(dpfxs, (2, 256), np.uint32)
 
     dsplit = cuda.mem_alloc(grids * 256 * 4)
     launch('better_split', dsplit, dpfxs,
@@ -125,7 +124,6 @@ def go_sort(count, stream=None):
     # This stage will be rejiggered along with the split
     launch('prefix_sum', dpfxs, np.int32(grids * 256),
            block=(256, 1, 1), grid=(1, 1), stream=stream, l1=1)
-    print cuda.from_device(dpfxs, (2, 256), np.uint32)
 
     launch('convert_offsets', doffsets, dsplit, dkeys, i32(0),
            block=(1024, 1, 1), grid=(grids, 1), stream=stream)
@@ -134,7 +132,7 @@ def go_sort(count, stream=None):
         split = cuda.from_device(dsplit, (grids, 256), np.uint32)
         pfxs = cuda.from_device(dpfxs, (grids, 256), np.uint32)
         tkeys = py_radix_sort_maybe(mkeys, offsets, pfxs, split, 0)
-        print frle(tkeys & 0xff)
+        #print frle(tkeys & 0xff)
 
     d_skeys = cuda.mem_alloc(count * 2)
     d_svals = cuda.mem_alloc(count * 4)
@@ -157,8 +155,6 @@ def go_sort(count, stream=None):
         else:
             print 'FAIL'
 
-        print frle(skeys & 0xff)
-
     dkeys, d_skeys = d_skeys, dkeys
     dvals, d_svals = d_svals, dvals
 
@@ -166,13 +162,14 @@ def go_sort(count, stream=None):
     cuda.memset_d32(d_skeys, 0, count/2)
     cuda.memset_d32(d_svals, 0xffffffff, count)
 
-    launch('prefix_scan', doffsets, dpfxs, dkeys, i32(8),
+    launch('prefix_scan_8_8', doffsets, dpfxs, dkeys,
            block=(512, 1, 1), grid=(grids, 1), stream=stream, l1=1)
     launch('better_split', dsplit, dpfxs,
            block=(32, 1, 1), grid=(grids / 32, 1), stream=stream)
     launch('prefix_sum', dpfxs, np.int32(grids * 256),
            block=(256, 1, 1), grid=(1, 1), stream=stream, l1=1)
-    pre_offsets = cuda.from_device(doffsets, (grids, 8192), np.uint16)
+    if not stream:
+        pre_offsets = cuda.from_device(doffsets, (grids, 8192), np.uint16)
     launch('convert_offsets', doffsets, dsplit, dkeys, i32(8),
            block=(1024, 1, 1), grid=(grids, 1), stream=stream)
     if not stream:
@@ -182,13 +179,9 @@ def go_sort(count, stream=None):
         tkeys = np.reshape(tkeys, (grids, 8192))
 
         new_offs = py_convert_offsets(pre_offsets, split, tkeys, 8)
-        print new_offs[:3]
-        print offsets[:3]
         print np.nonzero(new_offs != offsets)
 
         fkeys = py_radix_sort_maybe(tkeys, new_offs, pfxs, split, 8)
-        print frle(fkeys)
-
-
+        #print frle(fkeys)
     launch('radix_sort_maybe', d_skeys, d_svals,
            dkeys, dvals, doffsets, dpfxs, dsplit, i32(8),
@@ -213,11 +206,7 @@ def go_sort(count, stream=None):
     # correctness, so this test should be made "soft".)
     print 'Order: ', 'pass' if np.all(skeys == sorted_keys) else 'FAIL'
 
-    print frle(skeys)
-    print fkeys
-    print skeys
-    print np.nonzero(fkeys != skeys)[0]
+    #print frle(skeys, 5120)
 
-
 def go_sort_old(count, stream=None):
     data = np.fromstring(np.random.bytes(count), dtype=np.uint8)
@@ -273,7 +262,7 @@ def main():
     #go(1024, 512<<8, False)
     #go(32768, 8192, False)
     stream = cuda.Stream() if '-s' in sys.argv else None
-    go_sort(1<<20, stream)
+    go_sort(1<<25, stream)
     if stream:
         stream.synchronize()
 
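(Sketch, not part of the commit.) main() now pushes 1<<25 keys through go_sort, and the commit message's "much faster" verdict implies a wall-clock comparison against the Boost-based sort. One plausible way to take the cuburn-side number is with PyCUDA events; the time_ms wrapper below is an assumption for illustration, and the Boost-side baseline would be timed separately.

    import pycuda.driver as cuda

    def time_ms(run, stream=None):
        # run: a no-arg callable that enqueues the work under test,
        # e.g. lambda: go_sort(1 << 25, stream)
        start, stop = cuda.Event(), cuda.Event()
        start.record(stream)
        run()
        stop.record(stream)
        stop.synchronize()            # wait for the recorded work to drain
        return start.time_till(stop)  # elapsed GPU time in milliseconds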