Move sortbench to helpers/

Steven Robertson
2011-10-12 14:39:10 -04:00
parent d08b983542
commit 53127ffe7f
2 changed files with 0 additions and 0 deletions

helpers/sortbench.cu Normal file

@@ -0,0 +1,587 @@
#include <cuda.h>
#include <stdio.h>
#define s(x) #x
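// Scratch benchmark kernels for a GPU radix sort: several radix-histogram
// variants (shared-memory atomics, ballot/popc), per-group "split" and global
// prefix-sum helpers, and the scatter kernels that place keys and values.
// Driven by helpers/sortbench.py.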
__global__
void prefix_scan_8_0_shmem(unsigned char *keys, int nitems, int *pfxs) {
__shared__ int sh_pfxs[256];
if (threadIdx.y < 8)
sh_pfxs[threadIdx.y * 32 + threadIdx.x] = 0;
__syncthreads();
int blksz = blockDim.x * blockDim.y;
int cap = nitems * (blockIdx.x + 1);
for (int i = threadIdx.y * 32 + threadIdx.x + nitems * blockIdx.x;
i < cap; i += blksz) {
int value = keys[i];
atomicAdd(sh_pfxs + value, 1);
}
__syncthreads();
if (threadIdx.y < 8) {
int off = threadIdx.y * 32 + threadIdx.x;
atomicAdd(pfxs + off, sh_pfxs[off]);
}
}
#define GRP_RDX_FACTOR (GRPSZ / RDXSZ)
#define GRP_BLK_FACTOR (GRPSZ / BLKSZ)
#define GRPSZ 8192
#define RDXSZ 256
#define BLKSZ 512
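// GRPSZ: keys handled per block (one "group"); RDXSZ: number of bins for an
// 8-bit radix; BLKSZ: threads per block, so each thread touches
// GRP_BLK_FACTOR keys of its group.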
__global__
void prefix_scan_8_0(unsigned short *offsets, int *pfxs,
const unsigned short *keys) {
const int tid = threadIdx.x;
__shared__ int shr_pfxs[RDXSZ];
if (tid < RDXSZ) shr_pfxs[tid] = 0;
__syncthreads();
int i = tid + GRPSZ * blockIdx.x;
for (int j = 0; j < GRP_BLK_FACTOR; j++) {
// TODO: compiler smart enough to turn this into a BFE?
// TODO: should this just be two functions with fixed shifts?
// TODO: separate or integrated loop vars? unrolling?
int value = keys[i] & 0xff;
offsets[i] = atomicAdd(shr_pfxs + value, 1);
i += BLKSZ;
}
__syncthreads();
if (tid < RDXSZ) pfxs[tid + RDXSZ * blockIdx.x] = shr_pfxs[tid];
}
__global__
void prefix_scan_8_8(unsigned short *offsets, int *pfxs,
const unsigned short *keys) {
const int tid = threadIdx.x;
const int blk_offset = GRPSZ * blockIdx.x;
__shared__ int shr_pfxs[RDXSZ];
__shared__ int shr_lo_radix;
__shared__ int shr_rerun;
if (tid < RDXSZ) {
shr_pfxs[tid] = 0;
if (tid == 0) {
shr_lo_radix = keys[GRPSZ * blockIdx.x] & 0xff;
shr_rerun = 0;
}
}
__syncthreads();
int ran = 0;
int i = tid;
while (i < GRPSZ) {
int value = keys[i + blk_offset];
int lo_radix = value & 0xff;
if (shr_lo_radix < lo_radix) {
shr_rerun = 1;
} else if (shr_lo_radix == lo_radix) {
int radix = (value >> 8) & 0xff;
offsets[i + blk_offset] = atomicAdd(shr_pfxs + radix, 1);
ran = 1;
} else if (shr_lo_radix > lo_radix && !ran) {
// For reasons I have yet to bother assessing, the optimizer
// mangles this function unless it also includes code that runs in
// this case. This code should never actually run, though. In
// fact, 'ran' could be eliminated entirely, but for this.
offsets[i] = offsets[i];
}
__syncthreads();
if (shr_rerun) {
if (tid == 0) {
shr_lo_radix += 1;
shr_rerun = 0;
}
__syncthreads();
} else {
i += blockDim.x;
ran = 0;
}
}
__syncthreads();
if (tid < RDXSZ) pfxs[tid + RDXSZ * blockIdx.x] = shr_pfxs[tid];
}
__global__
void prefix_scan_8_0_shmem_shortseg(unsigned char *keys, int *pfxs) {
const int tid = threadIdx.y * 32 + threadIdx.x;
__shared__ int shr_pfxs[RDXSZ];
if (tid < RDXSZ) shr_pfxs[tid] = 0;
__syncthreads();
// TODO: this introduces a hard upper limit of 512M keys (3GB) sorted in a
// pass. It'll be a while before we get the 8GB cards needed to do this.
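// (Presumably: the grid x dimension maxes out around 64K blocks, and
// 8192 keys per block * 64K blocks = 2^29 = 512M keys; at a 2-byte key plus
// a 4-byte value per item that's about 3GB per pass.)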
int i = tid + GRPSZ * blockIdx.x;
for (int j = 0; j < GRP_BLK_FACTOR; j++) {
int value = keys[i];
atomicAdd(shr_pfxs + value, 1);
i += BLKSZ;
}
__syncthreads();
if (tid < RDXSZ) pfxs[tid + RDXSZ * blockIdx.x] = shr_pfxs[tid];
}
__global__
void crappy_split(int *pfxs, int *pfxs_out) {
const int blksz = 256;
const int tid = threadIdx.y * 32 + threadIdx.x;
int i = blksz * (tid + blockIdx.x * blksz);
int i_bound = i + blksz;
int val = 0;
for (; i < i_bound; i++) {
pfxs_out[i] = val;
val += pfxs[i];
}
}
__global__
void better_split(int *pfxs_out, const int *pfxs) {
// This one must be launched as 32x1, regardless of BLKSZ.
const int tid = threadIdx.x;
const int tid5 = tid << 5;
__shared__ int swap[1024];
int base = RDXSZ * 32 * blockIdx.x;
int value = 0;
// Performs a fast "split" (don't know why I called it that, will rename
// soon). For each entry in pfxs (corresponding to the number of elements
// per radix in a group), this writes the exclusive prefix sum for that
// group. This is in fact a bunch of serial prefix sums in parallel, and
// not a parallel prefix sum.
//
// The contents of 32 group radix counts are loaded in 32-element chunks
// into shared memory, rotated by 1 unit each group to avoid bank
// conflicts. Each thread in the warp sums across each group serially,
// updating the values as it goes, then the results are written coherently
// to global memory.
//
// This leaves the SM underloaded, as this only allows 12 warps per SM. It
// might be better to halve the chunk size and lose some coalescing
// efficiency; need to benchmark. It's a relatively cheap step, though.
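// Worked example of the rotation, assuming 32 banks of 4-byte words:
// group g's count for radix (jj + t) lands at swap[g*32 + ((g + t) & 0x1f)].
// During the load, the warp's 32 threads all touch row i but column
// (i + tid) & 0x1f, i.e. bank (i + tid) % 32 -- distinct per thread. During
// the scan, thread tid walks its own row tid at column (i + tid) & 0x1f,
// again bank (i + tid) % 32 -- still distinct. Without the rotation, the
// scan would read column i of every row, a 32-way bank conflict.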
for (int j = 0; j < 8; j++) {
int jj = j << 5;
for (int i = 0; i < 32; i++) {
int base_offset = (i << 8) + jj + base + tid;
int swap_offset = (i << 5) + ((i + tid) & 0x1f);
swap[swap_offset] = pfxs[base_offset];
}
#pragma unroll
for (int i = 0; i < 32; i++) {
int swap_offset = tid5 + ((i + tid) & 0x1f);
int tmp = swap[swap_offset];
swap[swap_offset] = value;
value += tmp;
}
for (int i = 0; i < 32; i++) {
int base_offset = (i << 8) + jj + base + tid;
int swap_offset = (i << 5) + ((i + tid) & 0x1f);
pfxs_out[base_offset] = swap[swap_offset];
}
}
}
__global__
void prefix_sum(int *pfxs, const int nitems) {
// Needs optimizing (later). Should be rolled into split.
// Must launch 256 threads.
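// Note the traversal order: thread tid owns entries tid, tid+256, tid+512,
// ..., i.e. the count for radix 'tid' in every group. The scan is therefore
// radix-major -- the result at (group g, radix r) is the total count of all
// radixes below r plus the count of radix r in groups below g, which is the
// global destination base each group needs -- while the (group, radix)
// storage layout is left untouched.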
const int tid = threadIdx.x;
const int blksz = 256;
int val = 0;
for (int i = tid; i < nitems; i += blksz) val += pfxs[i];
// I know there's a better way to implement this summing network,
// but it's not a time-critical piece of code.
__shared__ int sh_pfxs[blksz];
sh_pfxs[tid] = val;
val = 0;
__syncthreads();
// Intentionally exclusive indexing here; val for thread 0 should be 0
for (int i = 0; i < tid; i++) val += sh_pfxs[i];
int i;
for (i = tid; i < nitems; i += blksz) {
int t = pfxs[i];
pfxs[i] = val;
val += t;
}
}
__global__
void sort_8(unsigned char *keys, int *sorted_keys, int *pfxs) {
const int tid = threadIdx.x;
const int blk_offset = GRPSZ * blockIdx.x;
__shared__ int shr_pfxs[RDXSZ];
if (tid < RDXSZ) shr_pfxs[tid] = pfxs[RDXSZ * blockIdx.x + tid];
__syncthreads();
int i = tid;
for (int j = 0; j < GRP_BLK_FACTOR; j++) {
int value = keys[i+blk_offset];
int offset = atomicAdd(shr_pfxs + value, 1);
sorted_keys[offset] = value;
i += BLKSZ;
}
}
#undef BLKSZ
#define BLKSZ 1024
__global__
void sort_8_a(unsigned char *keys, int *sorted_keys,
const int *pfxs, const int *split) {
const int tid = threadIdx.x;
const int blk_offset = GRPSZ * blockIdx.x;
__shared__ int shr_offs[RDXSZ];
__shared__ int defer[GRPSZ];
const int pfx_i = RDXSZ * blockIdx.x + tid;
if (tid < RDXSZ) shr_offs[tid] = split[pfx_i];
__syncthreads();
for (int i = tid; i < GRPSZ; i += BLKSZ) {
int value = keys[i+blk_offset];
int offset = atomicAdd(shr_offs + value, 1);
defer[offset] = value;
}
__syncthreads();
// This calculation is a bit odd.
//
// For a given radix value 'r', shr_offs[r] currently holds the first index
// of the *next* radix in defer[] (i.e. if there are 28 '0'-radix values
// in defer[], shr_offs[0]==28). We want to get back to a normal exclusive
// prefix, so we subtract shr_offs[0] from everything.
//
// In the next block, we want to be able to find the correct position for a
// value in defer[], given that value's index 'i' and its radix 'r'. This
// requires two values: the destination index in sorted_keys[] of the first
// value in the group with radix 'r' (given by pfxs[BASE + r]), and the
// number of radix-'r' values before this one in defer[]. So, ultimately,
// we want an equation in the inner loop below that looks like this:
//
// int dst_offset = pfxs[r] + i - (shr_offs[r] - shr_offs[0]);
// sorted_keys[dst_offset] = defer[i];
//
// Of course, this generates tons of memory lookups and bank conflicts so
// we precombine some of this here.
int off0 = shr_offs[0];
if (tid < RDXSZ) shr_offs[tid] = pfxs[pfx_i] - (shr_offs[tid] - off0);
__syncthreads();
int i = tid;
#pragma unroll
for (int j = 0; j < GRP_BLK_FACTOR; j++) {
int value = defer[i];
int offset = shr_offs[value] + i;
sorted_keys[offset] = value;
i += BLKSZ;
}
}
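// convert_offsets (below) turns the per-key scatter information into a
// gather map. On entry, offsets[i] is key i's rank within its radix inside
// the group and split[] holds each radix's group-local starting slot, so
// split[r] + offsets[i] is key i's sorted position within the group. The
// kernel inverts that relation in shared memory and writes it back, so that
// afterwards offsets[o] names the source index of the key that belongs in
// group-local slot o.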
__global__
void convert_offsets(
unsigned short *offsets, // input and output
const int *split,
const unsigned short *keys,
const int shift
) {
const int tid = threadIdx.x;
const int blk_offset = GRPSZ * blockIdx.x;
const int rdx_offset = RDXSZ * blockIdx.x;
__shared__ int shr_offsets[GRPSZ];
__shared__ int shr_split[RDXSZ];
if (tid < RDXSZ) shr_split[tid] = split[rdx_offset + tid];
__syncthreads();
for (int i = tid; i < GRPSZ; i += BLKSZ) {
int r = (keys[blk_offset + i] >> shift) & 0xff;
int o = shr_split[r] + offsets[blk_offset + i];
if (o < GRPSZ)
shr_offsets[o] = i;
else
printf("\nWTF b:%4x i:%4x r:%2x o:%4x s:%4x og:%4x",
blockIdx.x, i, r, o, shr_split[r], offsets[blk_offset+i]);
}
__syncthreads();
for (int i = tid; i < GRPSZ; i += BLKSZ)
offsets[blk_offset + i] = shr_offsets[i];
}
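// radix_sort_maybe (below) does the actual scatter for one pass. For each
// group-local destination slot i it gathers the source key via offsets[],
// recomputes the radix r, and writes to pfxs[g][r] + (i - split[g][r]):
// the group's global base for radix r plus the key's rank within that radix
// inside the group. Values ride along with their keys.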
__global__
void radix_sort_maybe(
unsigned short *sorted_keys,
int *sorted_values,
const unsigned short *keys,
const unsigned int *values,
const unsigned short *offsets,
const int *pfxs,
const int *split,
const int shift
) {
const int tid = threadIdx.x;
const int blk_offset = GRPSZ * blockIdx.x;
const int rdx_offset = RDXSZ * blockIdx.x;
__shared__ int shr_offs[RDXSZ];
if (tid < RDXSZ)
shr_offs[tid] = pfxs[rdx_offset + tid] - split[rdx_offset + tid];
__syncthreads();
int i = tid;
for (int j = 0; j < GRP_BLK_FACTOR; j++) {
int offset = offsets[blk_offset + i];
int key = keys[blk_offset + offset];
int radix = (key >> shift) & 0xff;
int glob_offset = shr_offs[radix] + i;
/*if (sorted_values[glob_offset] != 0xffffffff)
printf("\nbad offset pos:%6x off:%4x gloff:%6x key:%4x "
"okey:%4x val:%8x oval:%8x",
i+blk_offset, offset, glob_offset, key,
sorted_keys[glob_offset], sorted_values[glob_offset]);*/
sorted_keys[glob_offset] = key;
sorted_values[glob_offset] = values[blk_offset + offset];
i += BLKSZ;
}
}
__global__
void radix_sort(unsigned short *sorted_keys, int *sorted_values,
const unsigned short *keys, const unsigned int *values,
const int *pfxs, const int *offsets, const int *split,
const int shift) {
const int tid = threadIdx.x;
const int blk_offset = GRPSZ * blockIdx.x;
__shared__ int shr_offs[RDXSZ];
__shared__ int defer[GRPSZ];
__shared__ unsigned char radishes[GRPSZ];
const int pfx_i = RDXSZ * blockIdx.x + tid;
if (tid < RDXSZ) shr_offs[tid] = split[pfx_i];
__syncthreads();
for (int i = tid; i < GRPSZ; i += BLKSZ) {
int idx = i + blk_offset;
int value = keys[idx];
int radix = radishes[i] = (value >> shift) & 0xff;
int offset = offsets[idx] + split[radix];
defer[offset] = value;
}
__syncthreads();
if (tid < RDXSZ) shr_offs[tid] = pfxs[tid] - shr_offs[tid];
__syncthreads();
// Faster to reload these or to recompute them in shmem? Need to see if we
// can safely stash both
int i = tid;
#pragma unroll
for (int j = 0; j < GRP_BLK_FACTOR; j++) {
int value = defer[i];
int offset = shr_offs[value] + i;
sorted_keys[offset] = value;
i += BLKSZ;
}
}
__global__
void prefix_scan_8_0_shmem_lessconf(unsigned char *keys, int nitems, int *pfxs) {
__shared__ int sh_pfxs_banked[256][32];
for (int i = threadIdx.y; i < 256; i += blockDim.y)
sh_pfxs_banked[i][threadIdx.x] = 0;
__syncthreads();
int blksz = blockDim.x * blockDim.y;
int cap = nitems * (blockIdx.x + 1);
for (int i = threadIdx.y * 32 + threadIdx.x + nitems * blockIdx.x;
i < cap; i += blksz) {
int value = keys[i];
atomicAdd(&(sh_pfxs_banked[value][threadIdx.x]), 1);
}
__syncthreads();
for (int i = threadIdx.y; i < 256; i += blockDim.y) {
for (int j = 16; j > 0; j = j >> 1)
if (j > threadIdx.x)
sh_pfxs_banked[i][threadIdx.x] += sh_pfxs_banked[i][j+threadIdx.x];
__syncthreads();
}
if (threadIdx.y < 8) {
int off = threadIdx.y * 32 + threadIdx.x;
atomicAdd(pfxs + off, sh_pfxs_banked[off][0]);
}
}
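// The two kernels below count radixes with warp ballots instead of atomics.
// For each radix bit, __ballot() yields the mask of lanes whose key has that
// bit set; a lane inverts the mask when its own lane index has the bit
// clear, so ANDing the per-bit masks leaves exactly the lanes whose key's
// radix matches the lane index (plus a high-bit variant per 32-radix chunk
// in the 8-bit version), and __popc() turns the mask into a count.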
__global__
void prefix_scan_5_0_popc(unsigned char *keys, int nitems, int *pfxs) {
__shared__ int sh_pfxs[32];
if (threadIdx.y == 0) sh_pfxs[threadIdx.x] = 0;
__syncthreads();
int blksz = blockDim.x * blockDim.y;
int cap = nitems * (blockIdx.x + 1);
int sum = 0;
for (int i = threadIdx.y * 32 + threadIdx.x + nitems * blockIdx.x;
i < cap; i += blksz) {
int value = keys[i];
int test = __ballot(value & 1);
if (!(threadIdx.x & 1)) test = ~test;
int popc_res = __ballot(value & 2);
if (!(threadIdx.x & 2)) popc_res = ~popc_res;
test &= popc_res;
popc_res = __ballot(value & 4);
if (!(threadIdx.x & 4)) popc_res = ~popc_res;
test &= popc_res;
popc_res = __ballot(value & 8);
if (!(threadIdx.x & 8)) popc_res = ~popc_res;
test &= popc_res;
popc_res = __ballot(value & 16);
if (!(threadIdx.x & 16)) popc_res = ~popc_res;
test &= popc_res;
sum += __popc(test);
}
atomicAdd(sh_pfxs + threadIdx.x + 0, sum);
__syncthreads();
if (threadIdx.y == 0) {
int off = threadIdx.x;
atomicAdd(pfxs + off, sh_pfxs[off]);
}
}
__global__
void prefix_scan_8_0_popc(unsigned char *keys, int nitems, int *pfxs) {
__shared__ int sh_pfxs[256];
if (threadIdx.y < 8)
sh_pfxs[threadIdx.y * 32 + threadIdx.x] = 0;
__syncthreads();
int blksz = blockDim.x * blockDim.y;
int cap = nitems * (blockIdx.x + 1);
int sum_000 = 0;
int sum_001 = 0;
int sum_010 = 0;
int sum_011 = 0;
int sum_100 = 0;
int sum_101 = 0;
int sum_110 = 0;
int sum_111 = 0;
for (int i = threadIdx.y * 32 + threadIdx.x + nitems * blockIdx.x;
i < cap; i += blksz) {
int value = keys[i];
int test_000 = __ballot(value & 1);
if (!(threadIdx.x & 1)) test_000 = ~test_000;
int popc_res = __ballot(value & 2);
if (!(threadIdx.x & 2)) popc_res = ~popc_res;
test_000 &= popc_res;
popc_res = __ballot(value & 4);
if (!(threadIdx.x & 4)) popc_res = ~popc_res;
test_000 &= popc_res;
popc_res = __ballot(value & 8);
if (!(threadIdx.x & 8)) popc_res = ~popc_res;
test_000 &= popc_res;
popc_res = __ballot(value & 16);
if (!(threadIdx.x & 16)) popc_res = ~popc_res;
test_000 &= popc_res;
popc_res = __ballot(value & 32);
int test_001 = test_000 & popc_res;
popc_res = ~popc_res;
test_000 &= popc_res;
popc_res = __ballot(value & 64);
int test_010 = test_000 & popc_res;
int test_011 = test_001 & popc_res;
popc_res = ~popc_res;
test_000 &= popc_res;
test_001 &= popc_res;
popc_res = __ballot(value & 128);
int test_100 = test_000 & popc_res;
int test_101 = test_001 & popc_res;
int test_110 = test_010 & popc_res;
int test_111 = test_011 & popc_res;
popc_res = ~popc_res;
test_000 &= popc_res;
test_001 &= popc_res;
test_010 &= popc_res;
test_011 &= popc_res;
sum_000 += __popc(test_000);
sum_001 += __popc(test_001);
sum_010 += __popc(test_010);
sum_011 += __popc(test_011);
sum_100 += __popc(test_100);
sum_101 += __popc(test_101);
sum_110 += __popc(test_110);
sum_111 += __popc(test_111);
}
atomicAdd(sh_pfxs + (threadIdx.x + 0), sum_000);
atomicAdd(sh_pfxs + (threadIdx.x + 32), sum_001);
atomicAdd(sh_pfxs + (threadIdx.x + 64), sum_010);
atomicAdd(sh_pfxs + (threadIdx.x + 96), sum_011);
atomicAdd(sh_pfxs + (threadIdx.x + 128), sum_100);
atomicAdd(sh_pfxs + (threadIdx.x + 160), sum_101);
atomicAdd(sh_pfxs + (threadIdx.x + 192), sum_110);
atomicAdd(sh_pfxs + (threadIdx.x + 224), sum_111);
__syncthreads();
if (threadIdx.y < 8) {
int off = threadIdx.y * 32 + threadIdx.x;
atomicAdd(pfxs + off, sh_pfxs[off]);
}
}

helpers/sortbench.py Normal file

@@ -0,0 +1,270 @@
import time
import pycuda.autoinit
import pycuda.compiler
import pycuda.driver as cuda
import numpy as np
np.set_printoptions(precision=5, edgeitems=20, linewidth=100, threshold=9000)
import sys, os
os.environ['PATH'] = ('/usr/x86_64-pc-linux-gnu/gcc-bin/4.4.6:'
+ os.environ['PATH'])
i32 = np.int32
with open('sortbench.cu') as f: src = f.read()
mod = pycuda.compiler.SourceModule(src, keep=True, options=[])
def launch(name, *args, **kwargs):
fun = mod.get_function(name)
if kwargs.pop('l1', False):
fun.set_cache_config(cuda.func_cache.PREFER_L1)
if not kwargs.get('stream'):
kwargs['time_kernel'] = True
print 'launching %s with %sx%s... ' % (name, kwargs['block'],
kwargs['grid']),
t = fun(*args, **kwargs)
if t:
print 'done (%g secs).' % t
else:
print 'done.'
def go(scale, block, test_cpu):
data = np.fromstring(np.random.bytes(scale*block), dtype=np.uint8)
print 'Done seeding'
if test_cpu:
a = time.time()
cpu_pfxs = np.array([np.sum(data == v) for v in range(256)])
b = time.time()
print cpu_pfxs
print 'took %g secs on CPU' % (b - a)
shmem_pfxs = np.zeros(256, dtype=np.int32)
launch('prefix_scan_8_0_shmem',
cuda.In(data), np.int32(block), cuda.InOut(shmem_pfxs),
block=(32, 16, 1), grid=(scale, 1), l1=1)
if test_cpu:
print 'it worked? %s' % (np.all(shmem_pfxs == cpu_pfxs))
shmeml_pfxs = np.zeros(256, dtype=np.int32)
launch('prefix_scan_8_0_shmem_lessconf',
cuda.In(data), np.int32(block), cuda.InOut(shmeml_pfxs),
block=(32, 32, 1), grid=(scale, 1), l1=1)
print 'it worked? %s' % (np.all(shmeml_pfxs == shmem_pfxs))
popc_pfxs = np.zeros(256, dtype=np.int32)
launch('prefix_scan_8_0_popc',
cuda.In(data), np.int32(block), cuda.InOut(popc_pfxs),
block=(32, 16, 1), grid=(scale, 1), l1=1)
popc5_pfxs = np.zeros(32, dtype=np.int32)
launch('prefix_scan_5_0_popc',
cuda.In(data), np.int32(block), cuda.InOut(popc5_pfxs),
block=(32, 16, 1), grid=(scale, 1), l1=1)
def rle(a, n=512):
pos, = np.where(np.diff(a))
pos = np.concatenate(([0], pos+1, [len(a)]))
lens = np.diff(pos)
return [(a[p], p, l) for p, l in zip(pos, lens)[:n]]
def frle(a, n=512):
return ''.join(['\n\t%4x %6x %6x' % v for v in rle(a, n)])
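# For reference, rle() reports (value, start, length) runs, e.g.
# rle(np.array([3, 3, 3, 7])) == [(3, 0, 3), (7, 3, 1)].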
# Some reference implementations follow for debugging.
def py_convert_offsets(offsets, split, keys, shift):
grids = len(offsets)
new_offs = np.empty((grids, 8192), dtype=np.int32)
for i in range(grids):
rdxs = (keys[i] >> shift) & 0xff
o = split[i][rdxs] + offsets[i]
new_offs[i][o] = np.arange(8192, dtype=np.int32)
return new_offs
def py_radix_sort_maybe(keys, offsets, pfxs, split, shift):
grids = len(offsets)
idxs = np.arange(8192)
okeys = np.empty(grids*8192, dtype=np.int32)
okeys.fill(-1)
for i in range(grids):
offs = pfxs[i] - split[i]
lkeys = keys[i][offsets[i]]
rdxs = (lkeys >> shift) & 0xff
glob_offsets = offs[rdxs] + idxs
okeys[glob_offsets] = lkeys
return okeys
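# go_sort() runs a two-pass (8 bits per pass) LSB radix sort of 16-bit keys
# with 32-bit values, 8192 keys per group: per-group histograms and per-key
# ranks (prefix_scan_8_0 / prefix_scan_8_8), group-local radix starts
# (better_split), global destination bases (prefix_sum), rank-to-gather-map
# conversion (convert_offsets), then the scatter (radix_sort_maybe). Without
# a stream, intermediate results are spot-checked against the numpy reference
# implementations above and the final keys/values are checked for integrity
# and ordering.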
def go_sort(count, stream=None):
grids = count / 8192
keys = np.fromstring(np.random.bytes(count*2), dtype=np.uint16)
#keys = np.arange(count, dtype=np.uint16)
#np.random.shuffle(keys)
mkeys = np.reshape(keys, (grids, 8192))
vals = np.arange(count, dtype=np.uint32)
dkeys = cuda.to_device(keys)
dvals = cuda.to_device(vals)
print 'Done seeding'
dpfxs = cuda.mem_alloc(grids * 256 * 4)
doffsets = cuda.mem_alloc(count * 2)
launch('prefix_scan_8_0', doffsets, dpfxs, dkeys,
block=(512, 1, 1), grid=(grids, 1), stream=stream, l1=1)
dsplit = cuda.mem_alloc(grids * 256 * 4)
launch('better_split', dsplit, dpfxs,
block=(32, 1, 1), grid=(grids / 32, 1), stream=stream)
# This stage will be rejiggered along with the split
launch('prefix_sum', dpfxs, np.int32(grids * 256),
block=(256, 1, 1), grid=(1, 1), stream=stream, l1=1)
launch('convert_offsets', doffsets, dsplit, dkeys, i32(0),
block=(1024, 1, 1), grid=(grids, 1), stream=stream)
if not stream:
offsets = cuda.from_device(doffsets, (grids, 8192), np.uint16)
split = cuda.from_device(dsplit, (grids, 256), np.uint32)
pfxs = cuda.from_device(dpfxs, (grids, 256), np.uint32)
tkeys = py_radix_sort_maybe(mkeys, offsets, pfxs, split, 0)
#print frle(tkeys & 0xff)
d_skeys = cuda.mem_alloc(count * 2)
d_svals = cuda.mem_alloc(count * 4)
if not stream:
cuda.memset_d32(d_skeys, 0, count/2)
cuda.memset_d32(d_svals, 0xffffffff, count)
launch('radix_sort_maybe', d_skeys, d_svals,
dkeys, dvals, doffsets, dpfxs, dsplit, i32(0),
block=(1024, 1, 1), grid=(grids, 1), stream=stream, l1=1)
if not stream:
skeys = cuda.from_device_like(d_skeys, keys)
svals = cuda.from_device_like(d_svals, vals)
# Test integrity of sort (keys and values kept together):
# skeys[i] = keys[svals[i]] for all i
print 'Integrity: ',
if np.all(svals < len(keys)) and np.all(skeys == keys[svals]):
print 'pass'
else:
print 'FAIL'
dkeys, d_skeys = d_skeys, dkeys
dvals, d_svals = d_svals, dvals
if not stream:
cuda.memset_d32(d_skeys, 0, count/2)
cuda.memset_d32(d_svals, 0xffffffff, count)
launch('prefix_scan_8_8', doffsets, dpfxs, dkeys,
block=(512, 1, 1), grid=(grids, 1), stream=stream, l1=1)
launch('better_split', dsplit, dpfxs,
block=(32, 1, 1), grid=(grids / 32, 1), stream=stream)
launch('prefix_sum', dpfxs, np.int32(grids * 256),
block=(256, 1, 1), grid=(1, 1), stream=stream, l1=1)
if not stream:
pre_offsets = cuda.from_device(doffsets, (grids, 8192), np.uint16)
launch('convert_offsets', doffsets, dsplit, dkeys, i32(8),
block=(1024, 1, 1), grid=(grids, 1), stream=stream)
if not stream:
offsets = cuda.from_device(doffsets, (grids, 8192), np.uint16)
split = cuda.from_device(dsplit, (grids, 256), np.uint32)
pfxs = cuda.from_device(dpfxs, (grids, 256), np.uint32)
tkeys = np.reshape(tkeys, (grids, 8192))
new_offs = py_convert_offsets(pre_offsets, split, tkeys, 8)
print np.nonzero(new_offs != offsets)
fkeys = py_radix_sort_maybe(tkeys, new_offs, pfxs, split, 8)
#print frle(fkeys)
launch('radix_sort_maybe', d_skeys, d_svals,
dkeys, dvals, doffsets, dpfxs, dsplit, i32(8),
block=(1024, 1, 1), grid=(grids, 1), stream=stream, l1=1)
if not stream:
#print cuda.from_device(doffsets, (4, 8192), np.uint16)
#print cuda.from_device(dkeys, (4, 8192), np.uint16)
#print cuda.from_device(d_skeys, (4, 8192), np.uint16)
skeys = cuda.from_device_like(d_skeys, keys)
svals = cuda.from_device_like(d_svals, vals)
print 'Integrity: ',
if np.all(svals < len(keys)) and np.all(skeys == keys[svals]):
print 'pass'
else:
print 'FAIL'
sorted_keys = np.sort(keys)
# Test that ordering is correct. (Note that we don't need 100%
# correctness, so this test should be made "soft".)
print 'Order: ', 'pass' if np.all(skeys == sorted_keys) else 'FAIL'
#print frle(skeys, 5120)
def go_sort_old(count, stream=None):
data = np.fromstring(np.random.bytes(count), dtype=np.uint8)
ddata = cuda.to_device(data)
print 'Done seeding'
grids = count / 8192
pfxs = np.zeros((grids + 1, 256), dtype=np.int32)
dpfxs = cuda.to_device(pfxs)
launch('prefix_scan_8_0_shmem_shortseg', ddata, dpfxs,
block=(32, 16, 1), grid=(grids, 1), stream=stream, l1=1)
#dsplit = cuda.to_device(pfxs)
#launch('crappy_split', dpfxs, dsplit,
#block=(32, 8, 1), grid=(grids / 256, 1), stream=stream, l1=1)
dsplit = cuda.mem_alloc(grids * 256 * 4)
launch('better_split', dsplit, dpfxs,
block=(32, 1, 1), grid=(grids / 32, 1), stream=stream)
#if not stream:
#split = cuda.from_device_like(dsplit, pfxs)
#split_ = cuda.from_device_like(dsplit_, pfxs)
#print np.all(split == split_)
dshortseg_pfxs = cuda.mem_alloc(256 * 4)
dshortseg_sums = cuda.mem_alloc(256 * 4)
launch('prefix_sum', dpfxs, np.int32(grids * 256),
dshortseg_pfxs, dshortseg_sums,
block=(32, 8, 1), grid=(1, 1), stream=stream, l1=1)
dsorted = cuda.mem_alloc(count * 4)
launch('sort_8', ddata, dsorted, dpfxs,
block=(32, 16, 1), grid=(grids, 1), stream=stream, l1=1)
launch('sort_8_a', ddata, dsorted, dpfxs, dsplit,
block=(32, 32, 1), grid=(grids, 1), stream=stream)
if not stream:
sorted = cuda.from_device(dsorted, (count,), np.int32)
f = lambda r: ''.join(['\n\t%3d %4d %4d' % v for v in r])
sort_stat = f(rle(sorted))
with open('dev.txt', 'w') as fp: fp.write(sort_stat)
sorted_np = np.sort(data)
np_stat = f(rle(sorted_np))
with open('cpu.txt', 'w') as fp: fp.write(np_stat)
print 'is_sorted?', np.all(sorted == sorted_np)
def main():
# shmem is known good; disable the CPU run to get better info from cuprof
#go(8, 512<<10, True)
#go(1024, 512<<8, False)
#go(32768, 8192, False)
stream = cuda.Stream() if '-s' in sys.argv else None
go_sort(1<<25, stream)
if stream:
stream.synchronize()
main()