Faiss
 All Classes Namespaces Functions Variables Typedefs Enumerations Enumerator Friends
WarpSelectKernel.cuh
1 
2 /**
3  * Copyright (c) 2015-present, Facebook, Inc.
4  * All rights reserved.
5  *
6  * This source code is licensed under the CC-by-NC license found in the
7  * LICENSE file in the root directory of this source tree.
8  */
9 
10 // Copyright 2004-present Facebook. All Rights Reserved.
11 #pragma once
12 
13 #include "Float16.cuh"
14 #include "Select.cuh"
15 
16 namespace faiss { namespace gpu {
17 
18 template <typename K,
19  typename IndexType,
20  bool Dir,
21  int NumWarpQ,
22  int NumThreadQ,
23  int ThreadsPerBlock>
/// Row-wise k-selection kernel: each warp independently selects the
/// `k` best (per `Dir` / `Comparator<K>`) elements of one row of `in`,
/// writing the winning keys to `outK[row]` and their source column
/// indices to `outV[row]`.
///
/// Launch layout: 1-D grid, `ThreadsPerBlock` threads per block, so each
/// block handles `ThreadsPerBlock / kWarpSize` consecutive rows.
/// `initK` / `initV` are the sentinel key/index the selection queues
/// start from.
__global__ void warpSelect(Tensor<K, 2, true> in,
                           Tensor<K, 2, true> outK,
                           Tensor<IndexType, 2, true> outV,
                           K initK,
                           IndexType initV,
                           int k) {
  constexpr int kWarpsPerBlock = ThreadsPerBlock / kWarpSize;

  // Per-warp selection state (constructed before the bounds check, as in
  // the surrounding codebase, so construction order is unchanged).
  WarpSelect<K, IndexType, Dir, Comparator<K>,
             NumWarpQ, NumThreadQ, ThreadsPerBlock>
    heap(initK, initV, k);

  int warpInBlock = threadIdx.x / kWarpSize;
  int row = blockIdx.x * kWarpsPerBlock + warpInBlock;

  // Warps mapped past the end of the input have nothing to do.
  if (row >= in.getSize(0)) {
    return;
  }

  K* rowData = in[row].data();
  int lane = getLaneId();

  // heap.add() requires every lane of the warp to participate, so only
  // sweep the longest prefix whose length is a multiple of the warp size.
  int wholeWarpCols = utils::roundDown(in.getSize(1), kWarpSize);

  for (int col = lane; col < wholeWarpCols; col += kWarpSize) {
    heap.add(rowData[col], (IndexType) col);
  }

  // Tail of fewer than kWarpSize elements: each lane with a valid column
  // pushes into its thread-local queue individually.
  int tailCol = wholeWarpCols + lane;
  if (tailCol < in.getSize(1)) {
    heap.addThreadQ(rowData[tailCol], (IndexType) tailCol);
  }

  heap.reduce();
  heap.writeOut(outK[row].data(), outV[row].data(), k);
}
62 
// Host-side launcher for the warp-select kernel above: selects `k`
// elements per row of `in` into `outKeys` / `outIndices`, enqueued on
// `stream`. `dir` chooses the comparison direction (presumably whether
// the largest or smallest keys win — confirm against Select.cuh).
// NOTE(review): callers likely must ensure `k` does not exceed the
// maximum supported warp-queue size — verify in the .cu implementation.
63 void runWarpSelect(Tensor<float, 2, true>& in,
64  Tensor<float, 2, true>& outKeys,
65  Tensor<int, 2, true>& outIndices,
66  bool dir, int k, cudaStream_t stream);
67 
// Half-precision overload; only available when the library is built
// with float16 support.
68 #ifdef FAISS_USE_FLOAT16
69 void runWarpSelect(Tensor<half, 2, true>& in,
70  Tensor<half, 2, true>& outKeys,
71  Tensor<int, 2, true>& outIndices,
72  bool dir, int k, cudaStream_t stream);
73 #endif
74 
75 } } // namespace