#include "IVFUtils.cuh"
#include "../utils/DeviceUtils.h"
#include "../utils/Select.cuh"
#include "../utils/StaticUtils.h"
#include "../utils/Tensor.cuh"
#include <thrust/device_vector.h>
#include <limits>
namespace faiss { namespace gpu {

constexpr auto kMax = std::numeric_limits<float>::max();
constexpr auto kMin = std::numeric_limits<float>::min();
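
// Second pass of IVF list scanning: pass 1 left, for each query, a set of
// per-chunk partial top-k candidates in `heapDistances` / `heapIndices`.
// This pass merges those candidates into the final k results per query and
// translates each winning intermediate offset back to its user index.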
// Binary search in the per-query prefix sum of list lengths to find which
// probe (bucket) a given intermediate result offset falls into
inline __device__ int binarySearchForBucket(int* prefixSumOffsets,
                                            int size,
                                            int val) {
  int start = 0;
  int end = size;

  while (end - start > 0) {
    int mid = start + (end - start) / 2;
    int midVal = prefixSumOffsets[mid];

    // Find the first bucket whose prefix sum exceeds val
    if (midVal <= val) {
      start = mid + 1;
    } else {
      end = mid;
    }
  }

  // The offset must fall within some bucket
  assert(start != size);

  return start;
}
template <int ThreadsPerBlock,
          int NumWarpQ,
          int NumThreadQ,
          bool Dir>
__global__ void
pass2SelectLists(Tensor<float, 2, true> heapDistances,
                 Tensor<int, 2, true> heapIndices,
                 void** listIndices,
                 Tensor<int, 2, true> prefixSumOffsets,
                 Tensor<int, 2, true> topQueryToCentroid,
                 int k,
                 IndicesOptions opt,
                 Tensor<float, 2, true> outDistances,
                 Tensor<long, 2, true> outIndices) {
  constexpr int kNumWarps = ThreadsPerBlock / kWarpSize;

  __shared__ float smemK[kNumWarps * NumWarpQ];
  __shared__ int smemV[kNumWarps * NumWarpQ];

  // Initialize the block-wide k-selection heap with the sentinel value for
  // the selection direction
  constexpr auto kInit = Dir ? kMin : kMax;
  BlockSelect<float, int, Dir, Comparator<float>,
              NumWarpQ, NumThreadQ, ThreadsPerBlock>
    heap(kInit, -1, smemK, smemV, k);
  auto queryId = blockIdx.x;
  int num = heapDistances.getSize(1);
  int limit = utils::roundDown(num, kWarpSize);

  int i = threadIdx.x;
  auto heapDistanceStart = heapDistances[queryId];

  // Whole warps can add to the heap without divergence
  for (; i < limit; i += blockDim.x) {
    heap.add(heapDistanceStart[i], i);
  }

  // Handle the remainder elements, which may diverge within the warp
  if (i < num) {
    heap.addThreadQ(heapDistanceStart[i], i);
  }

  // Merge per-warp results into the final block-wide top-k
  heap.reduce();
  // Write out the final k results, translating each intermediate offset
  // back into an original user index
  for (int i = threadIdx.x; i < k; i += blockDim.x) {
    outDistances[queryId][i] = smemK[i];

    // `v` is the index into `heapIndices`; -1 means no valid result
    int v = smemV[i];
    long index = -1;

    if (v != -1) {
      // Offset of the intermediate result within the pass-1 output
      int offset = heapIndices[queryId][v];

      // Find which probe (inverted list) this offset falls in, via binary
      // search in the per-query prefix sum of list lengths
      int probe = binarySearchForBucket(prefixSumOffsets[queryId].data(),
                                        prefixSumOffsets.getSize(1),
                                        offset);

      // The probe gives us the actual inverted list ID for this query
      int listId = topQueryToCentroid[queryId][probe];

      // Offset within that list; the element just before the start of the
      // per-query prefix sum row is guaranteed to be 0
      int listStart = *(prefixSumOffsets[queryId][probe].data() - 1);
      int listOffset = offset - listStart;
      // Recover the user index, depending on how indices are stored
      if (opt == INDICES_32_BIT) {
        index = (long) ((int*) listIndices[listId])[listOffset];
      } else if (opt == INDICES_64_BIT) {
        index = ((long*) listIndices[listId])[listOffset];
      } else {
        // Indices are not stored on the GPU; encode the (listId, listOffset)
        // pair for later translation
        index = ((long) listId << 32 | (long) listOffset);
      }
    }

    outIndices[queryId][i] = index;
  }
}
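
// Host-side launcher: one thread block per query (grid.x = number of
// queries), with the BlockSelect specialization (NumWarpQ / NumThreadQ)
// chosen below according to the requested k and the selection direction.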
void
runPass2SelectLists(Tensor<float, 2, true>& heapDistances,
                    Tensor<int, 2, true>& heapIndices,
                    thrust::device_vector<void*>& listIndices,
                    IndicesOptions indicesOptions,
                    Tensor<int, 2, true>& prefixSumOffsets,
                    Tensor<int, 2, true>& topQueryToCentroid,
                    int k,
                    bool chooseLargest,
                    Tensor<float, 2, true>& outDistances,
                    Tensor<long, 2, true>& outIndices,
                    cudaStream_t stream) {
  constexpr auto kThreadsPerBlock = 128;

  auto grid = dim3(topQueryToCentroid.getSize(0));
  auto block = dim3(kThreadsPerBlock);
#define RUN_PASS(NUM_WARP_Q, NUM_THREAD_Q, DIR)                         \
  do {                                                                  \
    pass2SelectLists<kThreadsPerBlock,                                  \
                     NUM_WARP_Q, NUM_THREAD_Q, DIR>                     \
      <<<grid, block, 0, stream>>>(heapDistances,                       \
                                   heapIndices,                         \
                                   listIndices.data().get(),            \
                                   prefixSumOffsets,                    \
                                   topQueryToCentroid,                  \
                                   k,                                   \
                                   indicesOptions,                      \
                                   outDistances,                        \
                                   outIndices);                         \
    CUDA_TEST_ERROR();                                                  \
    return; /* success */                                               \
  } while (0)
#define RUN_PASS_DIR(DIR)                                \
  do {                                                   \
    if (k == 1) {                                        \
      RUN_PASS(1, 1, DIR);                               \
    } else if (k <= 32) {                                \
      RUN_PASS(32, 2, DIR);                              \
    } else if (k <= 64) {                                \
      RUN_PASS(64, 3, DIR);                              \
    } else if (k <= 128) {                               \
      RUN_PASS(128, 3, DIR);                             \
    } else if (k <= 256) {                               \
      RUN_PASS(256, 4, DIR);                             \
    } else if (k <= 512) {                               \
      RUN_PASS(512, 8, DIR);                             \
    } else if (k <= 1024) {                              \
      RUN_PASS(1024, 8, DIR);                            \