Faiss
IVFPQ.cu
1 /**
2  * Copyright (c) 2015-present, Facebook, Inc.
3  * All rights reserved.
4  *
5  * This source code is licensed under the BSD+Patents license found in the
6  * LICENSE file in the root directory of this source tree.
7  */
8 
9 
10 #include "IVFPQ.cuh"
11 #include "../GpuResources.h"
12 #include "BroadcastSum.cuh"
13 #include "Distance.cuh"
14 #include "FlatIndex.cuh"
15 #include "InvertedListAppend.cuh"
16 #include "L2Norm.cuh"
17 #include "PQCodeDistances.cuh"
18 #include "PQScanMultiPassNoPrecomputed.cuh"
19 #include "PQScanMultiPassPrecomputed.cuh"
20 #include "RemapIndices.h"
21 #include "VectorResidual.cuh"
22 #include "../utils/DeviceDefs.cuh"
23 #include "../utils/DeviceUtils.h"
24 #include "../utils/HostTensor.cuh"
25 #include "../utils/MatrixMult.cuh"
26 #include "../utils/NoTypeTensor.cuh"
27 #include "../utils/Transpose.cuh"
28 #include <limits>
29 #include <thrust/host_vector.h>
30 #include <unordered_map>
31 
32 namespace faiss { namespace gpu {
33 
34 IVFPQ::IVFPQ(GpuResources* resources,
35  FlatIndex* quantizer,
36  int numSubQuantizers,
37  int bitsPerSubQuantizer,
38  float* pqCentroidData,
39  IndicesOptions indicesOptions,
40  bool useFloat16LookupTables,
41  MemorySpace space) :
42  IVFBase(resources,
43  quantizer,
44  numSubQuantizers,
45  indicesOptions,
46  space),
47  numSubQuantizers_(numSubQuantizers),
48  bitsPerSubQuantizer_(bitsPerSubQuantizer),
49  numSubQuantizerCodes_(utils::pow2(bitsPerSubQuantizer_)),
50  dimPerSubQuantizer_(dim_ / numSubQuantizers),
51  precomputedCodes_(false),
52  useFloat16LookupTables_(useFloat16LookupTables) {
53  FAISS_ASSERT(pqCentroidData);
54 
55  FAISS_ASSERT(bitsPerSubQuantizer_ <= 8);
56  FAISS_ASSERT(dim_ % numSubQuantizers_ == 0);
57  FAISS_ASSERT(isSupportedPQCodeLength(bytesPerVector_));
58 
59 #ifndef FAISS_USE_FLOAT16
60  FAISS_ASSERT(!useFloat16LookupTables_);
61 #endif
62 
63  setPQCentroids_(pqCentroidData);
64 }
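The derived members set up above follow directly from the constructor arguments; with bitsPerSubQuantizer_ <= 8 each sub-quantizer contributes one code byte, so the encoded size per vector equals numSubQuantizers_. A small arithmetic sketch with purely hypothetical values (not taken from any particular index):

#include <cassert>
#include <cstdio>

int main() {
  // Hypothetical configuration, for illustration only.
  int dim = 64;
  int numSubQuantizers = 8;
  int bitsPerSubQuantizer = 8;

  assert(dim % numSubQuantizers == 0);

  int dimPerSubQuantizer = dim / numSubQuantizers;      // 8 dims per residual slice
  int numSubQuantizerCodes = 1 << bitsPerSubQuantizer;  // 256 centroids per sub-quantizer
  int bytesPerVector = numSubQuantizers;                // 8 code bytes per encoded vector

  printf("%d dims/subq, %d codes/subq, %d bytes/vector\n",
         dimPerSubQuantizer, numSubQuantizerCodes, bytesPerVector);
  return 0;
}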
65 
66 IVFPQ::~IVFPQ() {
67 }
68 
69 
70 bool
71 IVFPQ::isSupportedPQCodeLength(int size) {
72  switch (size) {
73  case 1:
74  case 2:
75  case 3:
76  case 4:
77  case 8:
78  case 12:
79  case 16:
80  case 20:
81  case 24:
82  case 28:
83  case 32:
84  case 40:
85  case 48:
86  case 56: // only supported with float16
87  case 64: // only supported with float16
88  case 96: // only supported with float16
89  return true;
90  default:
91  return false;
92  }
93 }
94 
95 bool
96 IVFPQ::isSupportedNoPrecomputedSubDimSize(int dims) {
97  return faiss::gpu::isSupportedNoPrecomputedSubDimSize(dims);
98 }
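These two static checks gate which PQ shapes the GPU kernels support: the code length in bytes and, for the no-precomputed-table path, the per-sub-quantizer dimensionality. A guard a caller might write before constructing the index, sketched under the assumption that this internal header is visible to the caller:

#include "IVFPQ.cuh"

bool gpuPQConfigIsSupported(int dim, int numSubQuantizers, int bitsPerSubQuantizer) {
  if (bitsPerSubQuantizer > 8 || dim % numSubQuantizers != 0) {
    return false;
  }
  // One code byte per sub-quantizer, so the code length equals numSubQuantizers.
  return faiss::gpu::IVFPQ::isSupportedPQCodeLength(numSubQuantizers) &&
         faiss::gpu::IVFPQ::isSupportedNoPrecomputedSubDimSize(
           dim / numSubQuantizers);
}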
99 
100 void
101 IVFPQ::setPrecomputedCodes(bool enable) {
102  if (precomputedCodes_ != enable) {
103  precomputedCodes_ = enable;
104 
105  if (precomputedCodes_) {
106  precomputeCodes_();
107  } else {
108  // Clear out old precomputed code data
109  precomputedCode_ = std::move(DeviceTensor<float, 3, true>());
110 
111 #ifdef FAISS_USE_FLOAT16
112  precomputedCodeHalf_ = std::move(DeviceTensor<half, 3, true>());
113 #endif
114  }
115  }
116 }
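Precomputed codes trade GPU memory for query-time work: precomputeCodes_() below tabulates one value per (coarse centroid, sub-quantizer, code), stored as float, or as half when float16 lookup tables are enabled. A rough sizing sketch with hypothetical shapes:

#include <cstdio>

int main() {
  // Hypothetical index shape, for sizing only.
  size_t numLists = 4096;                // coarse centroids
  size_t numSubQuantizers = 64;
  size_t numSubQuantizerCodes = 256;     // 2^8
  size_t bytesPerEntry = sizeof(float);  // 2 with float16 lookup tables

  size_t tableBytes =
    numLists * numSubQuantizers * numSubQuantizerCodes * bytesPerEntry;
  printf("precomputed term-2 table: %.1f MiB\n",
         tableBytes / (1024.0 * 1024.0));
  return 0;
}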
117 
118 int
119 IVFPQ::classifyAndAddVectors(Tensor<float, 2, true>& vecs,
120  Tensor<long, 1, true>& indices) {
121  FAISS_ASSERT(vecs.getSize(0) == indices.getSize(0));
122  FAISS_ASSERT(vecs.getSize(1) == dim_);
123 
124  FAISS_ASSERT(!quantizer_->getUseFloat16());
125  auto& coarseCentroids = quantizer_->getVectorsFloat32Ref();
126  auto& mem = resources_->getMemoryManagerCurrentDevice();
127  auto stream = resources_->getDefaultStreamCurrentDevice();
128 
129  // Number of valid vectors that we actually add; we return this
130  int numAdded = 0;
131 
132  // The top-1 coarse distances are produced by the query below, but are not needed here
133  DeviceTensor<float, 2, true> listDistance(mem, {vecs.getSize(0), 1}, stream);
134  // The top-1 coarse assignments (inverted list ids) are what we actually use
135  DeviceTensor<int, 2, true> listIds2d(mem, {vecs.getSize(0), 1}, stream);
136  auto listIds = listIds2d.view<1>({vecs.getSize(0)});
137 
138  quantizer_->query(vecs, 1, listDistance, listIds2d, false);
139 
140  // Copy the lists that we wish to append to back to the CPU
141  // FIXME: really this can be into pinned memory and a true async
142  // copy on a different stream; we can start the copy early, but it's
143  // tiny
144  HostTensor<int, 1, true> listIdsHost(listIds, stream);
145 
146  // Calculate the residual for each closest centroid
147  DeviceTensor<float, 2, true> residuals(
148  mem, {vecs.getSize(0), vecs.getSize(1)}, stream);
149 
150  runCalcResidual(vecs, coarseCentroids, listIds, residuals, stream);
151 
152  // Residuals are in the form
153  // (vec x numSubQuantizer x dimPerSubQuantizer)
154  // transpose to
155  // (numSubQuantizer x vec x dimPerSubQuantizer)
156  auto residualsView = residuals.view<3>(
157  {residuals.getSize(0), numSubQuantizers_, dimPerSubQuantizer_});
158 
159  DeviceTensor<float, 3, true> residualsTranspose(
160  mem,
161  {numSubQuantizers_, residuals.getSize(0), dimPerSubQuantizer_},
162  stream);
163 
164  runTransposeAny(residualsView, 0, 1, residualsTranspose, stream);
165 
166  // Get the product quantizer centroids in the form
167  // (numSubQuantizer x numSubQuantizerCodes x dimPerSubQuantizer)
168  // which is pqCentroidsMiddleCode_
169 
170  // We now have a batch operation to find the top-1 distances:
171  // batch size: numSubQuantizer
172  // centroids: (numSubQuantizerCodes x dimPerSubQuantizer)
173  // residuals: (vec x dimPerSubQuantizer)
174  // => (numSubQuantizer x vec x 1)
175 
176  DeviceTensor<float, 3, true> closestSubQDistance(
177  mem, {numSubQuantizers_, residuals.getSize(0), 1}, stream);
178  DeviceTensor<int, 3, true> closestSubQIndex(
179  mem, {numSubQuantizers_, residuals.getSize(0), 1}, stream);
180 
181  for (int subQ = 0; subQ < numSubQuantizers_; ++subQ) {
182  auto closestSubQDistanceView = closestSubQDistance[subQ].view();
183  auto closestSubQIndexView = closestSubQIndex[subQ].view();
184 
185  auto pqCentroidsMiddleCodeView = pqCentroidsMiddleCode_[subQ].view();
186  auto residualsTransposeView = residualsTranspose[subQ].view();
187 
188  runL2Distance(resources_,
189  pqCentroidsMiddleCodeView,
190  nullptr, // no transposed storage
191  nullptr, // no precomputed norms
192  residualsTransposeView,
193  1,
194  closestSubQDistanceView,
195  closestSubQIndexView,
196  // We don't care about distances
197  true);
198  }
199 
200  // Now, we have the nearest sub-q centroid for each slice of the
201  // residual vector.
202  auto closestSubQIndexView = closestSubQIndex.view<2>(
203  {numSubQuantizers_, residuals.getSize(0)});
204 
205  // Transpose this for easy use
206  DeviceTensor<int, 2, true> encodings(
207  mem, {residuals.getSize(0), numSubQuantizers_}, stream);
208 
209  runTransposeAny(closestSubQIndexView, 0, 1, encodings, stream);
210 
211  // Now we add the encoded vectors to the individual lists
212  // First, make sure that there is space available for adding the new
213  // encoded vectors and indices
214 
215  // list id -> # being added
216  std::unordered_map<int, int> assignCounts;
217 
218  // vector id -> offset in list
219  // (we already have vector id -> list id in listIds)
220  HostTensor<int, 1, true> listOffsetHost({listIdsHost.getSize(0)});
221 
222  for (int i = 0; i < listIdsHost.getSize(0); ++i) {
223  int listId = listIdsHost[i];
224 
225  // The vector being added could be invalid (contains NaNs, etc.)
226  if (listId < 0) {
227  listOffsetHost[i] = -1;
228  continue;
229  }
230 
231  FAISS_ASSERT(listId < numLists_);
232  ++numAdded;
233 
234  int offset = deviceListData_[listId]->size() / bytesPerVector_;
235 
236  auto it = assignCounts.find(listId);
237  if (it != assignCounts.end()) {
238  offset += it->second;
239  it->second++;
240  } else {
241  assignCounts[listId] = 1;
242  }
243 
244  listOffsetHost[i] = offset;
245  }
246 
247  // If we didn't add anything (all invalid vectors), no need to
248  // continue
249  if (numAdded == 0) {
250  return 0;
251  }
252 
253  // We need to resize the data structures for the inverted lists on
254  // the GPUs, which means that they might need reallocation, which
255  // means that their base address may change. Figure out the new base
256  // addresses, and update those in a batch on the device
257  {
258  // Resize all of the lists that we are appending to
259  for (auto& counts : assignCounts) {
260  auto& codes = deviceListData_[counts.first];
261  codes->resize(codes->size() + counts.second * bytesPerVector_,
262  stream);
263  int newNumVecs = (int) (codes->size() / bytesPerVector_);
264 
265  auto& indices = deviceListIndices_[counts.first];
266  if ((indicesOptions_ == INDICES_32_BIT) ||
267  (indicesOptions_ == INDICES_64_BIT)) {
268  size_t indexSize =
269  (indicesOptions_ == INDICES_32_BIT) ? sizeof(int) : sizeof(long);
270 
271  indices->resize(indices->size() + counts.second * indexSize, stream);
272  } else if (indicesOptions_ == INDICES_CPU) {
273  // indices are stored on the CPU side
274  FAISS_ASSERT(counts.first < listOffsetToUserIndex_.size());
275 
276  auto& userIndices = listOffsetToUserIndex_[counts.first];
277  userIndices.resize(newNumVecs);
278  } else {
279  // indices are not stored on the GPU or CPU side
280  FAISS_ASSERT(indicesOptions_ == INDICES_IVF);
281  }
282 
283  // This is used by the multi-pass query to decide how much scratch
284  // space to allocate for intermediate results
285  maxListLength_ = std::max(maxListLength_, newNumVecs);
286  }
287 
288  // Update all pointers and sizes on the device for lists that we
289  // appended to
290  {
291  std::vector<int> listIds(assignCounts.size());
292  int i = 0;
293  for (auto& counts : assignCounts) {
294  listIds[i++] = counts.first;
295  }
296 
297  updateDeviceListInfo_(listIds, stream);
298  }
299  }
300 
301  // If we're maintaining the indices on the CPU side, update our
302  // map. We already resized our map above.
303  if (indicesOptions_ == INDICES_CPU) {
304  // We need to maintain the indices on the CPU side
305  HostTensor<long, 1, true> hostIndices(indices, stream);
306 
307  for (int i = 0; i < hostIndices.getSize(0); ++i) {
308  int listId = listIdsHost[i];
309 
310  // The vector being added could be invalid (contains NaNs, etc.)
311  if (listId < 0) {
312  continue;
313  }
314 
315  int offset = listOffsetHost[i];
316 
317  FAISS_ASSERT(listId < listOffsetToUserIndex_.size());
318  auto& userIndices = listOffsetToUserIndex_[listId];
319 
320  FAISS_ASSERT(offset < userIndices.size());
321  userIndices[offset] = hostIndices[i];
322  }
323  }
324 
325  // We similarly need to actually append the new encoded vectors
326  {
327  DeviceTensor<int, 1, true> listOffset(mem, listOffsetHost, stream);
328 
329  // This kernel will handle appending each encoded vector + index to
330  // the appropriate list
331  runIVFPQInvertedListAppend(listIds,
332  listOffset,
333  encodings,
334  indices,
335  deviceListDataPointers_,
336  deviceListIndexPointers_,
337  indicesOptions_,
338  stream);
339  }
340 
341  return numAdded;
342 }
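The add path above boils down to: assign each vector to its nearest coarse centroid, form the residual, and then, independently per sub-quantizer, pick the nearest of the numSubQuantizerCodes_ centroids for that residual slice. A minimal CPU reference of that per-vector encoding step, for intuition only (the actual work is done by the batched GPU kernels above; names here are illustrative):

#include <cstdint>
#include <limits>
#include <vector>

// Encode one residual (length dim) into one code byte per sub-quantizer.
// pqCentroids is laid out [subQ][code][subDim], matching pqCentroidsMiddleCode_.
std::vector<uint8_t> encodeResidual(const std::vector<float>& residual,
                                    const std::vector<float>& pqCentroids,
                                    int numSubQuantizers,
                                    int numCodes,
                                    int dimPerSubQuantizer) {
  std::vector<uint8_t> code(numSubQuantizers);

  for (int sq = 0; sq < numSubQuantizers; ++sq) {
    const float* slice = residual.data() + sq * dimPerSubQuantizer;
    float best = std::numeric_limits<float>::max();
    int bestCode = 0;

    for (int c = 0; c < numCodes; ++c) {
      const float* centroid =
        pqCentroids.data() + (sq * numCodes + c) * dimPerSubQuantizer;
      float dist = 0.0f;
      for (int d = 0; d < dimPerSubQuantizer; ++d) {
        float diff = slice[d] - centroid[d];
        dist += diff * diff;
      }
      if (dist < best) {
        best = dist;
        bestCode = c;
      }
    }
    code[sq] = static_cast<uint8_t>(bestCode);
  }
  return code;
}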
343 
344 void
345 IVFPQ::addCodeVectorsFromCpu(int listId,
346  const void* codes,
347  const long* indices,
348  size_t numVecs) {
349  // This list must already exist
350  FAISS_ASSERT(listId < deviceListData_.size());
351  auto stream = resources_->getDefaultStreamCurrentDevice();
352 
353  // If there's nothing to add, then there's nothing we have to do
354  if (numVecs == 0) {
355  return;
356  }
357 
358  size_t lengthInBytes = numVecs * bytesPerVector_;
359 
360  auto& listCodes = deviceListData_[listId];
361  auto prevCodeData = listCodes->data();
362 
363  // We only have int32 length representations on the GPU for each
364  // list; the length is expressed in bytes (sizeof(char) units)
365  FAISS_ASSERT(listCodes->size() % bytesPerVector_ == 0);
366  FAISS_ASSERT(listCodes->size() + lengthInBytes <=
367  (size_t) std::numeric_limits<int>::max());
368 
369  listCodes->append((unsigned char*) codes,
370  lengthInBytes,
371  stream,
372  true /* exact reserved size */);
373 
374  // Handle the indices as well
375  addIndicesFromCpu_(listId, indices, numVecs);
376 
377  // This list address may have changed due to vector resizing, but
378  // only bother updating it on the device if it has changed
379  if (prevCodeData != listCodes->data()) {
380  deviceListDataPointers_[listId] = listCodes->data();
381  }
382 
383  // And our size has changed too
384  int listLength = listCodes->size() / bytesPerVector_;
385  deviceListLengths_[listId] = listLength;
386 
387  // We update this as well, since the multi-pass algorithm uses it
388  maxListLength_ = std::max(maxListLength_, listLength);
389 
390  // device_vector add is potentially happening on a different stream
391  // than our default stream
392  if (stream != 0) {
393  streamWait({stream}, {0});
394  }
395 }
396 
397 void
398 IVFPQ::setPQCentroids_(float* data) {
399  size_t pqSize =
400  numSubQuantizers_ * numSubQuantizerCodes_ * dimPerSubQuantizer_;
401 
402  // Make sure the data is on the host
403  // FIXME: why are we doing this?
404  thrust::host_vector<float> hostMemory;
405  hostMemory.insert(hostMemory.end(), data, data + pqSize);
406 
407  HostTensor<float, 3, true> pqHost(
408  hostMemory.data(),
409  {numSubQuantizers_, numSubQuantizerCodes_, dimPerSubQuantizer_});
410  DeviceTensor<float, 3, true> pqDevice(
411  pqHost,
412  resources_->getDefaultStreamCurrentDevice());
413 
414  DeviceTensor<float, 3, true> pqDeviceTranspose(
415  {numSubQuantizers_, dimPerSubQuantizer_, numSubQuantizerCodes_});
416  runTransposeAny(pqDevice, 1, 2, pqDeviceTranspose,
417  resources_->getDefaultStreamCurrentDevice());
418 
419  pqCentroidsInnermostCode_ = std::move(pqDeviceTranspose);
420 
421  // Also maintain the PQ centroids in the form
422  // (sub q)(code id)(sub dim)
423  DeviceTensor<float, 3, true> pqCentroidsMiddleCode(
424  {numSubQuantizers_, numSubQuantizerCodes_, dimPerSubQuantizer_});
425  runTransposeAny(pqCentroidsInnermostCode_, 1, 2, pqCentroidsMiddleCode,
426  resources_->getDefaultStreamCurrentDevice());
427 
428  pqCentroidsMiddleCode_ = std::move(pqCentroidsMiddleCode);
429 }
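setPQCentroids_ keeps the PQ codebook in two layouts: pqCentroidsInnermostCode_ as (sub q)(sub dim)(code id), which the no-precomputed scan consumes, and pqCentroidsMiddleCode_ as (sub q)(code id)(sub dim), which encoding and precomputation use. A small sketch of the flat offsets implied by those layouts (helper names are illustrative):

#include <cstddef>

// Flat offsets into the two centroid layouts kept by IVFPQ.
// "Innermost code": [subQ][subDim][code]  (pqCentroidsInnermostCode_)
// "Middle code":    [subQ][code][subDim]  (pqCentroidsMiddleCode_)
size_t innermostCodeOffset(int subQ, int subDim, int code,
                           int dimPerSubQuantizer, int numCodes) {
  return (static_cast<size_t>(subQ) * dimPerSubQuantizer + subDim) * numCodes + code;
}

size_t middleCodeOffset(int subQ, int code, int subDim,
                        int numCodes, int dimPerSubQuantizer) {
  return (static_cast<size_t>(subQ) * numCodes + code) * dimPerSubQuantizer + subDim;
}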
430 
431 void
432 IVFPQ::precomputeCodes_() {
433  //
434  //   d = || x - y_C ||^2 + || y_R ||^2 + 2 * (y_C|y_R) - 2 * (x|y_R)
435  //       ---------------   ---------------------------   -----------
436  //           term 1                  term 2                  term 3
437  //
438 
439  // Terms 1 and 3 are available only at query time. We compute term 2
440  // here.
441  FAISS_ASSERT(!quantizer_->getUseFloat16());
442  auto& coarseCentroids = quantizer_->getVectorsFloat32Ref();
443 
444  // Compute ||y_R||^2 by treating
445  // (sub q)(code id)(sub dim) as (sub q * code id)(sub dim)
446  auto pqCentroidsMiddleCodeView =
447  pqCentroidsMiddleCode_.view<2>(
448  {numSubQuantizers_ * numSubQuantizerCodes_, dimPerSubQuantizer_});
449  DeviceTensor<float, 1, true> subQuantizerNorms(
450  {numSubQuantizers_ * numSubQuantizerCodes_});
451 
452  runL2Norm(pqCentroidsMiddleCodeView, subQuantizerNorms, true,
453  resources_->getDefaultStreamCurrentDevice());
454 
455  // Compute 2 * (y_C|y_R) via batch matrix multiplication
456  // batch size (sub q) x {(centroid id)(sub dim) x (code id)(sub dim)'}
457  // => (sub q) x {(centroid id)(code id)}
458  // => (sub q)(centroid id)(code id)
459 
460  // View (centroid id)(dim) as
461  // (centroid id)(sub q)(sub dim)
462  // Transpose (centroid id)(sub q)(sub dim) to
463  // (sub q)(centroid id)(sub dim)
464  auto centroidView = coarseCentroids.view<3>(
465  {coarseCentroids.getSize(0), numSubQuantizers_, dimPerSubQuantizer_});
466  DeviceTensor<float, 3, true> centroidsTransposed(
467  {numSubQuantizers_, coarseCentroids.getSize(0), dimPerSubQuantizer_});
468 
469  runTransposeAny(centroidView, 0, 1, centroidsTransposed,
470  resources_->getDefaultStreamCurrentDevice());
471 
472  DeviceTensor<float, 3, true> coarsePQProduct(
473  {numSubQuantizers_, coarseCentroids.getSize(0), numSubQuantizerCodes_});
474 
475  runIteratedMatrixMult(coarsePQProduct, false,
476  centroidsTransposed, false,
477  pqCentroidsMiddleCode_, true,
478  2.0f, 0.0f,
479  resources_->getBlasHandleCurrentDevice(),
480  resources_->getDefaultStreamCurrentDevice());
481 
482  // Transpose (sub q)(centroid id)(code id) to
483  // (centroid id)(sub q)(code id)
484  DeviceTensor<float, 3, true> coarsePQProductTransposed(
485  {coarseCentroids.getSize(0), numSubQuantizers_, numSubQuantizerCodes_});
486  runTransposeAny(coarsePQProduct, 0, 1, coarsePQProductTransposed,
487  resources_->getDefaultStreamCurrentDevice());
488 
489  // View (centroid id)(sub q)(code id) as
490  // (centroid id)(sub q * code id)
491  auto coarsePQProductTransposedView = coarsePQProductTransposed.view<2>(
492  {coarseCentroids.getSize(0), numSubQuantizers_ * numSubQuantizerCodes_});
493 
494  // Sum || y_R ||^2 + 2 * (y_C|y_R)
495  // i.e., add norms (sub q * code id)
496  // along columns of inner product (centroid id)(sub q * code id)
497  runSumAlongColumns(subQuantizerNorms, coarsePQProductTransposedView,
498  resources_->getDefaultStreamCurrentDevice());
499 
500 #ifdef FAISS_USE_FLOAT16
501  if (useFloat16LookupTables_) {
502  precomputedCodeHalf_ = toHalf(resources_,
503  resources_->getDefaultStreamCurrentDevice(),
504  coarsePQProductTransposed);
505  return;
506  }
507 #endif
508 
509  // We added into the view, so `coarsePQProductTransposed` is now our
510  // precomputed term 2.
511  precomputedCode_ = std::move(coarsePQProductTransposed);
512 }
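For reference, the decomposition that precomputeCodes_() relies on follows from expanding the squared distance between a query x and a reconstructed vector y = y_C + y_R (coarse centroid plus PQ-encoded residual):

\begin{aligned}
\|x - (y_C + y_R)\|^2
  &= \|x - y_C\|^2 - 2\langle x - y_C,\, y_R\rangle + \|y_R\|^2 \\
  &= \underbrace{\|x - y_C\|^2}_{\text{term 1}}
   + \underbrace{\|y_R\|^2 + 2\langle y_C,\, y_R\rangle}_{\text{term 2}}
   \underbrace{-\, 2\langle x,\, y_R\rangle}_{\text{term 3}}
\end{aligned}

Term 1 depends only on the query and the coarse centroid and falls out of the coarse quantizer search; term 2 depends only on the coarse centroid and the PQ code, which is what this function tabulates into precomputedCode_; term 3 depends only on the query and the PQ code and is computed per query batch in runPQPrecomputedCodes_ below.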
513 
514 void
515 IVFPQ::query(Tensor<float, 2, true>& queries,
516  int nprobe,
517  int k,
518  Tensor<float, 2, true>& outDistances,
519  Tensor<long, 2, true>& outIndices) {
520  // Validate these at a top level
521  FAISS_ASSERT(nprobe <= 1024);
522  FAISS_ASSERT(k <= 1024);
523 
524  auto& mem = resources_->getMemoryManagerCurrentDevice();
525  auto stream = resources_->getDefaultStreamCurrentDevice();
526  nprobe = std::min(nprobe, quantizer_->getSize());
527 
528  FAISS_ASSERT(queries.getSize(1) == dim_);
529  FAISS_ASSERT(outDistances.getSize(0) == queries.getSize(0));
530  FAISS_ASSERT(outIndices.getSize(0) == queries.getSize(0));
531 
532  // Reserve space for the closest coarse centroids
533  DeviceTensor<float, 2, true>
534  coarseDistances(mem, {queries.getSize(0), nprobe}, stream);
535  DeviceTensor<int, 2, true>
536  coarseIndices(mem, {queries.getSize(0), nprobe}, stream);
537 
538  // Find the `nprobe` closest coarse centroids; we can use int
539  // indices both internally and externally
540  quantizer_->query(queries,
541  nprobe,
542  coarseDistances,
543  coarseIndices,
544  true);
545 
546  if (precomputedCodes_) {
547  runPQPrecomputedCodes_(queries,
548  coarseDistances,
549  coarseIndices,
550  k,
551  outDistances,
552  outIndices);
553  } else {
554  runPQNoPrecomputedCodes_(queries,
555  coarseDistances,
556  coarseIndices,
557  k,
558  outDistances,
559  outIndices);
560  }
561 
562  // If the GPU isn't storing indices (they are on the CPU side), we
563  // need to perform the re-mapping here
564  // FIXME: we might ultimately be calling this function with inputs
565  // from the CPU, these are unnecessary copies
566  if (indicesOptions_ == INDICES_CPU) {
567  HostTensor<long, 2, true> hostOutIndices(outIndices, stream);
568 
569  ivfOffsetToUserIndex(hostOutIndices.data(),
570  numLists_,
571  hostOutIndices.getSize(0),
572  hostOutIndices.getSize(1),
573  listOffsetToUserIndex_);
574 
575  // Copy back to GPU, since the input to this function is on the
576  // GPU
577  outIndices.copyFrom(hostOutIndices, stream);
578  }
579 }
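query() is a two-stage search: the coarse quantizer selects the nprobe closest lists, and the multi-pass scan then walks the codes stored in those lists, accumulating distances from per-query lookup tables and keeping the top k. A CPU sketch of the scan over a single list, for intuition only (layout and names are illustrative, not the kernel code):

#include <cstdint>
#include <queue>
#include <utility>
#include <vector>

// Scan one inverted list for one query. table[sq][c] holds the distance
// contribution of code c of sub-quantizer sq for this query.
void scanList(const std::vector<uint8_t>& listCodes,  // packed, one byte per subQ
              const std::vector<long>& listIds,       // user ids, one per vector
              const std::vector<std::vector<float>>& table,
              int numSubQuantizers,
              size_t k,
              std::priority_queue<std::pair<float, long>>& topK) {
  size_t numVecs = listCodes.size() / numSubQuantizers;

  for (size_t v = 0; v < numVecs; ++v) {
    float dist = 0.0f;
    for (int sq = 0; sq < numSubQuantizers; ++sq) {
      uint8_t c = listCodes[v * numSubQuantizers + sq];
      dist += table[sq][c];
    }
    // Keep the k smallest distances (max-heap on distance).
    if (topK.size() < k) {
      topK.emplace(dist, listIds[v]);
    } else if (dist < topK.top().first) {
      topK.pop();
      topK.emplace(dist, listIds[v]);
    }
  }
}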
580 
581 std::vector<unsigned char>
582 IVFPQ::getListCodes(int listId) const {
583  FAISS_ASSERT(listId < deviceListData_.size());
584 
585  return deviceListData_[listId]->copyToHost<unsigned char>(
586  resources_->getDefaultStreamCurrentDevice());
587 }
588 
589 Tensor<float, 3, true>
590 IVFPQ::getPQCentroids() {
591  return pqCentroidsMiddleCode_;
592 }
593 
594 void
595 IVFPQ::runPQPrecomputedCodes_(
596  Tensor<float, 2, true>& queries,
597  DeviceTensor<float, 2, true>& coarseDistances,
598  DeviceTensor<int, 2, true>& coarseIndices,
599  int k,
600  Tensor<float, 2, true>& outDistances,
601  Tensor<long, 2, true>& outIndices) {
602  auto& mem = resources_->getMemoryManagerCurrentDevice();
603  auto stream = resources_->getDefaultStreamCurrentDevice();
604 
605  // Compute precomputed code term 3, - 2 * (x|y_R)
606  // This is done via batch MM
607  // {sub q} x {(query id)(sub dim) * (code id)(sub dim)'} =>
608  // {sub q} x {(query id)(code id)}
609  DeviceTensor<float, 3, true> term3Transposed(
610  mem,
611  {queries.getSize(0), numSubQuantizers_, numSubQuantizerCodes_},
612  stream);
613 
614  // These allocations within are only temporary, so release them when
615  // we're done to maximize free space
616  {
617  auto querySubQuantizerView = queries.view<3>(
618  {queries.getSize(0), numSubQuantizers_, dimPerSubQuantizer_});
619  DeviceTensor<float, 3, true> queriesTransposed(
620  mem,
621  {numSubQuantizers_, queries.getSize(0), dimPerSubQuantizer_},
622  stream);
623  runTransposeAny(querySubQuantizerView, 0, 1, queriesTransposed, stream);
624 
625  DeviceTensor<float, 3, true> term3(
626  mem,
627  {numSubQuantizers_, queries.getSize(0), numSubQuantizerCodes_},
628  stream);
629 
630  runIteratedMatrixMult(term3, false,
631  queriesTransposed, false,
632  pqCentroidsMiddleCode_, true,
633  -2.0f, 0.0f,
634  resources_->getBlasHandleCurrentDevice(),
635  stream);
636 
637  runTransposeAny(term3, 0, 1, term3Transposed, stream);
638  }
639 
640  NoTypeTensor<3, true> term2;
641  NoTypeTensor<3, true> term3;
642 #ifdef FAISS_USE_FLOAT16
643  DeviceTensor<half, 3, true> term3Half;
644 
645  if (useFloat16LookupTables_) {
646  term3Half = toHalf(resources_, stream, term3Transposed);
647  term2 = NoTypeTensor<3, true>(precomputedCodeHalf_);
648  term3 = NoTypeTensor<3, true>(term3Half);
649  }
650 #endif
651 
652  if (!useFloat16LookupTables_) {
653  term2 = NoTypeTensor<3, true>(precomputedCode_);
654  term3 = NoTypeTensor<3, true>(term3Transposed);
655  }
656 
657  runPQScanMultiPassPrecomputed(queries,
658  coarseDistances, // term 1
659  term2, // term 2
660  term3, // term 3
661  coarseIndices,
662  useFloat16LookupTables_,
663  bytesPerVector_,
664  numSubQuantizers_,
665  numSubQuantizerCodes_,
666  deviceListDataPointers_,
667  deviceListIndexPointers_,
668  indicesOptions_,
669  deviceListLengths_,
670  maxListLength_,
671  k,
672  outDistances,
673  outIndices,
674  resources_);
675 }
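In the precomputed path, the distance to a candidate is assembled from the three terms: term 1 indexed by (query, coarse list), term 2 by (coarse list, sub-quantizer, code) and term 3 by (query, sub-quantizer, code). A sketch of that per-candidate combination, assuming a sub-quantizer-major layout for the term 2 and term 3 slices (illustrative only):

#include <cstdint>
#include <vector>

// Combine the three precomputed terms for one candidate vector.
// term1: [query][coarse]        (from the coarse quantizer search)
// term2: [coarse][subQ][code]   (precomputedCode_, built in precomputeCodes_)
// term3: [query][subQ][code]    (built per query batch in runPQPrecomputedCodes_)
float candidateDistance(const std::vector<uint8_t>& code,  // one byte per subQ
                        float term1QueryCoarse,
                        const float* term2CoarseBase,       // numCodes entries per subQ
                        const float* term3QueryBase,        // numCodes entries per subQ
                        int numSubQuantizers,
                        int numCodes) {
  float dist = term1QueryCoarse;
  for (int sq = 0; sq < numSubQuantizers; ++sq) {
    int c = code[sq];
    dist += term2CoarseBase[sq * numCodes + c];
    dist += term3QueryBase[sq * numCodes + c];
  }
  return dist;
}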
676 
677 void
678 IVFPQ::runPQNoPrecomputedCodes_(
679  Tensor<float, 2, true>& queries,
680  DeviceTensor<float, 2, true>& coarseDistances,
681  DeviceTensor<int, 2, true>& coarseIndices,
682  int k,
683  Tensor<float, 2, true>& outDistances,
684  Tensor<long, 2, true>& outIndices) {
685  FAISS_ASSERT(!quantizer_->getUseFloat16());
686  auto& coarseCentroids = quantizer_->getVectorsFloat32Ref();
687 
688  runPQScanMultiPassNoPrecomputed(queries,
689  coarseCentroids,
690  pqCentroidsInnermostCode_,
691  coarseIndices,
692  useFloat16LookupTables_,
693  bytesPerVector_,
694  numSubQuantizers_,
695  numSubQuantizerCodes_,
696  deviceListDataPointers_,
697  deviceListIndexPointers_,
698  indicesOptions_,
699  deviceListLengths_,
700  maxListLength_,
701  k,
702  outDistances,
703  outIndices,
704  resources_);
705 }
706 
707 } } // namespace
const int numLists_
Number of inverted lists we maintain.
Definition: IVFBase.cuh:90
int maxListLength_
Maximum list length seen.
Definition: IVFBase.cuh:114
cudaStream_t getDefaultStreamCurrentDevice()
Calls getDefaultStream with the current device.
void addCodeVectorsFromCpu(int listId, const void *codes, const long *indices, size_t numVecs)
Definition: IVFPQ.cu:345
int getSize() const
Returns the number of vectors we contain.
Definition: FlatIndex.cu:46
std::vector< std::vector< long > > listOffsetToUserIndex_
Definition: IVFBase.cuh:126
Holder of GPU resources for a particular flat index.
Definition: FlatIndex.cuh:22
__host__ __device__ Tensor< T, SubDim, InnerContig, IndexT, PtrTraits > view(DataPtrType at)
Definition: Tensor-inl.cuh:633
Base inverted list functionality for IVFFlat and IVFPQ.
Definition: IVFBase.cuh:26
thrust::device_vector< int > deviceListLengths_
Definition: IVFBase.cuh:111
static bool isSupportedPQCodeLength(int size)
Returns true if we support PQ in this size.
Definition: IVFPQ.cu:71
thrust::device_vector< void * > deviceListIndexPointers_
Definition: IVFBase.cuh:107
cublasHandle_t getBlasHandleCurrentDevice()
Calls getBlasHandle with the current device.
DeviceMemory & getMemoryManagerCurrentDevice()
Calls getMemoryManager for the current device.
int classifyAndAddVectors(Tensor< float, 2, true > &vecs, Tensor< long, 1, true > &indices)
Definition: IVFPQ.cu:119
__host__ void copyFrom(Tensor< T, Dim, InnerContig, IndexT, PtrTraits > &t, cudaStream_t stream)
Copies a tensor into ourselves; sizes must match.
Definition: Tensor-inl.cuh:131
void query(Tensor< float, 2, true > &queries, int nprobe, int k, Tensor< float, 2, true > &outDistances, Tensor< long, 2, true > &outIndices)
Definition: IVFPQ.cu:515
Tensor< float, 3, true > getPQCentroids()
Definition: IVFPQ.cu:590
FlatIndex * quantizer_
Quantizer object.
Definition: IVFBase.cuh:84
void setPrecomputedCodes(bool enable)
Enable or disable pre-computed codes.
Definition: IVFPQ.cu:101
std::vector< unsigned char > getListCodes(int listId) const
Return the list codes of a particular list back to the CPU.
Definition: IVFPQ.cu:582
__host__ __device__ IndexT getSize(int i) const
Definition: Tensor.cuh:223
thrust::device_vector< void * > deviceListDataPointers_
Definition: IVFBase.cuh:103
__host__ __device__ DataPtrType data()
Returns a raw pointer to the start of our data.
Definition: Tensor.cuh:175
GpuResources * resources_
Collection of GPU resources that we use.
Definition: IVFBase.cuh:81
Our tensor type.
Definition: Tensor.cuh:29
const int bytesPerVector_
Number of bytes per vector in the list.
Definition: IVFBase.cuh:93
void updateDeviceListInfo_(cudaStream_t stream)
Update all device-side list pointer and size information.
Definition: IVFBase.cu:137
Tensor< float, 2, true > & getVectorsFloat32Ref()
Returns a reference to our vectors currently in use.
Definition: FlatIndex.cu:78
const IndicesOptions indicesOptions_
How are user indices stored on the GPU?
Definition: IVFBase.cuh:96
std::vector< std::unique_ptr< DeviceVector< unsigned char > > > deviceListData_
Definition: IVFBase.cuh:120
IVFPQ(GpuResources *resources, FlatIndex *quantizer, int numSubQuantizers, int bitsPerSubQuantizer, float *pqCentroidData, IndicesOptions indicesOptions, bool useFloat16LookupTables, MemorySpace space)
Definition: IVFPQ.cu:34
const int dim_
Expected dimensionality of the vectors.
Definition: IVFBase.cuh:87
void addIndicesFromCpu_(int listId, const long *indices, size_t numVecs)
Shared function to copy indices from CPU to GPU.
Definition: IVFBase.cu:244
static bool isSupportedNoPrecomputedSubDimSize(int dims)
Definition: IVFPQ.cu:96