TestGpuIndexIVFFlat.cpp
/**
 * Copyright (c) 2015-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD+Patents license found in the
 * LICENSE file in the root directory of this source tree.
 */

// Copyright 2004-present Facebook. All Rights Reserved.

#include "../../IndexFlat.h"
#include "../../IndexIVFFlat.h"
#include "../GpuIndexIVFFlat.h"
#include "../StandardGpuResources.h"
#include "../utils/DeviceUtils.h"
#include "../test/TestUtils.h"
#include <cmath>
#include <gtest/gtest.h>
#include <glog/logging.h>
#include <limits>
#include <sstream>
#include <vector>

// FIXME: figure out a better way to test fp16
constexpr float kF16MaxRelErr = 0.3f;
constexpr float kF32MaxRelErr = 0.03f;

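// Illustration only: this helper is not part of the original test and is not
// necessarily how faiss::gpu::compareIndices (from TestUtils) checks results.
// It sketches what the bounds above are used for: a maximum *relative* error
// between a CPU reference distance and the corresponding GPU distance.
inline bool withinRelativeError(float refDist, float gpuDist, float maxRelErr) {
  // Avoid dividing by zero for near-zero reference distances
  float denom = std::abs(refDist) > 1e-6f ? std::abs(refDist) : 1e-6f;
  return std::abs(refDist - gpuDist) / denom <= maxRelErr;
}
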
struct Options {
  Options() {
    numAdd = faiss::gpu::randVal(4000, 20000);
    dim = faiss::gpu::randVal(64, 200);

    numCentroids = std::sqrt((float) numAdd);
    numTrain = numCentroids * 40;
    nprobe = faiss::gpu::randVal(10, numCentroids);
    numQuery = faiss::gpu::randVal(32, 100);
    k = std::min(faiss::gpu::randVal(10, 30), numAdd / 40);
    indicesOpt = faiss::gpu::randSelect({
        faiss::gpu::INDICES_CPU,
        faiss::gpu::INDICES_32_BIT,
        faiss::gpu::INDICES_64_BIT});

    device = faiss::gpu::randVal(0, faiss::gpu::getNumDevices() - 1);
  }

  std::string toString() const {
    std::stringstream str;
    str << "IVFFlat device " << device
        << " numVecs " << numAdd
        << " dim " << dim
        << " numCentroids " << numCentroids
        << " nprobe " << nprobe
        << " numQuery " << numQuery
        << " k " << k
        << " indicesOpt " << indicesOpt;

    return str.str();
  }

  int numAdd;
  int dim;
  int numCentroids;
  int numTrain;
  int nprobe;
  int numQuery;
  int k;
  int device;
  faiss::gpu::IndicesOptions indicesOpt;
};

void queryTest(faiss::MetricType metricType,
               bool useFloat16CoarseQuantizer,
               bool useFloat16,
               int dimOverride = -1) {
  for (int tries = 0; tries < 3; ++tries) {
    faiss::gpu::newTestSeed();

    Options opt;
    opt.dim = dimOverride != -1 ? dimOverride : opt.dim;

    std::vector<float> trainVecs = faiss::gpu::randVecs(opt.numTrain, opt.dim);
    std::vector<float> addVecs = faiss::gpu::randVecs(opt.numAdd, opt.dim);

    faiss::IndexFlatL2 quantizerL2(opt.dim);
    faiss::IndexFlatIP quantizerIP(opt.dim);
    faiss::Index* quantizer =
      metricType == faiss::METRIC_L2 ?
      (faiss::Index*) &quantizerL2 : (faiss::Index*) &quantizerIP;

    faiss::IndexIVFFlat cpuIndex(quantizer,
                                 opt.dim, opt.numCentroids, metricType);
    cpuIndex.train(opt.numTrain, trainVecs.data());
    cpuIndex.add(opt.numAdd, addVecs.data());
    cpuIndex.nprobe = opt.nprobe;

    faiss::gpu::StandardGpuResources res;
    res.noTempMemory();

    faiss::gpu::GpuIndexIVFFlatConfig config;
    config.device = opt.device;
    config.indicesOptions = opt.indicesOpt;
    config.flatConfig.useFloat16 = useFloat16CoarseQuantizer;
    config.useFloat16IVFStorage = useFloat16;

    faiss::gpu::GpuIndexIVFFlat gpuIndex(&res,
                                         cpuIndex.d,
                                         cpuIndex.nlist,
                                         cpuIndex.metric_type,
                                         config);
    gpuIndex.copyFrom(&cpuIndex);
    gpuIndex.setNumProbes(opt.nprobe);

    bool compFloat16 = useFloat16CoarseQuantizer || useFloat16;
    faiss::gpu::compareIndices(cpuIndex, gpuIndex,
                               opt.numQuery, opt.dim, opt.k, opt.toString(),
                               compFloat16 ? kF16MaxRelErr : kF32MaxRelErr,
                               // FIXME: the fp16 bounds are
                               // useless when math (the accumulator) is
                               // in fp16. Figure out another way to test
                               compFloat16 ? 0.99f : 0.1f,
                               compFloat16 ? 0.65f : 0.015f);
  }
}

void addTest(faiss::MetricType metricType,
             bool useFloat16CoarseQuantizer,
             bool useFloat16) {
  for (int tries = 0; tries < 5; ++tries) {
    faiss::gpu::newTestSeed();

    Options opt;

    std::vector<float> trainVecs = faiss::gpu::randVecs(opt.numTrain, opt.dim);
    std::vector<float> addVecs = faiss::gpu::randVecs(opt.numAdd, opt.dim);

    faiss::IndexFlatL2 quantizerL2(opt.dim);
    faiss::IndexFlatIP quantizerIP(opt.dim);
    faiss::Index* quantizer =
      metricType == faiss::METRIC_L2 ?
      (faiss::Index*) &quantizerL2 : (faiss::Index*) &quantizerIP;

    faiss::IndexIVFFlat cpuIndex(quantizer,
                                 opt.dim,
                                 opt.numCentroids,
                                 metricType);
    cpuIndex.train(opt.numTrain, trainVecs.data());
    cpuIndex.nprobe = opt.nprobe;

    faiss::gpu::StandardGpuResources res;
    res.noTempMemory();

    faiss::gpu::GpuIndexIVFFlatConfig config;
    config.device = opt.device;
    config.indicesOptions = opt.indicesOpt;
    config.flatConfig.useFloat16 = useFloat16CoarseQuantizer;
    config.useFloat16IVFStorage = useFloat16;

    faiss::gpu::GpuIndexIVFFlat gpuIndex(&res,
                                         cpuIndex.d,
                                         cpuIndex.nlist,
                                         cpuIndex.metric_type,
                                         config);
    gpuIndex.copyFrom(&cpuIndex);
    gpuIndex.setNumProbes(opt.nprobe);

    cpuIndex.add(opt.numAdd, addVecs.data());
    gpuIndex.add(opt.numAdd, addVecs.data());

    bool compFloat16 = useFloat16CoarseQuantizer || useFloat16;
    faiss::gpu::compareIndices(cpuIndex, gpuIndex,
                               opt.numQuery, opt.dim, opt.k, opt.toString(),
                               compFloat16 ? kF16MaxRelErr : kF32MaxRelErr,
                               compFloat16 ? 0.70f : 0.1f,
                               compFloat16 ? 0.30f : 0.015f);
  }
}

void copyToTest(bool useFloat16CoarseQuantizer,
                bool useFloat16) {
  faiss::gpu::newTestSeed();

  Options opt;
  std::vector<float> trainVecs = faiss::gpu::randVecs(opt.numTrain, opt.dim);
  std::vector<float> addVecs = faiss::gpu::randVecs(opt.numAdd, opt.dim);

  faiss::gpu::StandardGpuResources res;
  res.noTempMemory();

  faiss::gpu::GpuIndexIVFFlatConfig config;
  config.device = opt.device;
  config.indicesOptions = opt.indicesOpt;
  config.flatConfig.useFloat16 = useFloat16CoarseQuantizer;
  config.useFloat16IVFStorage = useFloat16;

  faiss::gpu::GpuIndexIVFFlat gpuIndex(&res,
                                       opt.dim,
                                       opt.numCentroids,
                                       faiss::METRIC_L2,
                                       config);
  gpuIndex.train(opt.numTrain, trainVecs.data());
  gpuIndex.add(opt.numAdd, addVecs.data());
  gpuIndex.setNumProbes(opt.nprobe);

  // Use garbage values in the CPU index to see if we overwrite them
  faiss::IndexFlatL2 cpuQuantizer(1);
  faiss::IndexIVFFlat cpuIndex(&cpuQuantizer, 1, 1, faiss::METRIC_L2);
  cpuIndex.nprobe = 1;

  gpuIndex.copyTo(&cpuIndex);

  EXPECT_EQ(cpuIndex.ntotal, gpuIndex.ntotal);
  EXPECT_EQ(gpuIndex.ntotal, opt.numAdd);

  EXPECT_EQ(cpuIndex.d, gpuIndex.d);
  EXPECT_EQ(cpuIndex.d, opt.dim);
  EXPECT_EQ(cpuIndex.nlist, gpuIndex.getNumLists());
  EXPECT_EQ(cpuIndex.nprobe, gpuIndex.getNumProbes());

  // Query both objects; results should be equivalent
  bool compFloat16 = useFloat16CoarseQuantizer || useFloat16;
  faiss::gpu::compareIndices(cpuIndex, gpuIndex,
                             opt.numQuery, opt.dim, opt.k, opt.toString(),
                             compFloat16 ? kF16MaxRelErr : kF32MaxRelErr,
                             compFloat16 ? 0.70f : 0.1f,
                             compFloat16 ? 0.30f : 0.015f);
}

void copyFromTest(bool useFloat16CoarseQuantizer,
                  bool useFloat16) {
  faiss::gpu::newTestSeed();

  Options opt;
  std::vector<float> trainVecs = faiss::gpu::randVecs(opt.numTrain, opt.dim);
  std::vector<float> addVecs = faiss::gpu::randVecs(opt.numAdd, opt.dim);

  faiss::IndexFlatL2 cpuQuantizer(opt.dim);
  faiss::IndexIVFFlat cpuIndex(&cpuQuantizer,
                               opt.dim,
                               opt.numCentroids,
                               faiss::METRIC_L2);
  cpuIndex.nprobe = opt.nprobe;
  cpuIndex.train(opt.numTrain, trainVecs.data());
  cpuIndex.add(opt.numAdd, addVecs.data());

  // Use garbage values in the GPU index to see if we overwrite them
  faiss::gpu::StandardGpuResources res;
  res.noTempMemory();

  faiss::gpu::GpuIndexIVFFlatConfig config;
  config.device = opt.device;
  config.indicesOptions = opt.indicesOpt;
  config.flatConfig.useFloat16 = useFloat16CoarseQuantizer;
  config.useFloat16IVFStorage = useFloat16;

  faiss::gpu::GpuIndexIVFFlat gpuIndex(&res,
                                       1,
                                       1,
                                       faiss::METRIC_L2,
                                       config);
  gpuIndex.setNumProbes(1);

  gpuIndex.copyFrom(&cpuIndex);

  EXPECT_EQ(cpuIndex.ntotal, gpuIndex.ntotal);
  EXPECT_EQ(gpuIndex.ntotal, opt.numAdd);

  EXPECT_EQ(cpuIndex.d, gpuIndex.d);
  EXPECT_EQ(cpuIndex.d, opt.dim);
  EXPECT_EQ(cpuIndex.nlist, gpuIndex.getNumLists());
  EXPECT_EQ(cpuIndex.nprobe, gpuIndex.getNumProbes());

  // Query both objects; results should be equivalent
  bool compFloat16 = useFloat16CoarseQuantizer || useFloat16;
  faiss::gpu::compareIndices(cpuIndex, gpuIndex,
                             opt.numQuery, opt.dim, opt.k, opt.toString(),
                             compFloat16 ? kF16MaxRelErr : kF32MaxRelErr,
                             compFloat16 ? 0.70f : 0.1f,
                             compFloat16 ? 0.30f : 0.015f);
}

TEST(TestGpuIndexIVFFlat, Float32_32_Add_L2) {
  addTest(faiss::METRIC_L2, false, false);
}

TEST(TestGpuIndexIVFFlat, Float32_32_Add_IP) {
  addTest(faiss::METRIC_INNER_PRODUCT, false, false);
}

TEST(TestGpuIndexIVFFlat, Float32_16_Add_L2) {
  addTest(faiss::METRIC_L2, false, true);
}

TEST(TestGpuIndexIVFFlat, Float32_16_Add_IP) {
  addTest(faiss::METRIC_INNER_PRODUCT, false, true);
}

TEST(TestGpuIndexIVFFlat, Float16_32_Add_L2) {
  addTest(faiss::METRIC_L2, true, false);
}

TEST(TestGpuIndexIVFFlat, Float16_32_Add_IP) {
  addTest(faiss::METRIC_INNER_PRODUCT, true, false);
}

//
// General query tests
//

TEST(TestGpuIndexIVFFlat, Float32_Query_L2) {
  queryTest(faiss::METRIC_L2, false, false);
}

TEST(TestGpuIndexIVFFlat, Float32_Query_IP) {
  queryTest(faiss::METRIC_INNER_PRODUCT, false, false);
}

TEST(TestGpuIndexIVFFlat, Float16_Query_L2) {
  queryTest(faiss::METRIC_L2, false, true);
}

TEST(TestGpuIndexIVFFlat, Float16_Query_IP) {
  queryTest(faiss::METRIC_INNER_PRODUCT, false, true);
}

// float16 coarse quantizer

TEST(TestGpuIndexIVFFlat, Float16_32_Query_L2) {
  queryTest(faiss::METRIC_L2, true, false);
}

TEST(TestGpuIndexIVFFlat, Float16_32_Query_IP) {
  queryTest(faiss::METRIC_INNER_PRODUCT, true, false);
}

//
// There are IVF list scanning specializations for 64-d and 128-d that we
// make sure we explicitly test here
//

TEST(TestGpuIndexIVFFlat, Float32_Query_L2_64) {
  queryTest(faiss::METRIC_L2, false, false, 64);
}

TEST(TestGpuIndexIVFFlat, Float32_Query_IP_64) {
  queryTest(faiss::METRIC_INNER_PRODUCT, false, false, 64);
}

TEST(TestGpuIndexIVFFlat, Float16_Query_L2_64) {
  queryTest(faiss::METRIC_L2, false, true, 64);
}

TEST(TestGpuIndexIVFFlat, Float16_Query_IP_64) {
  queryTest(faiss::METRIC_INNER_PRODUCT, false, true, 64);
}

TEST(TestGpuIndexIVFFlat, Float32_Query_L2_128) {
  queryTest(faiss::METRIC_L2, false, false, 128);
}

TEST(TestGpuIndexIVFFlat, Float32_Query_IP_128) {
  queryTest(faiss::METRIC_INNER_PRODUCT, false, false, 128);
}

TEST(TestGpuIndexIVFFlat, Float16_Query_L2_128) {
  queryTest(faiss::METRIC_L2, false, true, 128);
}

TEST(TestGpuIndexIVFFlat, Float16_Query_IP_128) {
  queryTest(faiss::METRIC_INNER_PRODUCT, false, true, 128);
}

// For 256-d, only float16 is specialized

TEST(TestGpuIndexIVFFlat, Float16_Query_L2_256) {
  queryTest(faiss::METRIC_L2, false, true, 256);
}

TEST(TestGpuIndexIVFFlat, Float16_Query_IP_256) {
  queryTest(faiss::METRIC_INNER_PRODUCT, false, true, 256);
}

//
// Copy tests
//

TEST(TestGpuIndexIVFFlat, Float32_16_CopyTo) {
  copyToTest(false, true);
}

TEST(TestGpuIndexIVFFlat, Float32_32_CopyTo) {
  copyToTest(false, false);
}

TEST(TestGpuIndexIVFFlat, Float32_negative) {
  faiss::gpu::newTestSeed();

  Options opt;

  auto trainVecs = faiss::gpu::randVecs(opt.numTrain, opt.dim);
  auto addVecs = faiss::gpu::randVecs(opt.numAdd, opt.dim);

  // Put all vecs on negative side
  for (auto& f : trainVecs) {
    f = std::abs(f) * -1.0f;
  }

  for (auto& f : addVecs) {
    f = std::abs(f) * -1.0f;
  }

  faiss::IndexFlatIP quantizerIP(opt.dim);
  faiss::Index* quantizer = (faiss::Index*) &quantizerIP;

  faiss::IndexIVFFlat cpuIndex(quantizer,
                               opt.dim, opt.numCentroids,
                               faiss::METRIC_INNER_PRODUCT);
  cpuIndex.train(opt.numTrain, trainVecs.data());
  cpuIndex.add(opt.numAdd, addVecs.data());
  cpuIndex.nprobe = opt.nprobe;

  faiss::gpu::StandardGpuResources res;
  res.noTempMemory();

  faiss::gpu::GpuIndexIVFFlatConfig config;
  config.device = opt.device;
  config.indicesOptions = opt.indicesOpt;

  faiss::gpu::GpuIndexIVFFlat gpuIndex(&res,
                                       cpuIndex.d,
                                       cpuIndex.nlist,
                                       cpuIndex.metric_type,
                                       config);
  gpuIndex.copyFrom(&cpuIndex);
  gpuIndex.setNumProbes(opt.nprobe);

  // Construct a positive test set
  auto queryVecs = faiss::gpu::randVecs(opt.numQuery, opt.dim);

  // Put all vecs on the positive side, so every inner product with the
  // negative database vectors is negative
  for (auto& f : queryVecs) {
    f = std::abs(f);
  }

  bool compFloat16 = false;
  faiss::gpu::compareIndices(queryVecs,
                             cpuIndex, gpuIndex,
                             opt.numQuery, opt.dim, opt.k, opt.toString(),
                             compFloat16 ? kF16MaxRelErr : kF32MaxRelErr,
                             // FIXME: the fp16 bounds are
                             // useless when math (the accumulator) is
                             // in fp16. Figure out another way to test
                             compFloat16 ? 0.99f : 0.1f,
                             compFloat16 ? 0.65f : 0.015f);
}

//
// NaN tests
//

TEST(TestGpuIndexIVFFlat, QueryNaN) {
  faiss::gpu::newTestSeed();

  Options opt;

  std::vector<float> trainVecs = faiss::gpu::randVecs(opt.numTrain, opt.dim);
  std::vector<float> addVecs = faiss::gpu::randVecs(opt.numAdd, opt.dim);

  faiss::gpu::StandardGpuResources res;
  res.noTempMemory();

  faiss::gpu::GpuIndexIVFFlatConfig config;
  config.device = opt.device;
  config.indicesOptions = opt.indicesOpt;
  config.flatConfig.useFloat16 = faiss::gpu::randBool();
  config.useFloat16IVFStorage = faiss::gpu::randBool();

  faiss::gpu::GpuIndexIVFFlat gpuIndex(&res,
                                       opt.dim,
                                       opt.numCentroids,
                                       faiss::METRIC_L2,
                                       config);
  gpuIndex.setNumProbes(opt.nprobe);

  gpuIndex.train(opt.numTrain, trainVecs.data());
  gpuIndex.add(opt.numAdd, addVecs.data());

  int numQuery = 10;
  std::vector<float> nans(numQuery * opt.dim,
                          std::numeric_limits<float>::quiet_NaN());

  std::vector<float> distances(numQuery * opt.k, 0);
  std::vector<faiss::Index::idx_t> indices(numQuery * opt.k, 0);

  gpuIndex.search(numQuery,
                  nans.data(),
                  opt.k,
                  distances.data(),
                  indices.data());

  // NaN queries should return no results: invalid index (-1) and maximum
  // float distance for every slot
  for (int q = 0; q < numQuery; ++q) {
    for (int k = 0; k < opt.k; ++k) {
      EXPECT_EQ(indices[q * opt.k + k], -1);
      EXPECT_EQ(distances[q * opt.k + k], std::numeric_limits<float>::max());
    }
  }
}

TEST(TestGpuIndexIVFFlat, AddNaN) {
  faiss::gpu::newTestSeed();

  Options opt;

  faiss::gpu::StandardGpuResources res;
  res.noTempMemory();

  faiss::gpu::GpuIndexIVFFlatConfig config;
  config.device = opt.device;
  config.indicesOptions = opt.indicesOpt;
  config.flatConfig.useFloat16 = faiss::gpu::randBool();
  config.useFloat16IVFStorage = faiss::gpu::randBool();

  faiss::gpu::GpuIndexIVFFlat gpuIndex(&res,
                                       opt.dim,
                                       opt.numCentroids,
                                       faiss::METRIC_L2,
                                       config);
  gpuIndex.setNumProbes(opt.nprobe);

  int numNans = 10;
  std::vector<float> nans(numNans * opt.dim,
                          std::numeric_limits<float>::quiet_NaN());

  // Make one vector valid (all zeros), which should actually add
  for (int i = 0; i < opt.dim; ++i) {
    nans[i] = 0.0f;
  }

  std::vector<float> trainVecs = faiss::gpu::randVecs(opt.numTrain, opt.dim);
  gpuIndex.train(opt.numTrain, trainVecs.data());

  // Adding the NaN vectors should not crash
  EXPECT_EQ(gpuIndex.ntotal, 0);
  gpuIndex.add(numNans, nans.data());

  // Only the single valid vector should have been added
  EXPECT_EQ(gpuIndex.ntotal, 1);

  std::vector<float> queryVecs = faiss::gpu::randVecs(opt.numQuery, opt.dim);
  std::vector<float> distance(opt.numQuery * opt.k, 0);
  std::vector<faiss::Index::idx_t> indices(opt.numQuery * opt.k, 0);

  // Searching the resulting index should not crash either
  gpuIndex.search(opt.numQuery, queryVecs.data(), opt.k,
                  distance.data(), indices.data());
}

TEST(TestGpuIndexIVFFlat, UnifiedMemory) {
  // Construct on a random device to test multi-device, if we have
  // multiple devices
  int device = faiss::gpu::randVal(0, faiss::gpu::getNumDevices() - 1);

  if (!faiss::gpu::getFullUnifiedMemSupport(device)) {
    return;
  }

  int dim = 256;

  int numCentroids = 1024;
  // 24 GB of vecs should be enough to test unified memory in
  // oversubscription mode: 24 * 2^30 bytes / (256 dims * 4 bytes per float)
  // is roughly 25 million vectors
  size_t numAdd =
    (size_t) 1024 * 1024 * 1024 * 24 / ((size_t) dim * sizeof(float));
  size_t numTrain = numCentroids * 40;
  int numQuery = 10;
  int k = 10;
  int nprobe = 8;

  LOG(INFO) << "generating vecs";
  std::vector<float> trainVecs = faiss::gpu::randVecs(numTrain, dim);
  std::vector<float> addVecs = faiss::gpu::randVecs(numAdd, dim);

  LOG(INFO) << "construct CPU index";
  faiss::IndexFlatL2 quantizer(dim);
  faiss::IndexIVFFlat cpuIndex(&quantizer, dim, numCentroids, faiss::METRIC_L2);
  LOG(INFO) << "train CPU";
  cpuIndex.train(numTrain, trainVecs.data());
  LOG(INFO) << "add CPU";
  cpuIndex.add(numAdd, addVecs.data());
  cpuIndex.nprobe = nprobe;

  faiss::gpu::StandardGpuResources res;
  res.noTempMemory();

  faiss::gpu::GpuIndexIVFFlatConfig config;
  config.device = device;
  config.memorySpace = faiss::gpu::MemorySpace::Unified;

  faiss::gpu::GpuIndexIVFFlat gpuIndex(&res,
                                       dim,
                                       numCentroids,
                                       faiss::METRIC_L2,
                                       config);
  LOG(INFO) << "copy from CPU";
  gpuIndex.copyFrom(&cpuIndex);
  gpuIndex.setNumProbes(nprobe);

  LOG(INFO) << "compare";

  faiss::gpu::compareIndices(cpuIndex, gpuIndex,
                             numQuery, dim, k, "Unified Memory",
                             kF32MaxRelErr,
                             0.1f,
                             0.015f);
}
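
// A typical standalone driver for a gtest binary such as this one; this is a
// sketch rather than part of the original listing (the build may instead link
// a shared gtest main). InitGoogleLogging is only needed because the tests
// above use LOG(INFO).
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  google::InitGoogleLogging(argv[0]);
  return RUN_ALL_TESTS();
}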