20 #include "FaissAssert.h"
21 #include "IndexFlat.h"
22 #include "AuxIndexStructures.h"
31 IndexIVF::IndexIVF (
Index * quantizer,
size_t d,
size_t nlist,
36 quantizer (quantizer),
37 quantizer_trains_alone (false),
40 maintain_direct_map (false)
42 FAISS_THROW_IF_NOT (d == quantizer->
d);
56 IndexIVF::IndexIVF ():
57 nlist (0), nprobe (1), quantizer (nullptr),
58 quantizer_trains_alone (false), own_fields (false),
59 maintain_direct_map (false)
74 if (new_maintain_direct_map) {
75 direct_map.resize (
ntotal, -1);
76 for (
size_t key = 0; key <
nlist; key++) {
77 const std::vector<long> & idlist =
ids[key];
79 for (
long ofs = 0; ofs < idlist.size(); ofs++) {
80 FAISS_THROW_IF_NOT_MSG (
81 0 <= idlist [ofs] && idlist[ofs] <
ntotal,
82 "direct map supported only for seuquential ids");
83 direct_map [idlist [ofs]] = key << 32 | ofs;
97 for (
size_t i = 0; i <
ids.size(); i++)
106 printf (
"IVF quantizer does not need training.\n");
109 printf (
"IVF quantizer trains alone...\n");
112 "nlist not consistent with quantizer size");
115 printf (
"Training IVF quantizer on %ld vectors in %dD\n",
125 printf (
"Training IVF residual\n");
134 printf (
"IndexIVF: no residual training\n");
142 std::vector<int> hist (
nlist);
143 for (
int i = 0; i <
nlist; i++) {
144 hist[i] =
ids[i].size();
151 std::vector<int> sizes(40);
152 for (
int i = 0; i <
nlist; i++) {
153 for (
int j = 0; j < sizes.size(); j++) {
154 if ((
ids[i].size() >> j) == 0) {
160 for (
int i = 0; i < sizes.size(); i++) {
162 printf (
"list size in < %d: %d instances\n",
172 FAISS_THROW_IF_NOT (other.
d ==
d);
176 "direct map copy not implemented");
177 FAISS_THROW_IF_NOT_MSG (
typeid (*
this) ==
typeid (other),
178 "can only merge indexes of the same type");
179 for (
long i = 0; i <
nlist; i++) {
180 std::vector<idx_t> & src = other.
ids[i];
181 std::vector<idx_t> & dest =
ids[i];
182 for (
long j = 0; j < src.size(); j++)
183 dest.push_back (src[j] + add_id);
193 IndexIVF::~IndexIVF()
204 IndexIVFFlat::IndexIVFFlat (Index * quantizer,
206 IndexIVF (quantizer, d, nlist, metric)
222 const long *precomputed_idx)
227 "cannot have direct map and add with ids");
231 if (precomputed_idx) {
232 idx = precomputed_idx;
234 long * idx0 =
new long [n];
235 quantizer->assign (n, x, idx0);
240 for (
size_t i = 0; i < n; i++) {
241 long id = xids ? xids[i] :
ntotal + i;
242 long list_no = idx [i];
245 assert (list_no < nlist);
247 ids[list_no].push_back (
id);
248 const float *xi = x + i *
d;
250 for (
size_t j = 0 ; j <
d ; j++)
251 vecs[list_no].push_back (xi [j]);
254 direct_map.push_back (list_no << 32 | (
ids[list_no].size() - 1));
258 printf(
"IndexIVFFlat::add_core: added %ld / %ld vectors\n",
264 void IndexIVFFlatStats::reset()
266 memset ((
void*)
this, 0,
sizeof (*
this));
270 IndexIVFFlatStats indexIVFFlat_stats;
275 const long * __restrict keys,
279 const size_t k = res->
k;
280 size_t nlistv = 0, ndis = 0;
282 #pragma omp parallel for reduction(+: nlistv, ndis)
283 for (
size_t i = 0; i < nx; i++) {
284 const float * xi = x + i *
d;
285 const long * keysi = keys + i *
nprobe;
286 float * __restrict simi = res->
get_val (i);
287 long * __restrict idxi = res->
get_ids (i);
288 minheap_heapify (k, simi, idxi);
290 for (
size_t ik = 0; ik <
nprobe; ik++) {
291 long key = keysi[ik];
296 if (key >= (
long) nlist) {
297 fprintf (stderr,
"Invalid key=%ld at ik=%ld nlist=%ld\n",
302 const size_t list_size =
ids[key].size();
303 const float * list_vecs =
vecs[key].data();
305 for (
size_t j = 0; j < list_size; j++) {
306 const float * yj = list_vecs + d * j;
307 float ip = fvec_inner_product (xi, yj, d);
309 minheap_pop (k, simi, idxi);
310 minheap_push (k, simi, idxi, ip,
ids[key][j]);
315 minheap_reorder (k, simi, idxi);
317 indexIVFFlat_stats.nq += nx;
318 indexIVFFlat_stats.nlist += nlistv;
319 indexIVFFlat_stats.ndis += ndis;
326 const long * __restrict keys,
329 const size_t k = res->
k;
330 size_t nlistv = 0, ndis = 0;
332 #pragma omp parallel for reduction(+: nlistv, ndis)
333 for (
size_t i = 0; i < nx; i++) {
334 const float * xi = x + i *
d;
335 const long * keysi = keys + i *
nprobe;
336 float * __restrict disi = res->
get_val (i);
337 long * __restrict idxi = res->
get_ids (i);
338 maxheap_heapify (k, disi, idxi);
340 for (
size_t ik = 0; ik <
nprobe; ik++) {
341 long key = keysi[ik];
346 if (key >= (
long) nlist) {
347 fprintf (stderr,
"Invalid key=%ld at ik=%ld nlist=%ld\n",
352 const size_t list_size =
ids[key].size();
353 const float * list_vecs =
vecs[key].data();
355 for (
size_t j = 0; j < list_size; j++) {
356 const float * yj = list_vecs + d * j;
358 if (disij < disi[0]) {
359 maxheap_pop (k, disi, idxi);
360 maxheap_push (k, disi, idxi, disij,
ids[key][j]);
365 maxheap_reorder (k, disi, idxi);
367 indexIVFFlat_stats.nq += nx;
368 indexIVFFlat_stats.nlist += nlistv;
369 indexIVFFlat_stats.ndis += ndis;
374 float *distances,
idx_t *labels)
const
378 quantizer->assign (n, x, idx,
nprobe);
385 float *distances,
idx_t *labels)
const
389 size_t(n), size_t(k), labels, distances};
394 size_t(n), size_t(k), labels, distances};
406 quantizer->assign (nx, x, keys,
nprobe);
412 for (
size_t i = 0; i < nx; i++) {
413 const float * xi = x + i *
d;
414 const long * keysi = keys + i *
nprobe;
419 for (
size_t ik = 0; ik <
nprobe; ik++) {
420 long key = keysi[ik];
421 if (key < 0 || key >= (
long) nlist) {
422 fprintf (stderr,
"Invalid key=%ld at ik=%ld nlist=%ld\n",
427 const size_t list_size =
ids[key].size();
428 const float * list_vecs =
vecs[key].data();
430 for (
size_t j = 0; j < list_size; j++) {
431 const float * yj = list_vecs + d * j;
434 if (disij < radius) {
435 qres.add (disij,
ids[key][j]);
438 float disij = fvec_inner_product(xi, yj, d);
439 if (disij > radius) {
440 qres.add (disij,
ids[key][j]);
454 for (
int i = 0; i <
nlist; i++) {
455 std::vector<float> & src = other.
vecs[i];
456 std::vector<float> & dest =
vecs[i];
457 for (
int j = 0; j < src.size(); j++)
458 dest.push_back (src[j]);
464 long a1,
long a2)
const
466 FAISS_THROW_IF_NOT (nlist == other.
nlist);
469 for (
long list_no = 0; list_no <
nlist; list_no++) {
470 const std::vector<idx_t> & ids_in =
ids[list_no];
471 std::vector<idx_t> & ids_out = other.
ids[list_no];
472 const std::vector<float> & vecs_in =
vecs[list_no];
473 std::vector<float> & vecs_out = other.
vecs[list_no];
475 for (
long i = 0; i < ids_in.size(); i++) {
476 idx_t id = ids_in[i];
477 if (subset_type == 0 && a1 <=
id &&
id < a2) {
478 ids_out.push_back (
id);
479 vecs_out.insert (vecs_out.end(),
480 vecs_in.begin() + i *
d,
481 vecs_in.begin() + (i + 1) * d);
492 std::vector<idx_t>
assign (n);
493 quantizer->assign (n, x, assign.data());
495 for (
int i = 0; i < n; i++) {
496 idx_t id = new_ids[i];
497 FAISS_THROW_IF_NOT_MSG (0 <=
id &&
id <
ntotal,
498 "id to update out of range");
500 long dm = direct_map[id];
501 long ofs = dm & 0xffffffff;
503 size_t l =
ids[il].size();
505 long id2 =
ids[il].back();
507 direct_map[id2] = (il << 32) | ofs;
508 memcpy (
vecs[il].data() + ofs * d,
509 vecs[il].data() + (l - 1) * d,
510 d *
sizeof(
vecs[il][0]));
513 vecs[il].resize((l - 1) * d);
517 size_t l =
ids[il].size();
518 long dm = (il << 32) | l;
520 ids[il].push_back (
id);
521 vecs[il].resize((l + 1) * d);
522 memcpy (
vecs[il].data() + l * d,
524 d *
sizeof(
vecs[il][0]));
536 for (
size_t key = 0; key <
nlist; key++) {
544 "direct map remove not implemented");
546 #pragma omp parallel for reduction(+: nremove)
547 for (
long i = 0; i <
nlist; i++) {
548 std::vector<idx_t> & idsi =
ids[i];
549 float *vecsi =
vecs[i].data();
551 long l = idsi.size(), j = 0;
553 if (sel.is_member (idsi[j])) {
556 memmove (vecsi + j * d,
557 vecsi + l * d, d *
sizeof (
float));
562 if (l < idsi.size()) {
563 nremove += idsi.size() - l;
565 vecs[i].resize (l * d);
575 FAISS_THROW_IF_NOT_MSG (direct_map.size() ==
ntotal,
576 "direct map is not initialized");
577 int list_no = direct_map[key] >> 32;
578 int ofs = direct_map[key] & 0xffffffff;
579 memcpy (recons, &
vecs[list_no][ofs * d], d *
sizeof(recons[0]));
589 IndexIVFFlatIPBounds::IndexIVFFlatIPBounds (
590 Index * quantizer,
size_t d,
size_t nlist,
592 IndexIVFFlat(quantizer, d, nlist, METRIC_INNER_PRODUCT), fsize(fsize)
594 part_norms.resize(nlist);
600 const long *precomputed_idx) {
606 if (precomputed_idx) {
607 idx = precomputed_idx;
609 long * idx0 =
new long [n];
610 quantizer->
assign (n, x, idx0);
617 const float * xi = x +
fsize;
618 for (
size_t i = 0; i < n; i++) {
629 void search_bounds_knn_inner_product (
637 size_t k = res->
k, nx = res->
nh, nprobe = ivf.
nprobe;
639 int fsize = ivf.
fsize;
641 size_t nlistv = 0, ndis = 0, npartial = 0;
643 #pragma omp parallel for reduction(+: nlistv, ndis, npartial)
644 for (
size_t i = 0; i < nx; i++) {
645 const float * xi = x + i * d;
646 const long * keysi = keys + i * nprobe;
647 float qnorm = qnorms[i];
648 float * __restrict simi = res->
get_val (i);
649 long * __restrict idxi = res->
get_ids (i);
650 minheap_heapify (k, simi, idxi);
652 for (
size_t ik = 0; ik < nprobe; ik++) {
653 long key = keysi[ik];
658 assert (key < (
long) ivf.
nlist);
661 const size_t list_size = ivf.
ids[key].size();
662 const float * yj = ivf.
vecs[key].data();
663 const float * bnorms = ivf.
part_norms[key].data();
665 for (
size_t j = 0; j < list_size; j++) {
666 float ip_part = fvec_inner_product (xi, yj, fsize);
667 float bound = ip_part + bnorms[j] * qnorm;
669 if (bound > simi[0]) {
670 float ip = ip_part + fvec_inner_product (
671 xi + fsize, yj + fsize, d - fsize);
673 minheap_pop (k, simi, idxi);
674 minheap_push (k, simi, idxi, ip, ivf.
ids[key][j]);
680 npartial += list_size;
682 minheap_reorder (k, simi, idxi);
684 indexIVFFlat_stats.nq += nx;
685 indexIVFFlat_stats.nlist += nlistv;
686 indexIVFFlat_stats.ndis += ndis;
687 indexIVFFlat_stats.npartial += npartial;
696 float *distances,
idx_t *labels)
const
701 quantizer->
assign (n, x, idx, nprobe);
703 float * qnorms =
new float [n];
706 #pragma omp parallel for
707 for (
size_t i = 0; i < n; i++) {
709 x + i * d + fsize, d - fsize));
713 size_t(n), size_t(k), labels, distances};
715 search_bounds_knn_inner_product (*
this, x, idx, &res, qnorms);
void search_preassigned(idx_t n, const float *x, idx_t k, const idx_t *assign, float *distances, idx_t *labels) const
Perform the search without computing the assignment to the quantizer.
int niter
clustering iterations
result structure for a single query
float fvec_L2sqr(const float *x, const float *y, size_t d)
Squared L2 distance between two vectors.
void search_knn_L2sqr(size_t nx, const float *x, const long *keys, float_maxheap_array_t *res) const
Implementation of the search for the L2 metric.
T * get_val(size_t key)
Return the list of values for a heap.
double imbalance_factor() const
1 = perfectly balanced, >1: imbalanced
virtual void reset()=0
removes all elements from the database.
size_t nprobe
number of probes at query time
void reconstruct(idx_t key, float *recons) const override
void assign(idx_t n, const float *x, idx_t *labels, idx_t k=1)
bool quantizer_trains_alone
just pass over the trainset to quantizer
void range_search(idx_t n, const float *x, float radius, RangeSearchResult *result) const override
void copy_subset_to(IndexIVFFlat &other, int subset_type, long a1, long a2) const
void merge_from_residuals(IndexIVF &other) override
virtual void add_with_ids(idx_t n, const float *x, const long *xids)
virtual void train_residual(idx_t n, const float *x)
size_t k
allocated size per heap
double imbalance_factor(int n, int k, const long *assign)
a balanced assignment has an IF of 1
long remove_ids(const IDSelector &sel) override
std::vector< std::vector< long > > ids
Inverted lists for indexes.
Index * quantizer
quantizer that maps vectors to inverted lists
void train(idx_t n, const float *x) override
Trains the quantizer and calls train_residual to train sub-quantizers.
ClusteringParameters cp
to override default clustering params
void add_with_ids(idx_t n, const float *x, const long *xids) override
implemented for all IndexIVF* classes
bool own_fields
whether object owns the quantizer
long idx_t
all indices are this type
void reset() override
removes all elements from the database.
idx_t ntotal
total nb of indexed vectors
bool verbose
verbosity level
void reset() override
removes all elements from the database.
QueryResult & new_result(idx_t qno)
begin a new result
void update_vectors(int nv, idx_t *idx, const float *v)
void search(idx_t n, const float *x, idx_t k, float *distances, idx_t *labels) const override
std::vector< std::vector< float > > part_norms
norm of remainder (dimensions fsize:d)
float fvec_norm_L2sqr(const float *x, size_t d)
size_t fsize
nb of dimensions of pre-filter
the entries in the buffers are split per query
virtual void merge_from_residuals(IndexIVF &other)=0
void make_direct_map(bool new_maintain_direct_map=true)
TI * get_ids(size_t key)
Corresponding identifiers.
MetricType metric_type
type of metric this index uses for search
void print_stats() const
display some stats about the inverted lists
void add_core(idx_t n, const float *x, const long *xids, const long *precomputed_idx) override
same as add_with_ids, with precomputed coarse quantizer
size_t nlist
number of possible key values
void add(idx_t n, const float *x) override
Quantizes x and calls add_with_key.
virtual void train(idx_t n, const float *x, faiss::Index &index)
Index is used during the assignment stage.
bool is_trained
set if the Index does not require training, or if training is done already
void search_knn_inner_product(size_t nx, const float *x, const long *keys, float_minheap_array_t *res) const
Implementation of the search for the inner product metric.
virtual void train(idx_t n, const float *x)
bool maintain_direct_map
map for direct access to the elements. Enables reconstruct().
bool spherical
do we want normalized centroids?
virtual void merge_from(IndexIVF &other, idx_t add_id)
MetricType
Some algorithms support both an inner product version and an L2 search version.
std::vector< std::vector< float > > vecs
virtual void add_core(idx_t n, const float *x, const long *xids, const long *precomputed_idx)
same as add_with_ids, with precomputed coarse quantizer
void search(idx_t n, const float *x, idx_t k, float *distances, idx_t *labels) const override