23 #include "FaissAssert.h"
33 IndexPQ::IndexPQ (
int d,
size_t M,
size_t nbits,
MetricType metric):
34 Index(d, metric), pq(d, M, nbits)
61 if (ntrain_perm > n / 4)
64 printf (
"PQ training on %ld points, remains %ld points: "
65 "training polysemous on %s\n",
66 n - ntrain_perm, ntrain_perm,
67 ntrain_perm == 0 ?
"centroids" :
"these");
69 pq.train(n - ntrain_perm, x);
72 pq, ntrain_perm, x + (n - ntrain_perm) *
d);
96 FAISS_THROW_IF_NOT (ni == 0 || (i0 >= 0 && i0 + ni <=
ntotal));
97 for (
idx_t i = 0; i < ni; i++) {
106 FAISS_THROW_IF_NOT (key >= 0 && key <
ntotal);
125 float *distances,
idx_t *labels)
const
128 if (search_type ==
ST_PQ) {
132 size_t(n), size_t(k), labels, distances };
136 size_t(n), size_t(k), labels, distances };
139 indexPQ_stats.nq += n;
140 indexPQ_stats.ncode += n *
ntotal;
147 search_core_polysemous (n, x, k, distances, labels);
151 uint8_t * q_codes =
new uint8_t [n *
pq.
code_size];
160 for (
size_t i = 0; i < n; i++) {
161 const float *xi = x + i *
d;
163 for (
int j = 0; j <
d; j++)
164 if (xi[j] > 0) code [j>>3] |= 1 << (j & 7);
168 if (search_type ==
ST_SDC) {
171 size_t(n), size_t(k), labels, distances};
173 pq.search_sdc (q_codes, n,
codes.data(),
ntotal, &res,
true);
176 int * idistances =
new int [n * k];
180 size_t (n), size_t (k), labels, idistances};
182 if (search_type ==
ST_HE) {
194 for (
int i = 0; i < k * n; i++)
195 distances[i] = idistances[i];
200 indexPQ_stats.nq += n;
201 indexPQ_stats.ncode += n *
ntotal;
209 void IndexPQStats::reset()
211 nq = ncode = n_hamming_pass = 0;
214 IndexPQStats indexPQ_stats;
217 template <
class HammingComputer>
218 static size_t polysemous_inner_loop (
219 const IndexPQ & index,
220 const float *dis_table_qi,
const uint8_t *q_code,
221 size_t k,
float *heap_dis,
long *heap_ids)
225 int code_size = index.pq.code_size;
226 int ksub = index.pq.ksub;
227 size_t ntotal = index.ntotal;
228 int ht = index.polysemous_ht;
230 const uint8_t *b_code = index.codes.data();
234 HammingComputer hc (q_code, code_size);
236 for (
long bi = 0; bi < ntotal; bi++) {
237 int hd = hc.hamming (b_code);
243 const float * dis_table = dis_table_qi;
244 for (
int m = 0; m < M; m++) {
245 dis += dis_table [b_code[m]];
249 if (dis < heap_dis[0]) {
250 maxheap_pop (k, heap_dis, heap_ids);
251 maxheap_push (k, heap_dis, heap_ids, dis, bi);
260 void IndexPQ::search_core_polysemous (idx_t n,
const float *x, idx_t k,
261 float *distances, idx_t *labels)
const
266 float * dis_tables =
new float [n *
pq.
ksub *
pq.
M];
267 ScopeDeleter<float> del (dis_tables);
271 uint8_t * q_codes =
new uint8_t [n *
pq.
code_size];
272 ScopeDeleter<uint8_t> del2 (q_codes);
277 #pragma omp parallel for
278 for (idx_t qi = 0; qi < n; qi++) {
287 #pragma omp parallel for reduction (+: n_pass)
288 for (idx_t qi = 0; qi < n; qi++) {
289 const uint8_t * q_code = q_codes + qi *
pq.
code_size;
291 const float * dis_table_qi = dis_tables + qi *
pq.
M *
pq.
ksub;
293 long * heap_ids = labels + qi * k;
294 float *heap_dis = distances + qi * k;
295 maxheap_heapify (k, heap_dis, heap_ids);
301 n_pass += polysemous_inner_loop<HammingComputer4>
302 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
305 n_pass += polysemous_inner_loop<HammingComputer8>
306 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
309 n_pass += polysemous_inner_loop<HammingComputer16>
310 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
313 n_pass += polysemous_inner_loop<HammingComputer32>
314 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
317 n_pass += polysemous_inner_loop<HammingComputer20>
318 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
322 n_pass += polysemous_inner_loop<HammingComputerM8>
323 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
325 n_pass += polysemous_inner_loop<HammingComputerM4>
326 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
329 "code size %zd not supported for polysemous",
337 n_pass += polysemous_inner_loop<GenHammingComputer8>
338 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
341 n_pass += polysemous_inner_loop<GenHammingComputer16>
342 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
345 n_pass += polysemous_inner_loop<GenHammingComputer32>
346 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
350 n_pass += polysemous_inner_loop<GenHammingComputerM8>
351 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
354 "code size %zd not supported for polysemous",
360 maxheap_reorder (k, heap_dis, heap_ids);
363 indexPQ_stats.nq += n;
364 indexPQ_stats.ncode += n *
ntotal;
365 indexPQ_stats.n_hamming_pass += n_pass;
383 uint8_t * q_codes =
new uint8_t [n *
pq.
code_size];
393 idx_t nb,
const float *xb,
401 uint8_t * q_codes =
new uint8_t [n *
pq.
code_size];
410 del_b_codes.set (b_codes);
414 b_codes =
codes.data();
417 memset (hist, 0,
sizeof(*hist) * (nbits + 1));
422 std::vector<long> histi (nbits + 1);
423 hamdis_t *distances =
new hamdis_t [nb * bs];
426 for (
size_t q0 = 0; q0 < n; q0 += bs) {
435 for (
size_t i = 0; i < nb * (q1 - q0); i++)
436 histi [distances [i]]++;
440 for (
int i = 0; i <= nbits; i++)
472 template <
typename T>
473 struct PreSortedArray {
478 explicit PreSortedArray (
int N): N(N) {
480 void init (
const T*x) {
490 return x[n] - x[n - 1];
494 int get_ord (
int n) {
500 template <
typename T>
503 bool operator() (
size_t i,
size_t j) {
512 template <
typename T>
516 std::vector<int> perm;
518 explicit SortedArray (
int N) {
523 void init (
const T*x) {
525 for (
int n = 0; n < N; n++)
527 ArgSort<T> cmp = {x };
528 std::sort (perm.begin(), perm.end(), cmp);
538 return x[perm[n]] - x[perm[n - 1]];
542 int get_ord (
int n) {
553 void partial_sort (
int k,
int n,
554 const typename C::T * vals,
typename C::TI * perm) {
556 for (
int i = 1; i < k; i++) {
557 indirect_heap_push<C> (i + 1, vals, perm, perm[i]);
561 for (
int i = k; i < n; i++) {
562 typename C::TI
id = perm[i];
563 typename C::TI top = perm[0];
565 if (C::cmp(vals[top], vals[
id])) {
566 indirect_heap_pop<C> (k, vals, perm);
567 indirect_heap_push<C> (k, vals, perm, id);
575 for (
int i = k - 1; i > 0; i--) {
576 typename C::TI top = perm[0];
577 indirect_heap_pop<C> (i + 1, vals, perm);
583 template <
typename T>
584 struct SemiSortedArray {
589 typedef CMax<T, int> HC;
590 std::vector<int> perm;
594 int initial_k, k_factor;
596 explicit SemiSortedArray (
int N) {
604 void init (
const T*x) {
606 for (
int n = 0; n < N; n++)
613 void grow (
int next_k) {
615 partial_sort<HC> (next_k - k, N - k, x, &perm[k]);
618 ArgSort<T> cmp = {x };
619 std::sort (perm.begin() + k, perm.end(), cmp);
633 int next_k = (k + 1) * k_factor - 1;
636 return x[perm[n]] - x[perm[n - 1]];
640 int get_ord (
int n) {
674 template <
typename T,
class SSA,
bool use_seen>
685 typedef CMin<T, long> HC;
686 size_t heap_capacity, heap_size;
690 std::vector <SSA> ssx;
696 std::vector <uint8_t> seen;
698 MinSumK (
int K,
int M,
int nbit,
int N):
699 K(K), M(M), nbit(nbit), N(N) {
700 heap_capacity = K * M;
701 assert (N <= (1 << nbit));
704 bh_val =
new T[heap_capacity];
705 bh_ids =
new long[heap_capacity];
708 long n_ids = weight(M);
709 seen.resize ((n_ids + 7) / 8);
712 for (
int m = 0; m < M; m++)
713 ssx.push_back (SSA(N));
717 long weight (
int i) {
718 return 1 << (i * nbit);
721 bool is_seen (
long i) {
722 return (seen[i >> 3] >> (i & 7)) & 1;
725 void mark_seen (
long i) {
727 seen [i >> 3] |= 1 << (i & 7);
730 void run (
const T *x,
long ldx,
731 T * sums,
long * terms) {
734 for (
int m = 0; m < M; m++) {
743 for (
int m = 0; m < M; m++) {
744 sum += ssx[m].get_0();
747 for (
int m = 0; m < M; m++) {
748 heap_push<HC> (++heap_size, bh_val, bh_ids,
749 sum + ssx[m].get_diff(1),
754 for (
int k = 1; k < K; k++) {
757 while (is_seen (bh_ids[0])) {
758 assert (heap_size > 0);
759 heap_pop<HC> (heap_size--, bh_val, bh_ids);
762 assert (heap_size > 0);
764 T sum = sums[k] = bh_val[0];
765 long ti = terms[k] = bh_ids[0];
769 heap_pop<HC> (heap_size--, bh_val, bh_ids);
772 heap_pop<HC> (heap_size--, bh_val, bh_ids);
773 }
while (heap_size > 0 && bh_ids[0] == ti);
778 for (
int m = 0; m < M; m++) {
779 long n = ii & ((1 << nbit) - 1);
781 if (n + 1 >= N)
continue;
783 enqueue_follower (ti, m, n, sum);
794 for (
int k = 0; k < K; k++) {
801 for (
int m = 0; m < M; m++) {
802 long n = ii & ((1 << nbit) - 1);
803 ti += ssx[m].get_ord(n) << (nbit * m);
811 void enqueue_follower (
long ti,
int m,
int n, T sum) {
812 T next_sum = sum + ssx[m].get_diff(n + 1);
813 long next_ti = ti + weight(m);
814 heap_push<HC> (++heap_size, bh_val, bh_ids, next_sum, next_ti);
826 MultiIndexQuantizer::MultiIndexQuantizer (
int d,
829 Index(d, METRIC_L2), pq(d, M, nbits)
844 for (
int m = 0; m < pq.
M; m++)
850 float *distances,
idx_t *labels)
const {
853 float * dis_tables =
new float [n * pq.
ksub * pq.
M];
861 #pragma omp parallel for
862 for (
int i = 0; i < n; i++) {
863 const float * dis_table = dis_tables + i * pq.
ksub * pq.
M;
867 for (
int s = 0; s < pq.
M; s++) {
868 float vmin = HUGE_VALF;
872 if (dis_table[j] < vmin) {
878 label |= lmin << (s * pq.
nbits);
879 dis_table += pq.
ksub;
889 #pragma omp parallel if(n > 1)
891 MinSumK <float, SemiSortedArray<float>,
false>
894 for (
int i = 0; i < n; i++) {
895 msk.run (dis_tables + i * pq.
ksub * pq.
M, pq.
ksub,
896 distances + i * k, labels + i * k);
909 for (
int m = 0; m < pq.
M; m++) {
910 long n = jj & ((1L << pq.
nbits) - 1);
919 "This index has virtual elements, "
920 "it does not support add");
925 FAISS_THROW_MSG (
"This index has virtual elements, "
926 "it does not support reset");
944 MultiIndexQuantizer2::MultiIndexQuantizer2 (
945 int d,
size_t M,
size_t nbits,
949 assign_indexes.resize (M);
950 for (
int i = 0; i < M; i++) {
951 FAISS_THROW_IF_NOT_MSG(
952 indexes[i]->d == pq.
dsub,
953 "Provided sub-index has incorrect size");
954 assign_indexes[i] = indexes[i];
959 MultiIndexQuantizer2::MultiIndexQuantizer2 (
961 Index *assign_index_0,
962 Index *assign_index_1):
963 MultiIndexQuantizer (d, 2, nbits)
965 FAISS_THROW_IF_NOT_MSG(
966 assign_index_0->d == pq.
dsub &&
967 assign_index_1->d == pq.
dsub,
968 "Provided sub-index has incorrect size");
969 assign_indexes.resize (2);
970 assign_indexes [0] = assign_index_0;
971 assign_indexes [1] = assign_index_1;
979 for (
int i = 0; i < pq.
M; i++) {
987 float* distances,
idx_t* labels)
const
992 int k2 = std::min(K,
long(pq.
ksub));
995 long dsub = pq.
dsub, ksub = pq.
ksub;
998 std::vector<idx_t> sub_ids(n * M * k2);
999 std::vector<float> sub_dis(n * M * k2);
1000 std::vector<float> xsub(n * dsub);
1002 for (
int m = 0; m < M; m++) {
1003 float *xdest = xsub.data();
1004 const float *xsrc = x + m * dsub;
1005 for (
int j = 0; j < n; j++) {
1006 memcpy(xdest, xsrc, dsub *
sizeof(xdest[0]));
1013 &sub_dis[k2 * n * m],
1014 &sub_ids[k2 * n * m]);
1021 for (
int i = 0; i < n; i++) {
1025 for (
int m = 0; m < M; m++) {
1026 float vmin = sub_dis[i + m * n];
1027 idx_t lmin = sub_ids[i + m * n];
1029 label |= lmin << (m * pq.
nbits);
1031 distances [i] = dis;
1037 #pragma omp parallel if(n > 1)
1039 MinSumK <float, PreSortedArray<float>,
false>
1040 msk(K, pq.
M, pq.
nbits, k2);
1042 for (
int i = 0; i < n; i++) {
1043 idx_t *li = labels + i * K;
1044 msk.run (&sub_dis[i * k2], k2 * n,
1045 distances + i * K, li);
1049 const idx_t *idmap0 = sub_ids.data() + i * k2;
1050 long ld_idmap = k2 * n;
1051 long mask1 = ksub - 1L;
1053 for (
int k = 0; k < K; k++) {
1054 const idx_t *idmap = idmap0;
1058 for (
int m = 0; m < M; m++) {
1059 long s = vin & mask1;
1061 vout |= idmap[s] << bs;
std::vector< uint8_t > codes
Codes; size is ntotal * pq.code_size bytes.
size_t nbits
number of bits per quantization index
void decode(const uint8_t *code, float *x) const
decode a vector from a given code (or n vectors if third argument)
Hamming distance on codes.
bool do_polysemous_training
false = standard PQ
void train(idx_t n, const float *x) override
size_t byte_per_idx
number of bytes per code component (1 or 2)
void reset() override
removes all elements from the database.
void train(idx_t n, const float *x) override
size_t dsub
dimensionality of each subvector
void compute_distance_tables(size_t nx, const float *x, float *dis_tables) const
void generalized_hammings_knn(int_maxheap_array_t *ha, const uint8_t *a, const uint8_t *b, size_t nb, size_t code_size, int ordered)
void compute_code_from_distance_table(const float *tab, uint8_t *code) const
void compute_codes(const float *x, uint8_t *codes, size_t n) const
same as compute_code for several vectors
void hamming_distance_histogram(idx_t n, const float *x, idx_t nb, const float *xb, long *dist_histogram)
void search(const float *x, size_t nx, const uint8_t *codes, const size_t ncodes, float_maxheap_array_t *res, bool init_finalize_heap=true) const
void train(idx_t n, const float *x) override
size_t code_size
byte per indexed vector
Filter on generalized Hamming.
size_t ksub
number of centroids for each subquantizer
void search_ip(const float *x, size_t nx, const uint8_t *codes, const size_t ncodes, float_minheap_array_t *res, bool init_finalize_heap=true) const
long idx_t
all indices are this type
void hammings_knn(int_maxheap_array_t *ha, const uint8_t *a, const uint8_t *b, size_t nb, size_t ncodes, int order)
ProductQuantizer pq
The product quantizer used to encode the vectors.
idx_t ntotal
total number of indexed vectors
bool verbose
verbosity level
void add(idx_t n, const float *x) override
void hamming_distance_table(idx_t n, const float *x, int32_t *dis) const
void search(idx_t n, const float *x, idx_t k, float *distances, idx_t *labels) const override
void reconstruct(idx_t key, float *recons) const override
MetricType metric_type
type of metric this index uses for search
size_t M
number of subquantizers
void reconstruct_n(idx_t i0, idx_t ni, float *recons) const override
asymmetric product quantizer (default)
void reconstruct(idx_t key, float *recons) const override
HE filter (using ht) + PQ combination.
void search(idx_t n, const float *x, idx_t k, float *distances, idx_t *labels) const override
void add(idx_t n, const float *x) override
add and reset will crash at runtime
bool is_trained
set if the Index does not require training, or if training is done already
void reset() override
removes all elements from the database.
float * get_centroids(size_t m, size_t i)
return the centroids associated with subvector m
void optimize_pq_for_hamming(ProductQuantizer &pq, size_t n, const float *x) const
bool verbose
verbose during training?
void search(idx_t n, const float *x, idx_t k, float *distances, idx_t *labels) const override
symmetric product quantizer (SDC)
int polysemous_ht
Hamming threshold used for polysemy.
PolysemousTraining polysemous_training
parameters used for the polysemous training
std::vector< Index * > assign_indexes
M Indexes on d / M dimensions.
MetricType
Some algorithms support both an inner product version and a L2 search version.