23 #include "FaissAssert.h"
33 IndexPQ::IndexPQ (
int d,
size_t M,
size_t nbits,
MetricType metric):
34 Index(d, metric), pq(d, M, nbits)
61 if (ntrain_perm > n / 4)
64 printf (
"PQ training on %ld points, remains %ld points: "
65 "training polysemous on %s\n",
66 n - ntrain_perm, ntrain_perm,
67 ntrain_perm == 0 ?
"centroids" :
"these");
69 pq.train(n - ntrain_perm, x);
72 pq, ntrain_perm, x + (n - ntrain_perm) *
d);
96 FAISS_THROW_IF_NOT (ni == 0 || (i0 >= 0 && i0 + ni <=
ntotal));
97 for (
idx_t i = 0; i < ni; i++) {
106 FAISS_THROW_IF_NOT (key >= 0 && key <
ntotal);
125 float *distances,
idx_t *labels)
const
128 if (search_type ==
ST_PQ) {
132 size_t(n), size_t(k), labels, distances };
136 size_t(n), size_t(k), labels, distances };
139 indexPQ_stats.nq += n;
140 indexPQ_stats.ncode += n *
ntotal;
147 search_core_polysemous (n, x, k, distances, labels);
151 uint8_t * q_codes =
new uint8_t [n *
pq.
code_size];
160 for (
size_t i = 0; i < n; i++) {
161 const float *xi = x + i *
d;
163 for (
int j = 0; j <
d; j++)
164 if (xi[j] > 0) code [j>>3] |= 1 << (j & 7);
168 if (search_type ==
ST_SDC) {
171 size_t(n), size_t(k), labels, distances};
173 pq.search_sdc (q_codes, n,
codes.data(),
ntotal, &res,
true);
176 int * idistances =
new int [n * k];
180 size_t (n), size_t (k), labels, idistances};
182 if (search_type ==
ST_HE) {
194 for (
int i = 0; i < k * n; i++)
195 distances[i] = idistances[i];
200 indexPQ_stats.nq += n;
201 indexPQ_stats.ncode += n *
ntotal;
209 void IndexPQStats::reset()
211 nq = ncode = n_hamming_pass = 0;
214 IndexPQStats indexPQ_stats;
217 template <
class HammingComputer>
218 static size_t polysemous_inner_loop (
219 const IndexPQ & index,
220 const float *dis_table_qi,
const uint8_t *q_code,
221 size_t k,
float *heap_dis,
long *heap_ids)
225 int code_size = index.pq.code_size;
226 int ksub = index.pq.ksub;
227 size_t ntotal = index.ntotal;
228 int ht = index.polysemous_ht;
230 const uint8_t *b_code = index.codes.data();
234 HammingComputer hc (q_code, code_size);
236 for (
long bi = 0; bi < ntotal; bi++) {
237 int hd = hc.hamming (b_code);
243 const float * dis_table = dis_table_qi;
244 for (
int m = 0; m < M; m++) {
245 dis += dis_table [b_code[m]];
249 if (dis < heap_dis[0]) {
250 maxheap_pop (k, heap_dis, heap_ids);
251 maxheap_push (k, heap_dis, heap_ids, dis, bi);
260 void IndexPQ::search_core_polysemous (idx_t n,
const float *x, idx_t k,
261 float *distances, idx_t *labels)
const
266 float * dis_tables =
new float [n *
pq.
ksub *
pq.
M];
267 ScopeDeleter<float> del (dis_tables);
271 uint8_t * q_codes =
new uint8_t [n *
pq.
code_size];
272 ScopeDeleter<uint8_t> del2 (q_codes);
277 #pragma omp parallel for
278 for (
idx_t qi = 0; qi < n; qi++) {
287 #pragma omp parallel for reduction (+: n_pass)
288 for (
idx_t qi = 0; qi < n; qi++) {
289 const uint8_t * q_code = q_codes + qi *
pq.
code_size;
291 const float * dis_table_qi = dis_tables + qi *
pq.
M *
pq.
ksub;
293 long * heap_ids = labels + qi * k;
294 float *heap_dis = distances + qi * k;
295 maxheap_heapify (k, heap_dis, heap_ids);
301 n_pass += polysemous_inner_loop<HammingComputer4>
302 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
305 n_pass += polysemous_inner_loop<HammingComputer8>
306 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
309 n_pass += polysemous_inner_loop<HammingComputer16>
310 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
313 n_pass += polysemous_inner_loop<HammingComputer32>
314 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
317 n_pass += polysemous_inner_loop<HammingComputer20>
318 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
322 n_pass += polysemous_inner_loop<HammingComputerM8>
323 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
325 n_pass += polysemous_inner_loop<HammingComputerM4>
326 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
329 "code size %zd not supported for polysemous",
337 n_pass += polysemous_inner_loop<GenHammingComputer8>
338 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
341 n_pass += polysemous_inner_loop<GenHammingComputer16>
342 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
345 n_pass += polysemous_inner_loop<GenHammingComputer32>
346 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
350 n_pass += polysemous_inner_loop<GenHammingComputerM8>
351 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
354 "code size %zd not supported for polysemous",
360 maxheap_reorder (k, heap_dis, heap_ids);
363 indexPQ_stats.nq += n;
364 indexPQ_stats.ncode += n *
ntotal;
365 indexPQ_stats.n_hamming_pass += n_pass;
383 uint8_t * q_codes =
new uint8_t [n *
pq.
code_size];
393 idx_t nb,
const float *xb,
401 uint8_t * q_codes =
new uint8_t [n *
pq.
code_size];
410 del_b_codes.set (b_codes);
414 b_codes =
codes.data();
417 memset (hist, 0,
sizeof(*hist) * (nbits + 1));
422 std::vector<long> histi (nbits + 1);
423 hamdis_t *distances =
new hamdis_t [nb * bs];
426 for (
size_t q0 = 0; q0 < n; q0 += bs) {
435 for (
size_t i = 0; i < nb * (q1 - q0); i++)
436 histi [distances [i]]++;
440 for (
int i = 0; i <= nbits; i++)
472 template <
typename T>
475 bool operator() (
size_t i,
size_t j) {
484 template <
typename T>
488 std::vector<int> perm;
495 void init (
const T*x) {
497 for (
int n = 0; n < N; n++)
500 std::sort (perm.begin(), perm.end(), cmp);
510 return x[perm[n]] - x[perm[n - 1]];
514 int get_ord (
int n) {
526 const typename C::T * vals,
typename C::TI * perm) {
528 for (
int i = 1; i < k; i++) {
529 indirect_heap_push<C> (i + 1, vals, perm, perm[i]);
533 for (
int i = k; i < n; i++) {
534 typename C::TI
id = perm[i];
535 typename C::TI top = perm[0];
537 if (C::cmp(vals[top], vals[
id])) {
538 indirect_heap_pop<C> (k, vals, perm);
539 indirect_heap_push<C> (k, vals, perm, id);
547 for (
int i = k - 1; i > 0; i--) {
548 typename C::TI top = perm[0];
549 indirect_heap_pop<C> (i + 1, vals, perm);
555 template <
typename T>
562 std::vector<int> perm;
566 int initial_k, k_factor;
576 void init (
const T*x) {
578 for (
int n = 0; n < N; n++)
587 partial_sort<HC> (next_k - k, N - k, x, &perm[k]);
591 std::sort (perm.begin() + k, perm.end(), cmp);
605 int next_k = (k + 1) * k_factor - 1;
608 return x[perm[n]] - x[perm[n - 1]];
612 int get_ord (
int n) {
646 template <
typename T,
class SSA,
bool use_seen>
657 size_t heap_capacity, heap_size;
661 std::vector <SSA> ssx;
662 std::vector <long> weights;
668 std::vector <uint8_t> seen;
670 MinSumK (
int K,
int M,
int N): K(K), M(M), N(N) {
671 heap_capacity = K *
M;
673 bh_val =
new T[heap_capacity];
674 bh_ids =
new long[heap_capacity];
676 weights.push_back (1);
677 for (
int m = 1; m <
M; m++)
678 weights.push_back(weights[m - 1] * N);
681 long n_ids = weights.back() *
N;
682 seen.resize ((n_ids + 7) / 8);
685 for (
int m = 0; m <
M; m++)
686 ssx.push_back (SSA(N));
690 bool is_seen (
long i) {
691 return (seen[i >> 3] >> (i & 7)) & 1;
694 void mark_seen (
long i) {
696 seen [i >> 3] |= 1 << (i & 7);
699 void run (
const T *x, T * sums,
long * terms) {
702 for (
int m = 0; m <
M; m++)
703 ssx[m].init(x + N * m);
709 for (
int m = 0; m <
M; m++) {
710 sum += ssx[m].get_0();
713 for (
int m = 0; m <
M; m++) {
714 heap_push<HC> (++heap_size, bh_val, bh_ids,
715 sum + ssx[m].get_diff(1),
720 for (
int k = 1; k <
K; k++) {
723 while (is_seen (bh_ids[0])) {
724 assert (heap_size > 0);
725 heap_pop<HC> (heap_size--, bh_val, bh_ids);
728 assert (heap_size > 0);
730 T sum = sums[k] = bh_val[0];
731 long ti = terms[k] = bh_ids[0];
735 heap_pop<HC> (heap_size--, bh_val, bh_ids);
738 heap_pop<HC> (heap_size--, bh_val, bh_ids);
739 }
while (heap_size > 0 && bh_ids[0] == ti);
744 for (
int m = 0; m <
M; m++) {
747 if (n + 1 >= N)
continue;
749 enqueue_follower (ti, m, n, sum);
760 for (
int k = 0; k <
K; k++) {
767 for (
int m = 0; m <
M; m++) {
769 ti += weights[m] * ssx[m].get_ord(n);
777 void enqueue_follower (
long ti,
int m,
int n, T sum) {
778 T next_sum = sum + ssx[m].get_diff(n + 1);
779 long next_ti = ti + weights[m];
780 heap_push<HC> (++heap_size, bh_val, bh_ids, next_sum, next_ti);
793 MultiIndexQuantizer::MultiIndexQuantizer (
int d,
796 Index(d, METRIC_L2), pq(d, M, nbits)
811 for (
int m = 0; m < pq.
M; m++)
817 float *distances,
idx_t *labels)
const {
820 float * dis_tables =
new float [n * pq.
ksub * pq.
M];
828 #pragma omp parallel for
829 for (
int i = 0; i < n; i++) {
830 const float * dis_table = dis_tables + i * pq.
ksub * pq.
M;
834 for (
int s = 0; s < pq.
M; s++) {
835 float vmin = HUGE_VALF;
839 if (dis_table[j] < vmin) {
845 label |= lmin << (s * pq.
nbits);
846 dis_table += pq.
ksub;
856 #pragma omp parallel if(n > 1)
859 msk(k, pq.
M, pq.
ksub);
861 for (
int i = 0; i < n; i++) {
862 msk.run (dis_tables + i * pq.
ksub * pq.
M,
863 distances + i * k, labels + i * k);
877 for (
int m = 0; m < pq.
M; m++) {
878 long n = jj % pq.
ksub;
886 for (
int m = 0; m < pq.
M; m++) {
887 long n = jj % pq.
ksub;
891 pq.
decode ((uint8_t*)code, recons);
892 }
else FAISS_THROW_MSG(
"only 1 or 2 bytes per index supported");
897 "This index has virtual elements, "
898 "it does not support add");
903 FAISS_THROW_MSG (
"This index has virtual elements, "
904 "it does not support reset");
int M
nb of elements to sum up
std::vector< uint8_t > codes
Codes. Size ntotal * pq.code_size.
size_t nbits
number of bits per quantization index
void decode(const uint8_t *code, float *x) const
decode a single vector from its code (an overload taking an extra count argument decodes n vectors at once)
Hamming distance on codes.
bool do_polysemous_training
false = standard PQ
void train(idx_t n, const float *x) override
size_t byte_per_idx
nb bytes per code component (1 or 2)
void reset() override
removes all elements from the database.
void partial_sort(int k, int n, const typename C::T *vals, typename C::TI *perm)
void train(idx_t n, const float *x) override
void grow(int next_k)
grow the sorted part of the array to size next_k
void compute_distance_tables(size_t nx, const float *x, float *dis_tables) const
void generalized_hammings_knn(int_maxheap_array_t *ha, const uint8_t *a, const uint8_t *b, size_t nb, size_t code_size, int ordered)
void compute_code_from_distance_table(const float *tab, uint8_t *code) const
void compute_codes(const float *x, uint8_t *codes, size_t n) const
same as compute_code for several vectors
void hamming_distance_histogram(idx_t n, const float *x, idx_t nb, const float *xb, long *dist_histogram)
void search(const float *x, size_t nx, const uint8_t *codes, const size_t ncodes, float_maxheap_array_t *res, bool init_finalize_heap=true) const
size_t code_size
byte per indexed vector
Filter on generalized Hamming.
size_t ksub
number of centroids for each subquantizer
void search_ip(const float *x, size_t nx, const uint8_t *codes, const size_t ncodes, float_minheap_array_t *res, bool init_finalize_heap=true) const
typedef long idx_t
all indices are of this type
void hammings_knn(int_maxheap_array_t *ha, const uint8_t *a, const uint8_t *b, size_t nb, size_t ncodes, int order)
ProductQuantizer pq
The product quantizer used to encode the vectors.
idx_t ntotal
total nb of indexed vectors
bool verbose
verbosity level
void add(idx_t n, const float *x) override
int K
nb of sums to return
void hamming_distance_table(idx_t n, const float *x, int32_t *dis) const
void search(idx_t n, const float *x, idx_t k, float *distances, idx_t *labels) const override
void reconstruct(idx_t key, float *recons) const override
MetricType metric_type
type of metric this index uses for search
size_t M
number of subquantizers
int N
nb of possible elements for each of the M terms
void reconstruct_n(idx_t i0, idx_t ni, float *recons) const override
asymmetric product quantizer (default)
void reconstruct(idx_t key, float *recons) const override
HE filter (using ht) + PQ combination.
void add(idx_t n, const float *x) override
add and reset will crash at runtime
bool is_trained
set if the Index does not require training, or if training is done already
void reset() override
removes all elements from the database.
void optimize_pq_for_hamming(ProductQuantizer &pq, size_t n, const float *x) const
bool verbose
verbose during training?
void search(idx_t n, const float *x, idx_t k, float *distances, idx_t *labels) const override
symmetric product quantizer (SDC)
int polysemous_ht
Hamming threshold used for polysemy.
PolysemousTraining polysemous_training
parameters used for the polysemous training
MetricType
Some algorithms support both an inner product version and a L2 search version.