23 #include "FaissAssert.h"
33 IndexPQ::IndexPQ (
int d,
size_t M,
size_t nbits,
MetricType metric):
34 Index(d, metric), pq(d, M, nbits)
61 if (ntrain_perm > n / 4)
64 printf (
"PQ training on %ld points, remains %ld points: "
65 "training polysemous on %s\n",
66 n - ntrain_perm, ntrain_perm,
67 ntrain_perm == 0 ?
"centroids" :
"these");
69 pq.train(n - ntrain_perm, x);
72 pq, ntrain_perm, x + (n - ntrain_perm) *
d);
96 FAISS_THROW_IF_NOT (ni == 0 || (i0 >= 0 && i0 + ni <=
ntotal));
97 for (
idx_t i = 0; i < ni; i++) {
106 FAISS_THROW_IF_NOT (key >= 0 && key <
ntotal);
125 float *distances,
idx_t *labels)
const
128 if (search_type ==
ST_PQ) {
132 size_t(n), size_t(k), labels, distances };
136 size_t(n), size_t(k), labels, distances };
139 indexPQ_stats.nq += n;
140 indexPQ_stats.ncode += n *
ntotal;
147 search_core_polysemous (n, x, k, distances, labels);
151 uint8_t * q_codes =
new uint8_t [n *
pq.
code_size];
160 for (
size_t i = 0; i < n; i++) {
161 const float *xi = x + i *
d;
163 for (
int j = 0; j <
d; j++)
164 if (xi[j] > 0) code [j>>3] |= 1 << (j & 7);
168 if (search_type ==
ST_SDC) {
171 size_t(n), size_t(k), labels, distances};
173 pq.search_sdc (q_codes, n,
codes.data(),
ntotal, &res,
true);
176 int * idistances =
new int [n * k];
180 size_t (n), size_t (k), labels, idistances};
182 if (search_type ==
ST_HE) {
194 for (
int i = 0; i < k * n; i++)
195 distances[i] = idistances[i];
200 indexPQ_stats.nq += n;
201 indexPQ_stats.ncode += n *
ntotal;
209 void IndexPQStats::reset()
211 nq = ncode = n_hamming_pass = 0;
214 IndexPQStats indexPQ_stats;
217 template <
class HammingComputer>
218 static size_t polysemous_inner_loop (
219 const IndexPQ & index,
220 const float *dis_table_qi,
const uint8_t *q_code,
221 size_t k,
float *heap_dis,
long *heap_ids)
225 int code_size = index.pq.code_size;
226 int ksub = index.pq.ksub;
227 size_t ntotal = index.ntotal;
228 int ht = index.polysemous_ht;
230 const uint8_t *b_code = index.codes.data();
234 HammingComputer hc (q_code, code_size);
236 for (
long bi = 0; bi < ntotal; bi++) {
237 int hd = hc.hamming (b_code);
243 const float * dis_table = dis_table_qi;
244 for (
int m = 0; m < M; m++) {
245 dis += dis_table [b_code[m]];
249 if (dis < heap_dis[0]) {
250 maxheap_pop (k, heap_dis, heap_ids);
251 maxheap_push (k, heap_dis, heap_ids, dis, bi);
260 void IndexPQ::search_core_polysemous (idx_t n,
const float *x, idx_t k,
261 float *distances, idx_t *labels)
const
267 float * dis_tables =
new float [n *
pq.
ksub *
pq.
M];
268 ScopeDeleter<float> del (dis_tables);
272 uint8_t * q_codes =
new uint8_t [n *
pq.
code_size];
273 ScopeDeleter<uint8_t> del2 (q_codes);
278 #pragma omp parallel for
279 for (
idx_t qi = 0; qi < n; qi++) {
288 #pragma omp parallel for reduction (+: n_pass)
289 for (
idx_t qi = 0; qi < n; qi++) {
290 const uint8_t * q_code = q_codes + qi *
pq.
code_size;
292 const float * dis_table_qi = dis_tables + qi *
pq.
M *
pq.
ksub;
294 long * heap_ids = labels + qi * k;
295 float *heap_dis = distances + qi * k;
296 maxheap_heapify (k, heap_dis, heap_ids);
302 n_pass += polysemous_inner_loop<HammingComputer4>
303 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
306 n_pass += polysemous_inner_loop<HammingComputer8>
307 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
310 n_pass += polysemous_inner_loop<HammingComputer16>
311 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
314 n_pass += polysemous_inner_loop<HammingComputer32>
315 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
318 n_pass += polysemous_inner_loop<HammingComputer20>
319 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
323 n_pass += polysemous_inner_loop<HammingComputerM8>
324 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
326 n_pass += polysemous_inner_loop<HammingComputerM4>
327 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
333 n_pass += polysemous_inner_loop<GenHammingComputer8>
334 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
337 n_pass += polysemous_inner_loop<GenHammingComputer16>
338 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
341 n_pass += polysemous_inner_loop<GenHammingComputer32>
342 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
345 n_pass += polysemous_inner_loop<GenHammingComputerM8>
346 (*
this, dis_table_qi, q_code, k, heap_dis, heap_ids);
350 maxheap_reorder (k, heap_dis, heap_ids);
353 indexPQ_stats.nq += n;
354 indexPQ_stats.ncode += n *
ntotal;
355 indexPQ_stats.n_hamming_pass += n_pass;
373 uint8_t * q_codes =
new uint8_t [n *
pq.
code_size];
383 idx_t nb,
const float *xb,
391 uint8_t * q_codes =
new uint8_t [n *
pq.
code_size];
400 del_b_codes.set (b_codes);
404 b_codes =
codes.data();
407 memset (hist, 0,
sizeof(*hist) * (nbits + 1));
412 std::vector<long> histi (nbits + 1);
413 hamdis_t *distances =
new hamdis_t [nb * bs];
416 for (
size_t q0 = 0; q0 < n; q0 += bs) {
425 for (
size_t i = 0; i < nb * (q1 - q0); i++)
426 histi [distances [i]]++;
430 for (
int i = 0; i <= nbits; i++)
462 template <
typename T>
465 bool operator() (
size_t i,
size_t j) {
474 template <
typename T>
478 std::vector<int> perm;
485 void init (
const T*x) {
487 for (
int n = 0; n < N; n++)
490 std::sort (perm.begin(), perm.end(), cmp);
500 return x[perm[n]] - x[perm[n - 1]];
504 int get_ord (
int n) {
516 const typename C::T * vals,
typename C::TI * perm) {
518 for (
int i = 1; i < k; i++) {
519 indirect_heap_push<C> (i + 1, vals, perm, perm[i]);
523 for (
int i = k; i < n; i++) {
524 typename C::TI
id = perm[i];
525 typename C::TI top = perm[0];
527 if (C::cmp(vals[top], vals[
id])) {
528 indirect_heap_pop<C> (k, vals, perm);
529 indirect_heap_push<C> (k, vals, perm, id);
537 for (
int i = k - 1; i > 0; i--) {
538 typename C::TI top = perm[0];
539 indirect_heap_pop<C> (i + 1, vals, perm);
545 template <
typename T>
552 std::vector<int> perm;
556 int initial_k, k_factor;
566 void init (
const T*x) {
568 for (
int n = 0; n < N; n++)
577 partial_sort<HC> (next_k - k, N - k, x, &perm[k]);
581 std::sort (perm.begin() + k, perm.end(), cmp);
595 int next_k = (k + 1) * k_factor - 1;
598 return x[perm[n]] - x[perm[n - 1]];
602 int get_ord (
int n) {
636 template <
typename T,
class SSA,
bool use_seen>
647 size_t heap_capacity, heap_size;
651 std::vector <SSA> ssx;
652 std::vector <long> weights;
658 std::vector <uint8_t> seen;
660 MinSumK (
int K,
int M,
int N): K(K), M(M), N(N) {
661 heap_capacity = K *
M;
663 bh_val =
new T[heap_capacity];
664 bh_ids =
new long[heap_capacity];
666 weights.push_back (1);
667 for (
int m = 1; m <
M; m++)
668 weights.push_back(weights[m - 1] * N);
671 long n_ids = weights.back() *
N;
672 seen.resize ((n_ids + 7) / 8);
675 for (
int m = 0; m <
M; m++)
676 ssx.push_back (SSA(N));
680 bool is_seen (
long i) {
681 return (seen[i >> 3] >> (i & 7)) & 1;
684 void mark_seen (
long i) {
686 seen [i >> 3] |= 1 << (i & 7);
689 void run (
const T *x, T * sums,
long * terms) {
692 for (
int m = 0; m <
M; m++)
693 ssx[m].init(x + N * m);
699 for (
int m = 0; m <
M; m++) {
700 sum += ssx[m].get_0();
703 for (
int m = 0; m <
M; m++) {
704 heap_push<HC> (++heap_size, bh_val, bh_ids,
705 sum + ssx[m].get_diff(1),
710 for (
int k = 1; k <
K; k++) {
713 while (is_seen (bh_ids[0])) {
714 assert (heap_size > 0);
715 heap_pop<HC> (heap_size--, bh_val, bh_ids);
718 assert (heap_size > 0);
720 T sum = sums[k] = bh_val[0];
721 long ti = terms[k] = bh_ids[0];
725 heap_pop<HC> (heap_size--, bh_val, bh_ids);
728 heap_pop<HC> (heap_size--, bh_val, bh_ids);
729 }
while (heap_size > 0 && bh_ids[0] == ti);
734 for (
int m = 0; m <
M; m++) {
737 if (n + 1 >= N)
continue;
739 enqueue_follower (ti, m, n, sum);
750 for (
int k = 0; k <
K; k++) {
757 for (
int m = 0; m <
M; m++) {
759 ti += weights[m] * ssx[m].get_ord(n);
767 void enqueue_follower (
long ti,
int m,
int n, T sum) {
768 T next_sum = sum + ssx[m].get_diff(n + 1);
769 long next_ti = ti + weights[m];
770 heap_push<HC> (++heap_size, bh_val, bh_ids, next_sum, next_ti);
783 MultiIndexQuantizer::MultiIndexQuantizer (
int d,
786 Index(d, METRIC_L2), pq(d, M, nbits)
800 for (
int m = 0; m < pq.
M; m++)
806 float *distances,
idx_t *labels)
const {
809 float * dis_tables =
new float [n * pq.
ksub * pq.
M];
817 #pragma omp parallel for
818 for (
int i = 0; i < n; i++) {
819 const float * dis_table = dis_tables + i * pq.
ksub * pq.
M;
823 for (
int s = 0; s < pq.
M; s++) {
824 float vmin = HUGE_VALF;
828 if (dis_table[j] < vmin) {
834 label |= lmin << (s * pq.
nbits);
835 dis_table += pq.
ksub;
845 #pragma omp parallel if(n > 1)
848 msk(k, pq.
M, pq.
ksub);
850 for (
int i = 0; i < n; i++) {
851 msk.run (dis_tables + i * pq.
ksub * pq.
M,
852 distances + i * k, labels + i * k);
866 for (
int m = 0; m < pq.
M; m++) {
867 long n = jj % pq.
ksub;
875 for (
int m = 0; m < pq.
M; m++) {
876 long n = jj % pq.
ksub;
880 pq.
decode ((uint8_t*)code, recons);
881 }
else FAISS_THROW_MSG(
"only 1 or 2 bytes per index supported");
886 "This index has virtual elements, "
887 "it does not support add");
892 FAISS_THROW_MSG (
"This index has virtual elements, "
893 "it does not support reset");
int M
nb of elements to sum up
std::vector< uint8_t > codes
Codes. Size ntotal * pq.code_size.
size_t nbits
number of bits per quantization index
void decode(const uint8_t *code, float *x) const
decode a vector from a given code (or n vectors if third argument)
Hamming distance on codes.
bool do_polysemous_training
false = standard PQ
void train(idx_t n, const float *x) override
size_t byte_per_idx
nb bytes per code component (1 or 2)
void reset() override
removes all elements from the database.
void partial_sort(int k, int n, const typename C::T *vals, typename C::TI *perm)
void train(idx_t n, const float *x) override
void grow(int next_k)
grow the sorted part of the array to size next_k
void compute_distance_tables(size_t nx, const float *x, float *dis_tables) const
void generalized_hammings_knn(int_maxheap_array_t *ha, const uint8_t *a, const uint8_t *b, size_t nb, size_t code_size, int ordered)
void compute_code_from_distance_table(const float *tab, uint8_t *code) const
void compute_codes(const float *x, uint8_t *codes, size_t n) const
same as compute_code for several vectors
void hamming_distance_histogram(idx_t n, const float *x, idx_t nb, const float *xb, long *dist_histogram)
void search(const float *x, size_t nx, const uint8_t *codes, const size_t ncodes, float_maxheap_array_t *res, bool init_finalize_heap=true) const
size_t code_size
byte per indexed vector
Filter on generalized Hamming.
size_t ksub
number of centroids for each subquantizer
void search_ip(const float *x, size_t nx, const uint8_t *codes, const size_t ncodes, float_minheap_array_t *res, bool init_finalize_heap=true) const
long idx_t
all indices are this type
void hammings_knn(int_maxheap_array_t *ha, const uint8_t *a, const uint8_t *b, size_t nb, size_t ncodes, int order)
ProductQuantizer pq
The product quantizer used to encode the vectors.
idx_t ntotal
total nb of indexed vectors
bool verbose
verbosity level
void add(idx_t n, const float *x) override
int K
nb of sums to return
void hamming_distance_table(idx_t n, const float *x, int32_t *dis) const
void search(idx_t n, const float *x, idx_t k, float *distances, idx_t *labels) const override
void reconstruct(idx_t key, float *recons) const override
MetricType metric_type
type of metric this index uses for search
size_t M
number of subquantizers
int N
nb of possible elements for each of the M terms
void reconstruct_n(idx_t i0, idx_t ni, float *recons) const override
asymmetric product quantizer (default)
void reconstruct(idx_t key, float *recons) const override
HE filter (using ht) + PQ combination.
void add(idx_t n, const float *x) override
add and reset will crash at runtime
bool is_trained
set if the Index does not require training, or if training is done already
void reset() override
removes all elements from the database.
void optimize_pq_for_hamming(ProductQuantizer &pq, size_t n, const float *x) const
bool verbose
verbose during training?
void search(idx_t n, const float *x, idx_t k, float *distances, idx_t *labels) const override
symmetric product quantizer (SDC)
int polysemous_ht
Hamming threshold used for polysemy.
PolysemousTraining polysemous_training
parameters used for the polysemous training
MetricType
Some algorithms support both an inner product version and an L2 search version.