13 #include "IVFBase.cuh"
14 #include "../utils/Float16.cuh"
16 namespace faiss {
namespace gpu {
25 int bitsPerSubQuantizer,
26 float* pqCentroidData,
27 IndicesOptions indicesOptions,
28 bool useFloat16LookupTables,
68 std::vector<unsigned char>
getListCodes(
int listId)
const;
80 void setPQCentroids_(
float* data);
83 void precomputeCodes_();
103 const int numSubQuantizers_;
106 const int bitsPerSubQuantizer_;
109 const int numSubQuantizerCodes_;
112 const int dimPerSubQuantizer_;
116 const bool useFloat16LookupTables_;
129 bool precomputedCodes_;
135 #ifdef FAISS_USE_FLOAT16
void addCodeVectorsFromCpu(int listId, const void *codes, const long *indices, size_t numVecs)
Holder of GPU resources for a particular flat index.
Base inverted list functionality for IVFFlat and IVFPQ.
static bool isSupportedPQCodeLength(int size)
Returns true if PQ codes of this length (bytes per encoded vector) are supported.
int classifyAndAddVectors(Tensor< float, 2, true > &vecs, Tensor< long, 1, true > &indices)
void query(Tensor< float, 2, true > &queries, int nprobe, int k, Tensor< float, 2, true > &outDistances, Tensor< long, 2, true > &outIndices)
Tensor< float, 3, true > getPQCentroids()
void setPrecomputedCodes(bool enable)
Enable or disable pre-computed codes.
std::vector< unsigned char > getListCodes(int listId) const
Return the list codes of a particular list back to the CPU.
IVFPQ(GpuResources *resources, FlatIndex *quantizer, int numSubQuantizers, int bitsPerSubQuantizer, float *pqCentroidData, IndicesOptions indicesOptions, bool useFloat16LookupTables, MemorySpace space)
Implementing class for IVFPQ on the GPU.
static bool isSupportedNoPrecomputedSubDimSize(int dims)