11 #include "IVFBase.cuh"
12 #include "../utils/Float16.cuh"
14 namespace faiss {
namespace gpu {
23 int bitsPerSubQuantizer,
24 float* pqCentroidData,
25 IndicesOptions indicesOptions,
26 bool useFloat16LookupTables,
66 std::vector<unsigned char>
getListCodes(
int listId)
const;
78 void setPQCentroids_(
float* data);
81 void precomputeCodes_();
101 const int numSubQuantizers_;
104 const int bitsPerSubQuantizer_;
107 const int numSubQuantizerCodes_;
110 const int dimPerSubQuantizer_;
114 const bool useFloat16LookupTables_;
127 bool precomputedCodes_;
133 #ifdef FAISS_USE_FLOAT16
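The private members above are all derived from the constructor arguments. As a rough orientation, here is a minimal standalone sketch (not Faiss code; the dimensions are hypothetical) of how those quantities relate:

#include <cstdio>

// Toy illustration only (not Faiss code): how the members above are
// derived from the constructor arguments, for a hypothetical index with
// dim = 64, 8 sub-quantizers, and 8 bits per sub-quantizer.
int main() {
  int dim = 64;                 // full vector dimension (owned by the coarse quantizer)
  int numSubQuantizers = 8;     // constructor argument
  int bitsPerSubQuantizer = 8;  // constructor argument

  int numSubQuantizerCodes = 1 << bitsPerSubQuantizer; // 256 centroids per sub-quantizer
  int dimPerSubQuantizer = dim / numSubQuantizers;     // 8 dims handled by each sub-quantizer

  // Each database vector is stored as one code per sub-quantizer;
  // with 8-bit codes this is numSubQuantizers bytes per vector.
  int bytesPerEncodedVector = numSubQuantizers * bitsPerSubQuantizer / 8;

  // A query-time lookup table holds one distance per (sub-quantizer, code)
  // pair; float16 lookup tables halve its footprint (2 bytes vs. 4).
  int lookupTableEntries = numSubQuantizers * numSubQuantizerCodes;

  std::printf("%d codes/sub-quantizer, %d dims/sub-quantizer, %d bytes/vector, %d LUT entries\n",
              numSubQuantizerCodes, dimPerSubQuantizer,
              bytesPerEncodedVector, lookupTableEntries);
  return 0;
}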
Member documentation for faiss::gpu::IVFPQ, the implementing class for IVFPQ on the GPU:

IVFPQ(GpuResources *resources, FlatIndex *quantizer, int numSubQuantizers, int bitsPerSubQuantizer, float *pqCentroidData, IndicesOptions indicesOptions, bool useFloat16LookupTables, MemorySpace space)
Constructor. `quantizer` is the coarse quantizer, a FlatIndex (holder of GPU resources for a particular flat index); IVFPQ itself derives from IVFBase (base inverted list functionality for IVFFlat and IVFPQ).

static bool isSupportedPQCodeLength(int size)
Returns true if we support PQ in this size.

static bool isSupportedNoPrecomputedSubDimSize(int dims)
Returns whether this per-sub-quantizer dimension is supported when precomputed codes are disabled.

void setPrecomputedCodes(bool enable)
Enable or disable pre-computed codes (see the note after this list).

void addCodeVectorsFromCpu(int listId, const void *codes, const long *indices, size_t numVecs)
Adds a set of already-encoded vectors (codes) and their user indices to the given inverted list.

int classifyAndAddVectors(Tensor<float, 2, true> &vecs, Tensor<long, 1, true> &indices)
Assigns each input vector to an inverted list, encodes it with the product quantizer, and adds it; returns the number of vectors added.

void query(Tensor<float, 2, true> &queries, int nprobe, int k, Tensor<float, 2, true> &outDistances, Tensor<long, 2, true> &outIndices)
Scans the nprobe closest inverted lists for each query and returns the k nearest neighbors and their distances.

std::vector<unsigned char> getListCodes(int listId) const
Return the list codes of a particular list back to the CPU (see the decoding sketch after this list).

Tensor<float, 3, true> getPQCentroids()
Returns the product quantizer's sub-quantizer centroids.
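On precomputed codes (a sketch of the standard IVFADC algebra behind setPrecomputedCodes, not a description of the exact GPU kernels): writing a database vector as a coarse centroid c plus a PQ-reconstructed residual r, the squared L2 distance to a query q decomposes as

    ||q - c - r||^2 = ||q - c||^2 + (||r||^2 + 2<c, r>) - 2<q, r>

The first term is already available from coarse quantization and the last is a per-query lookup table over sub-quantizer codes, while the middle term depends only on the database side, so it can be precomputed per (coarse centroid, sub-quantizer, code). That is presumably what precomputeCodes_() builds and what precomputedCodes_ toggles; useFloat16LookupTables_ then controls whether such query-time tables are held in float16.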
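To illustrate how the raw codes and the PQ centroids fit together, here is a host-side sketch. It is not part of Faiss: it assumes 8-bit codes stored as one byte per sub-quantizer per vector, and a flat centroid array ordered as (sub-quantizer, code id, sub-dimension), which is one plausible reading of getPQCentroids' 3-d shape; verify the actual layouts before relying on them.

#include <cstddef>
#include <vector>

// Hypothetical helper (not part of Faiss): reconstructs approximate vectors
// from PQ codes, assuming 8-bit codes (one byte per sub-quantizer per vector)
// and centroids laid out as [numSubQuantizers][numCodes][dimPerSubQuantizer].
std::vector<float> decodePQ(
    const std::vector<unsigned char>& codes,  // e.g. as returned by getListCodes(listId)
    const std::vector<float>& centroids,      // e.g. copied back from getPQCentroids()
    int numSubQuantizers,
    int numCodes,
    int dimPerSubQuantizer) {
  std::size_t numVecs = codes.size() / numSubQuantizers;
  std::vector<float> out(numVecs * numSubQuantizers * dimPerSubQuantizer);

  for (std::size_t v = 0; v < numVecs; ++v) {
    for (int sq = 0; sq < numSubQuantizers; ++sq) {
      int code = codes[v * numSubQuantizers + sq];
      // Each sub-vector is approximated by the sub-centroid its code selects.
      const float* src =
          &centroids[(std::size_t)(sq * numCodes + code) * dimPerSubQuantizer];
      float* dst = &out[(v * numSubQuantizers + sq) * dimPerSubQuantizer];
      for (int d = 0; d < dimPerSubQuantizer; ++d) {
        dst[d] = src[d];
      }
    }
  }
  return out;
}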