#include "IVFBase.cuh"
#include "../utils/Float16.cuh"

namespace faiss {
namespace gpu {

/// Implementing class for IVFPQ on the GPU. IVFBase supplies the base
/// inverted list functionality shared by IVFFlat and IVFPQ.
class IVFPQ : public IVFBase {
 public:
  IVFPQ(GpuResources* resources,
        FlatIndex* quantizer, // holder of GPU resources for a particular flat index
        int numSubQuantizers,
        int bitsPerSubQuantizer,
        float* pqCentroidData,
        IndicesOptions indicesOptions,
        bool useFloat16LookupTables,
        MemorySpace space);

  /// Return the list codes of a particular list back to the CPU
  std::vector<unsigned char> getListCodes(int listId) const;

 private:
  /// Sets the current product quantizer centroids
  void setPQCentroids_(float* data);

  /// Calculate precomputed code information
  void precomputeCodes_();

  const int numSubQuantizers_;        ///< sub-quantizers per encoded vector
  const int bitsPerSubQuantizer_;     ///< bits per sub-quantizer code
  const int numSubQuantizerCodes_;    ///< codes (centroids) per sub-quantizer
  const int dimPerSubQuantizer_;      ///< dimensions handled by each sub-quantizer
  const bool useFloat16LookupTables_; ///< whether look-up tables are kept in float16

  bool precomputedCodes_;             ///< whether pre-computed codes are enabled
#ifdef FAISS_USE_FLOAT16
  // ...
#endif
};

} // namespace gpu
} // namespace faiss
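The PQ-related fields above are all derived from a handful of construction parameters. A minimal standalone sketch of that arithmetic (the parameter values are illustrative, not taken from the header):

#include <cstdio>

int main() {
  int dim = 64;                 // dimensionality of the indexed vectors
  int numSubQuantizers = 8;     // sub-quantizers per vector; must divide dim
  int bitsPerSubQuantizer = 8;  // bits used to encode each sub-vector

  int dimPerSubQuantizer = dim / numSubQuantizers;      // 8 dims per sub-vector
  int numSubQuantizerCodes = 1 << bitsPerSubQuantizer;  // 256 centroids per sub-quantizer
  int bytesPerCode = numSubQuantizers * bitsPerSubQuantizer / 8;  // 8 bytes per encoded vector

  printf("dims per sub-quantizer:   %d\n", dimPerSubQuantizer);
  printf("codes per sub-quantizer:  %d\n", numSubQuantizerCodes);
  printf("bytes per encoded vector: %d\n", bytesPerCode);
  return 0;
}

Each additional bit per sub-quantizer doubles the number of codes, and with it the size of every per-query lookup table, which is part of why a float16 lookup-table option can pay off.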
Member function documentation (faiss::gpu::IVFPQ):

void addCodeVectorsFromCpu(int listId, const void *codes, const long *indices, size_t numVecs)
Adds a set of already-encoded vectors, together with their indices, to the given inverted list; the code and index data are supplied from the CPU.
static bool isSupportedPQCodeLength(int size)
Returns true if we support PQ in this size.
int classifyAndAddVectors(Tensor< float, 2, true > &vecs, Tensor< long, 1, true > &indices)
Assigns each input vector to an inverted list, encodes it and appends it there; returns the number of vectors actually added.
void query(Tensor< float, 2, true > &queries, int nprobe, int k, Tensor< float, 2, true > &outDistances, Tensor< long, 2, true > &outIndices)
Finds the approximate k nearest neighbors of each query vector, probing nprobe inverted lists per query, and writes the results to outDistances and outIndices.
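Query-time scanning in an IVFPQ structure is built around per-query lookup tables: for each sub-quantizer, the distances from the query's sub-vector to all of that sub-quantizer's centroids are computed once, after which scanning an encoded vector reduces to summing one table entry per sub-quantizer. The CPU-side sketch below only illustrates that idea; the class above implements it in GPU kernels (optionally storing the tables in float16), and none of the helper names here come from the header.

#include <cstdint>
#include <vector>

// Per-query lookup table: table[sq * numCodes + code] is the squared L2 distance
// between the query's sq-th sub-vector and that sub-quantizer's code-th centroid.
// `centroids` is assumed to be laid out as (subQ, code, dimPerSubQ).
std::vector<float> buildLookupTable(const std::vector<float>& query,
                                    const std::vector<float>& centroids,
                                    int numSubQ, int numCodes, int dimPerSubQ) {
  std::vector<float> table(numSubQ * numCodes, 0.0f);
  for (int sq = 0; sq < numSubQ; ++sq) {
    for (int code = 0; code < numCodes; ++code) {
      float dist = 0.0f;
      for (int d = 0; d < dimPerSubQ; ++d) {
        float diff = query[sq * dimPerSubQ + d] -
                     centroids[(sq * numCodes + code) * dimPerSubQ + d];
        dist += diff * diff;
      }
      table[sq * numCodes + code] = dist;
    }
  }
  return table;
}

// Approximate distance to one encoded vector: one table lookup per sub-quantizer.
float scanOneCode(const std::vector<float>& table, const uint8_t* code,
                  int numSubQ, int numCodes) {
  float dist = 0.0f;
  for (int sq = 0; sq < numSubQ; ++sq) {
    dist += table[sq * numCodes + code[sq]];
  }
  return dist;
}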
Tensor< float, 3, true > getPQCentroids()
Returns the product quantizer sub-centroids held on the GPU.
void setPrecomputedCodes(bool enable)
Enable or disable pre-computed codes.
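For L2 search over residuals, the pre-computed codes optimization rests on a standard decomposition of the distance between a query x, a coarse centroid c and the PQ reconstruction r of a stored residual:

\|x - c - r\|^2 = \|x - c\|^2 + \left(\|r\|^2 + 2\langle c, r\rangle\right) - 2\langle x, r\rangle

The first term falls out of coarse quantization, the bracketed term depends only on the stored centroids and can be tabulated ahead of query time (presumably what precomputeCodes_() above produces), and the last term is the usual per-query lookup table.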
std::vector< unsigned char > getListCodes(int listId) const
Return the list codes of a particular list back to the CPU.
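Codes obtained this way can be decoded on the host. Below is a sketch under the assumption of a plain, non-interleaved layout in which each vector occupies numSubQuantizers consecutive bytes, one code per sub-quantizer; the actual on-GPU layout is not specified here.

#include <cstddef>
#include <vector>

// Reconstruct approximate vectors from PQ codes by concatenating the selected
// sub-quantizer centroids. `centroids` is assumed to be laid out as
// (subQ, code, dimPerSubQ); `codes` holds numVecs * numSubQ bytes.
std::vector<float> decodeCodes(const std::vector<unsigned char>& codes,
                               const std::vector<float>& centroids,
                               size_t numVecs, int numSubQ, int numCodes,
                               int dimPerSubQ) {
  std::vector<float> out(numVecs * numSubQ * dimPerSubQ);
  for (size_t v = 0; v < numVecs; ++v) {
    for (int sq = 0; sq < numSubQ; ++sq) {
      int code = codes[v * numSubQ + sq];
      const float* centroid = &centroids[(sq * numCodes + code) * dimPerSubQ];
      float* dst = &out[(v * numSubQ + sq) * dimPerSubQ];
      for (int d = 0; d < dimPerSubQ; ++d) {
        dst[d] = centroid[d];
      }
    }
  }
  return out;
}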
IVFPQ(GpuResources *resources, FlatIndex *quantizer, int numSubQuantizers, int bitsPerSubQuantizer, float *pqCentroidData, IndicesOptions indicesOptions, bool useFloat16LookupTables, MemorySpace space)
Constructor. IVFPQ is the implementing class for IVFPQ on the GPU; the PQ parameters correspond to the fields documented in the excerpt above.
static bool isSupportedNoPrecomputedSubDimSize(int dims)
Returns true if this number of dimensions per sub-quantizer is supported when pre-computed codes are not in use.
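A hedged sketch of checking parameters with these two static helpers before attempting to build an index. It assumes the header is named IVFPQ.cuh, that the supported "code length" is the number of sub-quantizers, and that dims refers to the dimensions handled by each sub-quantizer; everything else (resources, quantizer, centroid data) is omitted.

#include "IVFPQ.cuh"

bool pqParamsLookSupported(int dim, int numSubQuantizers, bool usePrecomputedCodes) {
  // Assumption: the "PQ code length" is the number of sub-quantizers
  // (one byte per sub-quantizer for 8-bit codes).
  if (!faiss::gpu::IVFPQ::isSupportedPQCodeLength(numSubQuantizers)) {
    return false;
  }
  if (dim % numSubQuantizers != 0) {
    return false;
  }
  if (!usePrecomputedCodes) {
    // Assumption: without pre-computed codes, only certain per-sub-quantizer
    // dimensionalities are supported by the scanning kernels.
    if (!faiss::gpu::IVFPQ::isSupportedNoPrecomputedSubDimSize(
            dim / numSubQuantizers)) {
      return false;
    }
  }
  return true;
}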