24 namespace faiss {
namespace gpu {
71 void reset()
override;
95 float* out)
const override;
97 void set_typename()
override;
108 float* outDistancesData,
109 int* outIndicesData)
const;
111 void searchNonPaged_(
int n,
114 float* outDistancesData,
115 int* outIndicesData)
const;
GpuIndexFlat(GpuResources *resources, int device, bool useFloat16, const faiss::IndexFlat *index)
void copyFrom(faiss::IndexFlatL2 *index)
void copyTo(faiss::IndexFlat *index) const
void reconstruct_n(faiss::Index::idx_t i0, faiss::Index::idx_t num, float *out) const override
Batch reconstruction method.
size_t getMinPagingSize() const
Returns the current minimum data size for paged searches.
Holder of GPU resources for a particular flat index.
FlatIndex * getGpuData()
For internal access.
void copyTo(faiss::IndexFlatL2 *index)
void searchFromCpuPaged_(int n, const float *x, int k, float *outDistancesData, int *outIndicesData) const
const bool useFloat16_
Whether we store our vectors as float32 or float16.
size_t getNumVecs() const
Returns the number of vectors we contain.
void setMinPagingSize(size_t size)
bool getUseFloat16() const
Do we store vectors and perform math in float16?
GpuIndexFlatL2(GpuResources *resources, int device, bool useFloat16, faiss::IndexFlatL2 *index)
void reconstruct(faiss::Index::idx_t key, float *out) const override
void copyTo(faiss::IndexFlatIP *index)
long idx_t
All indices are of this type.
void copyFrom(const faiss::IndexFlat *index)
void add(Index::idx_t n, const float *x) override
size_t minPagedSize_
Size above which we page copies from the CPU to GPU.
GpuIndexFlatIP(GpuResources *resources, int device, bool useFloat16, faiss::IndexFlatIP *index)
void reset() override
Clears all vectors from this index.
void copyFrom(faiss::IndexFlatIP *index)
void search(faiss::Index::idx_t n, const float *x, faiss::Index::idx_t k, float *distances, faiss::Index::idx_t *labels) const override
void train(Index::idx_t n, const float *x) override
This index is not trained, so this does nothing.
MetricType
Some algorithms support both an inner product version and an L2 search version.
FlatIndex * data_
Holds our GPU data containing the list of vectors.