24 namespace faiss {
namespace gpu {
32 storeTransposed(
false) {
81 void reset()
override;
105 float* out)
const override;
107 void set_typename()
override;
118 float* outDistancesData,
119 int* outIndicesData)
const;
121 void searchNonPaged_(
int n,
124 float* outDistancesData,
125 int* outIndicesData)
const;
void copyFrom(faiss::IndexFlatL2 *index)
void copyTo(faiss::IndexFlat *index) const
void reconstruct_n(faiss::Index::idx_t i0, faiss::Index::idx_t num, float *out) const override
Batch reconstruction method.
size_t getMinPagingSize() const
Returns the current minimum data size for paged searches.
Holder of GPU resources for a particular flat index.
FlatIndex * getGpuData()
For internal access.
void copyTo(faiss::IndexFlatL2 *index)
void searchFromCpuPaged_(int n, const float *x, int k, float *outDistancesData, int *outIndicesData) const
size_t getNumVecs() const
Returns the number of vectors we contain.
GpuIndexFlat(GpuResources *resources, const faiss::IndexFlat *index, GpuIndexFlatConfig config=GpuIndexFlatConfig())
void setMinPagingSize(size_t size)
GpuIndexFlatL2(GpuResources *resources, faiss::IndexFlatL2 *index, GpuIndexFlatConfig config=GpuIndexFlatConfig())
bool getUseFloat16() const
Do we store vectors and perform math in float16?
void reconstruct(faiss::Index::idx_t key, float *out) const override
void copyTo(faiss::IndexFlatIP *index)
GpuIndexFlatIP(GpuResources *resources, faiss::IndexFlatIP *index, GpuIndexFlatConfig config=GpuIndexFlatConfig())
long idx_t
All indices are this type.
void copyFrom(const faiss::IndexFlat *index)
void add(Index::idx_t n, const float *x) override
size_t minPagedSize_
Size above which we page copies from the CPU to GPU.
void reset() override
Clears all vectors from this index.
void copyFrom(faiss::IndexFlatIP *index)
void search(faiss::Index::idx_t n, const float *x, faiss::Index::idx_t k, float *distances, faiss::Index::idx_t *labels) const override
void train(Index::idx_t n, const float *x) override
This index is not trained, so this does nothing.
MetricType
Some algorithms support both an inner product version and an L2 search version.
FlatIndex * data_
Holds our GPU data containing the list of vectors.