#include "utils/MemorySpace.h"
namespace faiss {
namespace gpu {
inline int getDevice() const {
void searchNonPaged_(
int n,
float* outDistancesData,
void searchFromCpuPaged_(
int n,
float* outDistancesData,
size_t getMinPagingSize() const
Returns the current minimum data size for paged searches.
virtual void searchImpl_(int n, const float *x, int k, float *distances, Index::idx_t *labels) const =0
virtual bool addImplRequiresIDs_() const =0
int device
GPU device on which the index is resident.
void add_with_ids(Index::idx_t n, const float *x, const Index::idx_t *ids) override
long idx_t
all indices are this type
const int device_
The GPU device we are resident on.
GpuResources * resources_
Manages streams, cuBLAS handles and scratch memory for devices.
void add(faiss::Index::idx_t, const float *x) override
const MemorySpace memorySpace_
The memory space of our primary storage on the GPU.
size_t minPagedSize_
Size above which we page copies from the CPU to GPU.
void setMinPagingSize(size_t size)
virtual void addImpl_(int n, const float *x, const Index::idx_t *ids)=0
void search(Index::idx_t n, const float *x, Index::idx_t k, float *distances, Index::idx_t *labels) const override
MetricType
Some algorithms support both an inner product version and a L2 search version.