<div class="line"><a name="l00006"></a><span class="lineno"> 6</span> <span class="comment"> * This source code is licensed under the CC-by-NC license found in the</span></div>
<div class="line"><a name="l00007"></a><span class="lineno"> 7</span> <span class="comment"> * LICENSE file in the root directory of this source tree.</span></div>
<div class="line"><a name="l00010"></a><span class="lineno"> 10</span> <span class="comment">// Copyright 2004-present Facebook. All Rights Reserved.</span></div>
<div class="line"><a name="l00040"></a><span class="lineno"> 40</span> <span class="comment">/// Wrapper around the GPU implementation that looks like</span></div>
<div class="line"><a name="l00041"></a><span class="lineno"> 41</span> <span class="comment">/// faiss::IndexFlat; copies over centroid data from a given</span></div>
<div class="line"><a name="l00045"></a><span class="lineno"> 45</span> <span class="comment"> /// Construct from a pre-existing faiss::IndexFlat instance, copying</span></div>
<div class="line"><a name="l00046"></a><span class="lineno"> 46</span> <span class="comment"> /// data over to the given GPU</span></div>
<div class="line"><a name="l00051"></a><span class="lineno"> 51</span> <span class="comment"> /// Construct an empty instance that can be added to</span></div>
<div class="line"><a name="l00059"></a><span class="lineno"> 59</span> <span class="comment"> /// Set the minimum data size for searches (in MiB) for which we use</span></div>
<div class="line"><a name="l00060"></a><span class="lineno"> 60</span> <span class="comment"> /// CPU -> GPU paging</span></div>
<div class="line"><a name="l00063"></a><span class="lineno"> 63</span> <span class="comment"> /// Returns the current minimum data size for paged searches</span></div>
<div class="line"><a name="l00066"></a><span class="lineno"> 66</span> <span class="comment"> /// Do we store vectors and perform math in float16?</span></div>
<div class="line"><a name="l00069"></a><span class="lineno"> 69</span> <span class="comment"> /// Initialize ourselves from the given CPU index; will overwrite</span></div>
<div class="line"><a name="l00070"></a><span class="lineno"> 70</span> <span class="comment"> /// all data in ourselves</span></div>
<div class="line"><a name="l00073"></a><span class="lineno"> 73</span> <span class="comment"> /// Copy ourselves to the given CPU index; will overwrite all data</span></div>
<div class="line"><a name="l00074"></a><span class="lineno"> 74</span> <span class="comment"> /// in the index instance</span></div>
<div class="line"><a name="l00077"></a><span class="lineno"> 77</span> <span class="comment"> /// Returns the number of vectors we contain</span></div>
<div class="line"><a name="l00083"></a><span class="lineno"> 83</span> <span class="comment"> /// This index is not trained, so this does nothing</span></div>
<div class="line"><a name="l00084"></a><span class="lineno"> 84</span> <span class="comment"></span><span class="keyword">virtual</span> <span class="keywordtype">void</span> <a class="code" href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#add4d3c8dc767d4a81575829852f3996e">train</a>(<a class="code" href="structfaiss_1_1Index.html#a040c6aed1f224f3ea7bf58eebc0c31a4">Index::idx_t</a> n, <span class="keyword">const</span> <span class="keywordtype">float</span>* x);</div>
<div class="line"><a name="l00086"></a><span class="lineno"> 86</span> <span class="comment"> /// `x`, `distances` and `labels` can be resident on the CPU or any</span></div>
<div class="line"><a name="l00087"></a><span class="lineno"> 87</span> <span class="comment"> /// GPU; copies are performed as needed</span></div>
<div class="line"><a name="l00088"></a><span class="lineno"> 88</span> <span class="comment"> /// We have our own implementation here which handles CPU async</span></div>
<div class="line"><a name="l00089"></a><span class="lineno"> 89</span> <span class="comment"> /// copies; searchImpl_ is not called</span></div>
<div class="line"><a name="l00090"></a><span class="lineno"> 90</span> <span class="comment"> /// FIXME: move paged impl into GpuIndex</span></div>
<div class="line"><a name="l00097"></a><span class="lineno"> 97</span> <span class="comment"> /// Reconstruction methods; prefer the batch reconstruct as it will</span></div>
<div class="line"><a name="l00098"></a><span class="lineno"> 98</span> <span class="comment"> /// be more efficient</span></div>
<div class="line"><a name="l00117"></a><span class="lineno"> 117</span> <span class="comment"> /// Should not be called (we have our own implementation)</span></div>
<div class="line"><a name="l00124"></a><span class="lineno"> 124</span> <span class="comment"> /// Called from search when the input data is on the CPU;</span></div>
<div class="line"><a name="l00125"></a><span class="lineno"> 125</span> <span class="comment"> /// potentially allows for pinned memory usage</span></div>
<div class="line"><a name="l00139"></a><span class="lineno"> 139</span> <span class="comment"> /// Size above which we page copies from the CPU to GPU</span></div>
<div class="line"><a name="l00144"></a><span class="lineno"> 144</span> <span class="comment"> /// Holds our GPU data containing the list of vectors</span></div>
<div class="line"><a name="l00148"></a><span class="lineno"> 148</span> <span class="comment">/// Wrapper around the GPU implementation that looks like</span></div>
<div class="line"><a name="l00149"></a><span class="lineno"> 149</span> <span class="comment">/// faiss::IndexFlatL2; copies over centroid data from a given</span></div>
<div class="line"><a name="l00153"></a><span class="lineno"> 153</span> <span class="comment"> /// Construct from a pre-existing faiss::IndexFlatL2 instance, copying</span></div>
<div class="line"><a name="l00154"></a><span class="lineno"> 154</span> <span class="comment"> /// data over to the given GPU</span></div>
<div class="line"><a name="l00159"></a><span class="lineno"> 159</span> <span class="comment"> /// Construct an empty instance that can be added to</span></div>
<div class="line"><a name="l00164"></a><span class="lineno"> 164</span> <span class="comment"> /// Initialize ourselves from the given CPU index; will overwrite</span></div>
<div class="line"><a name="l00165"></a><span class="lineno"> 165</span> <span class="comment"> /// all data in ourselves</span></div>
<div class="line"><a name="l00168"></a><span class="lineno"> 168</span> <span class="comment"> /// Copy ourselves to the given CPU index; will overwrite all data</span></div>
<div class="line"><a name="l00169"></a><span class="lineno"> 169</span> <span class="comment"> /// in the index instance</span></div>
<div class="line"><a name="l00173"></a><span class="lineno"> 173</span> <span class="comment">/// Wrapper around the GPU implementation that looks like</span></div>
<div class="line"><a name="l00174"></a><span class="lineno"> 174</span> <span class="comment">/// faiss::IndexFlatIP; copies over centroid data from a given</span></div>
<div class="line"><a name="l00178"></a><span class="lineno"> 178</span> <span class="comment"> /// Construct from a pre-existing faiss::IndexFlatIP instance, copying</span></div>
<div class="line"><a name="l00179"></a><span class="lineno"> 179</span> <span class="comment"> /// data over to the given GPU</span></div>
<div class="line"><a name="l00184"></a><span class="lineno"> 184</span> <span class="comment"> /// Construct an empty instance that can be added to</span></div>
<div class="line"><a name="l00189"></a><span class="lineno"> 189</span> <span class="comment"> /// Initialize ourselves from the given CPU index; will overwrite</span></div>
<div class="line"><a name="l00190"></a><span class="lineno"> 190</span> <span class="comment"> /// all data in ourselves</span></div>
<div class="line"><a name="l00193"></a><span class="lineno"> 193</span> <span class="comment"> /// Copy ourselves to the given CPU index; will overwrite all data</span></div>
<div class="line"><a name="l00194"></a><span class="lineno"> 194</span> <span class="comment"> /// in the index instance</span></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_a48ad6981103976703f636b5429d00b82"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#a48ad6981103976703f636b5429d00b82">faiss::gpu::GpuIndexFlat::searchImpl_</a></div><div class="ttdeci">virtual void searchImpl_(faiss::Index::idx_t n, const float *x, faiss::Index::idx_t k, float *distances, faiss::Index::idx_t *labels) const </div><div class="ttdoc">Should not be called (we have our own implementation) </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00255">GpuIndexFlat.cu:255</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_a44532c7e5165d36e5da57272d37544ae"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#a44532c7e5165d36e5da57272d37544ae">faiss::gpu::GpuIndexFlat::getMinPagingSize</a></div><div class="ttdeci">size_t getMinPagingSize() const </div><div class="ttdoc">Returns the current minimum data size for paged searches. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00074">GpuIndexFlat.cu:74</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1FlatIndex_html"><div class="ttname"><a href="classfaiss_1_1gpu_1_1FlatIndex.html">faiss::gpu::FlatIndex</a></div><div class="ttdoc">Holder of GPU resources for a particular flat index. </div><div class="ttdef"><b>Definition:</b> <a href="FlatIndex_8cuh_source.html#l00023">FlatIndex.cuh:23</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_afedeff5442b6ed94f856cecb8d8e598d"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#afedeff5442b6ed94f856cecb8d8e598d">faiss::gpu::GpuIndexFlat::searchFromCpuPaged_</a></div><div class="ttdeci">void searchFromCpuPaged_(int n, const float *x, int k, float *outDistancesData, int *outIndicesData) const </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00286">GpuIndexFlat.cu:286</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_a51b5aa0d68670ec7ccb32b0353c7387f"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#a51b5aa0d68670ec7ccb32b0353c7387f">faiss::gpu::GpuIndexFlat::getNumVecs</a></div><div class="ttdeci">size_t getNumVecs() const </div><div class="ttdoc">Returns the number of vectors we contain. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00134">GpuIndexFlat.cu:134</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_add4d3c8dc767d4a81575829852f3996e"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#add4d3c8dc767d4a81575829852f3996e">faiss::gpu::GpuIndexFlat::train</a></div><div class="ttdeci">virtual void train(Index::idx_t n, const float *x)</div><div class="ttdoc">This index is not trained, so this does nothing. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00148">GpuIndexFlat.cu:148</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_a92d33af0574561c0cb208cfbe28b2742"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#a92d33af0574561c0cb208cfbe28b2742">faiss::gpu::GpuIndexFlat::addImpl_</a></div><div class="ttdeci">virtual void addImpl_(faiss::Index::idx_t n, const float *x, const faiss::Index::idx_t *ids)</div><div class="ttdoc">Called from GpuIndex for add. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00153">GpuIndexFlat.cu:153</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_af9b5ff82eb54a6d0632d515db4ee3994"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#af9b5ff82eb54a6d0632d515db4ee3994">faiss::gpu::GpuIndexFlat::getUseFloat16</a></div><div class="ttdeci">bool getUseFloat16() const </div><div class="ttdoc">Do we store vectors and perform math in float16? </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00079">GpuIndexFlat.cu:79</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_a33858188d11db8bd77a28c53cb3eb323"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#a33858188d11db8bd77a28c53cb3eb323">faiss::gpu::GpuIndexFlat::reset</a></div><div class="ttdeci">virtual void reset()</div><div class="ttdoc">Clears all vectors from this index. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00139">GpuIndexFlat.cu:139</a></div></div>
<div class="ttc" id="structfaiss_1_1Index_html_a040c6aed1f224f3ea7bf58eebc0c31a4"><div class="ttname"><a href="structfaiss_1_1Index.html#a040c6aed1f224f3ea7bf58eebc0c31a4">faiss::Index::idx_t</a></div><div class="ttdeci">long idx_t</div><div class="ttdoc">all indices are this type </div><div class="ttdef"><b>Definition:</b> <a href="Index_8h_source.html#l00064">Index.h:64</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_aad665a7b5888b4aafd47ae0f8d0e6c40"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#aad665a7b5888b4aafd47ae0f8d0e6c40">faiss::gpu::GpuIndexFlat::minPagedSize_</a></div><div class="ttdeci">size_t minPagedSize_</div><div class="ttdoc">Size above which we page copies from the CPU to GPU. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8h_source.html#l00140">GpuIndexFlat.h:140</a></div></div>
<div class="ttc" id="namespacefaiss_html_afd12191c638da74760ff397cf319752c"><div class="ttname"><a href="namespacefaiss.html#afd12191c638da74760ff397cf319752c">faiss::MetricType</a></div><div class="ttdeci">MetricType</div><div class="ttdoc">Some algorithms support both an inner product version and a L2 search version. </div><div class="ttdef"><b>Definition:</b> <a href="Index_8h_source.html#l00044">Index.h:44</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_ae393840fc80403369452c25c9155e067"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#ae393840fc80403369452c25c9155e067">faiss::gpu::GpuIndexFlat::data_</a></div><div class="ttdeci">FlatIndex * data_</div><div class="ttdoc">Holds our GPU data containing the list of vectors. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8h_source.html#l00145">GpuIndexFlat.h:145</a></div></div>