<div class="line"><a name="l00005"></a><span class="lineno"> 5</span> <span class="comment"> * This source code is licensed under the BSD+Patents license found in the</span></div>
<div class="line"><a name="l00006"></a><span class="lineno"> 6</span> <span class="comment"> * LICENSE file in the root directory of this source tree.</span></div>
<div class="line"><a name="l00009"></a><span class="lineno"> 9</span> <span class="comment">// Copyright 2004-present Facebook. All Rights Reserved.</span></div>
<div class="line"><a name="l00034"></a><span class="lineno"> 34</span> <span class="comment"> /// Whether or not data is stored as float16</span></div>
<div class="line"><a name="l00037"></a><span class="lineno"> 37</span> <span class="comment"> /// Whether or not all math is performed in float16, if useFloat16 is</span></div>
<div class="line"><a name="l00038"></a><span class="lineno"> 38</span> <span class="comment"> /// specified. If true, we use cublasHgemm, supported only on CC</span></div>
<div class="line"><a name="l00039"></a><span class="lineno"> 39</span> <span class="comment"> /// 5.3+. Otherwise, we use cublasSgemmEx.</span></div>
<div class="line"><a name="l00042"></a><span class="lineno"> 42</span> <span class="comment"> /// Whether or not data is stored (transparently) in a transposed</span></div>
<div class="line"><a name="l00043"></a><span class="lineno"> 43</span> <span class="comment"> /// layout, enabling use of the NN GEMM call, which is ~10% faster.</span></div>
<div class="line"><a name="l00044"></a><span class="lineno"> 44</span> <span class="comment"> /// This will improve the speed of the flat index, but will</span></div>
<div class="line"><a name="l00045"></a><span class="lineno"> 45</span> <span class="comment"> /// substantially slow down any add() calls made, as all data must</span></div>
<div class="line"><a name="l00046"></a><span class="lineno"> 46</span> <span class="comment"> /// be transposed, and will increase storage requirements (we store</span></div>
<div class="line"><a name="l00047"></a><span class="lineno"> 47</span> <span class="comment"> /// data in both transposed and non-transposed layouts).</span></div>
<div class="line"><a name="l00051"></a><span class="lineno"> 51</span> <span class="comment">/// Wrapper around the GPU implementation that looks like</span></div>
<div class="line"><a name="l00052"></a><span class="lineno"> 52</span> <span class="comment">/// faiss::IndexFlat; copies over centroid data from a given</span></div>
<div class="line"><a name="l00056"></a><span class="lineno"> 56</span> <span class="comment"> /// Construct from a pre-existing faiss::IndexFlat instance, copying</span></div>
<div class="line"><a name="l00057"></a><span class="lineno"> 57</span> <span class="comment"> /// data over to the given GPU</span></div>
<div class="line"><a name="l00062"></a><span class="lineno"> 62</span> <span class="comment"> /// Construct an empty instance that can be added to</span></div>
<div class="line"><a name="l00070"></a><span class="lineno"> 70</span> <span class="comment"> /// Set the minimum data size for searches (in MiB) for which we use</span></div>
<div class="line"><a name="l00071"></a><span class="lineno"> 71</span> <span class="comment"> /// CPU -> GPU paging</span></div>
<div class="line"><a name="l00074"></a><span class="lineno"> 74</span> <span class="comment"> /// Returns the current minimum data size for paged searches</span></div>
<div class="line"><a name="l00077"></a><span class="lineno"> 77</span> <span class="comment"> /// Initialize ourselves from the given CPU index; will overwrite</span></div>
<div class="line"><a name="l00078"></a><span class="lineno"> 78</span> <span class="comment"> /// all data in ourselves</span></div>
<div class="line"><a name="l00081"></a><span class="lineno"> 81</span> <span class="comment"> /// Copy ourselves to the given CPU index; will overwrite all data</span></div>
<div class="line"><a name="l00082"></a><span class="lineno"> 82</span> <span class="comment"> /// in the index instance</span></div>
<div class="line"><a name="l00085"></a><span class="lineno"> 85</span> <span class="comment"> /// Returns the number of vectors we contain</span></div>
<div class="line"><a name="l00091"></a><span class="lineno"> 91</span> <span class="comment"> /// This index is not trained, so this does nothing</span></div>
<div class="line"><a name="l00092"></a><span class="lineno"> 92</span> <span class="comment"></span><span class="keywordtype">void</span> <a class="code" href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#a0e74fad76628ddfb5ae3dc1d1c69f7e8">train</a>(<a class="code" href="structfaiss_1_1Index.html#a040c6aed1f224f3ea7bf58eebc0c31a4">Index::idx_t</a> n, <span class="keyword">const</span> <span class="keywordtype">float</span>* x) <span class="keyword">override</span>;</div>
<div class="line"><a name="l00097"></a><span class="lineno"> 97</span> <span class="comment"> /// `x`, `distances` and `labels` can be resident on the CPU or any</span></div>
<div class="line"><a name="l00098"></a><span class="lineno"> 98</span> <span class="comment"> /// GPU; copies are performed as needed</span></div>
<div class="line"><a name="l00099"></a><span class="lineno"> 99</span> <span class="comment"> /// We have our own implementation here which handles CPU async</span></div>
<div class="line"><a name="l00100"></a><span class="lineno"> 100</span> <span class="comment"> /// copies; searchImpl_ is not called</span></div>
<div class="line"><a name="l00101"></a><span class="lineno"> 101</span> <span class="comment"> /// FIXME: move paged impl into GpuIndex</span></div>
<div class="line"><a name="l00109"></a><span class="lineno"> 109</span> <span class="comment"> /// Reconstruction methods; prefer the batch reconstruct as it will</span></div>
<div class="line"><a name="l00110"></a><span class="lineno"> 110</span> <span class="comment"> /// be more efficient</span></div>
<div class="line"><a name="l00129"></a><span class="lineno"> 129</span> <span class="comment"> /// Should not be called (we have our own implementation)</span></div>
<div class="line"><a name="l00137"></a><span class="lineno"> 137</span> <span class="comment"> /// Called from search when the input data is on the CPU;</span></div>
<div class="line"><a name="l00138"></a><span class="lineno"> 138</span> <span class="comment"> /// potentially allows for pinned memory usage</span></div>
<div class="line"><a name="l00159"></a><span class="lineno"> 159</span> <span class="comment"> /// Size above which we page copies from the CPU to GPU</span></div>
<div class="line"><a name="l00162"></a><span class="lineno"> 162</span> <span class="comment"> /// Holds our GPU data containing the list of vectors</span></div>
<div class="line"><a name="l00166"></a><span class="lineno"> 166</span> <span class="comment">/// Wrapper around the GPU implementation that looks like</span></div>
<div class="line"><a name="l00167"></a><span class="lineno"> 167</span> <span class="comment">/// faiss::IndexFlatL2; copies over centroid data from a given</span></div>
<div class="line"><a name="l00171"></a><span class="lineno"> 171</span> <span class="comment"> /// Construct from a pre-existing faiss::IndexFlatL2 instance, copying</span></div>
<div class="line"><a name="l00172"></a><span class="lineno"> 172</span> <span class="comment"> /// data over to the given GPU</span></div>
<div class="line"><a name="l00177"></a><span class="lineno"> 177</span> <span class="comment"> /// Construct an empty instance that can be added to</span></div>
<div class="line"><a name="l00182"></a><span class="lineno"> 182</span> <span class="comment"> /// Initialize ourselves from the given CPU index; will overwrite</span></div>
<div class="line"><a name="l00183"></a><span class="lineno"> 183</span> <span class="comment"> /// all data in ourselves</span></div>
<div class="line"><a name="l00186"></a><span class="lineno"> 186</span> <span class="comment"> /// Copy ourselves to the given CPU index; will overwrite all data</span></div>
<div class="line"><a name="l00187"></a><span class="lineno"> 187</span> <span class="comment"> /// in the index instance</span></div>
<div class="line"><a name="l00191"></a><span class="lineno"> 191</span> <span class="comment">/// Wrapper around the GPU implementation that looks like</span></div>
<div class="line"><a name="l00192"></a><span class="lineno"> 192</span> <span class="comment">/// faiss::IndexFlatIP; copies over centroid data from a given</span></div>
<div class="line"><a name="l00196"></a><span class="lineno"> 196</span> <span class="comment"> /// Construct from a pre-existing faiss::IndexFlatIP instance, copying</span></div>
<div class="line"><a name="l00197"></a><span class="lineno"> 197</span> <span class="comment"> /// data over to the given GPU</span></div>
<div class="line"><a name="l00202"></a><span class="lineno"> 202</span> <span class="comment"> /// Construct an empty instance that can be added to</span></div>
<div class="line"><a name="l00207"></a><span class="lineno"> 207</span> <span class="comment"> /// Initialize ourselves from the given CPU index; will overwrite</span></div>
<div class="line"><a name="l00208"></a><span class="lineno"> 208</span> <span class="comment"> /// all data in ourselves</span></div>
<div class="line"><a name="l00211"></a><span class="lineno"> 211</span> <span class="comment"> /// Copy ourselves to the given CPU index; will overwrite all data</span></div>
<div class="line"><a name="l00212"></a><span class="lineno"> 212</span> <span class="comment"> /// in the index instance</span></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_a44532c7e5165d36e5da57272d37544ae"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#a44532c7e5165d36e5da57272d37544ae">faiss::gpu::GpuIndexFlat::getMinPagingSize</a></div><div class="ttdeci">size_t getMinPagingSize() const </div><div class="ttdoc">Returns the current minimum data size for paged searches. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00082">GpuIndexFlat.cu:82</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1FlatIndex_html"><div class="ttname"><a href="classfaiss_1_1gpu_1_1FlatIndex.html">faiss::gpu::FlatIndex</a></div><div class="ttdoc">Holder of GPU resources for a particular flat index. </div><div class="ttdef"><b>Definition:</b> <a href="FlatIndex_8cuh_source.html#l00023">FlatIndex.cuh:23</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_afedeff5442b6ed94f856cecb8d8e598d"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#afedeff5442b6ed94f856cecb8d8e598d">faiss::gpu::GpuIndexFlat::searchFromCpuPaged_</a></div><div class="ttdeci">void searchFromCpuPaged_(int n, const float *x, int k, float *outDistancesData, int *outIndicesData) const </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00319">GpuIndexFlat.cu:319</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_ae0c916b911eda2f1a09a55f42ebc729c"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#ae0c916b911eda2f1a09a55f42ebc729c">faiss::gpu::GpuIndexFlat::addImpl_</a></div><div class="ttdeci">void addImpl_(faiss::Index::idx_t n, const float *x, const faiss::Index::idx_t *ids) override</div><div class="ttdoc">Called from GpuIndex for add. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00180">GpuIndexFlat.cu:180</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_ac99bfd11ff5150f29cf054ea55fc8fe4"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#ac99bfd11ff5150f29cf054ea55fc8fe4">faiss::gpu::GpuIndexFlat::searchImpl_</a></div><div class="ttdeci">void searchImpl_(faiss::Index::idx_t n, const float *x, faiss::Index::idx_t k, float *distances, faiss::Index::idx_t *labels) const override</div><div class="ttdoc">Should not be called (we have our own implementation) </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00288">GpuIndexFlat.cu:288</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_a51b5aa0d68670ec7ccb32b0353c7387f"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#a51b5aa0d68670ec7ccb32b0353c7387f">faiss::gpu::GpuIndexFlat::getNumVecs</a></div><div class="ttdeci">size_t getNumVecs() const </div><div class="ttdoc">Returns the number of vectors we contain. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00143">GpuIndexFlat.cu:143</a></div></div>
<div class="ttc" id="structfaiss_1_1gpu_1_1GpuIndexFlatConfig_html_afd694186c87751937a646f3db2c8ba3d"><div class="ttname"><a href="structfaiss_1_1gpu_1_1GpuIndexFlatConfig.html#afd694186c87751937a646f3db2c8ba3d">faiss::gpu::GpuIndexFlatConfig::useFloat16</a></div><div class="ttdeci">bool useFloat16</div><div class="ttdoc">Whether or not data is stored as float16. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8h_source.html#l00035">GpuIndexFlat.h:35</a></div></div>
<div class="ttc" id="structfaiss_1_1Index_html_a040c6aed1f224f3ea7bf58eebc0c31a4"><div class="ttname"><a href="structfaiss_1_1Index.html#a040c6aed1f224f3ea7bf58eebc0c31a4">faiss::Index::idx_t</a></div><div class="ttdeci">long idx_t</div><div class="ttdoc">all indices are this type </div><div class="ttdef"><b>Definition:</b> <a href="Index_8h_source.html#l00062">Index.h:62</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_aad665a7b5888b4aafd47ae0f8d0e6c40"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#aad665a7b5888b4aafd47ae0f8d0e6c40">faiss::gpu::GpuIndexFlat::minPagedSize_</a></div><div class="ttdeci">size_t minPagedSize_</div><div class="ttdoc">Size above which we page copies from the CPU to GPU. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8h_source.html#l00160">GpuIndexFlat.h:160</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_a67161796f274a7171a67c36bdf1ef1db"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#a67161796f274a7171a67c36bdf1ef1db">faiss::gpu::GpuIndexFlat::reset</a></div><div class="ttdeci">void reset() override</div><div class="ttdoc">Clears all vectors from this index. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00148">GpuIndexFlat.cu:148</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_a0e74fad76628ddfb5ae3dc1d1c69f7e8"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#a0e74fad76628ddfb5ae3dc1d1c69f7e8">faiss::gpu::GpuIndexFlat::train</a></div><div class="ttdeci">void train(Index::idx_t n, const float *x) override</div><div class="ttdoc">This index is not trained, so this does nothing. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00157">GpuIndexFlat.cu:157</a></div></div>
<div class="ttc" id="namespacefaiss_html_afd12191c638da74760ff397cf319752c"><div class="ttname"><a href="namespacefaiss.html#afd12191c638da74760ff397cf319752c">faiss::MetricType</a></div><div class="ttdeci">MetricType</div><div class="ttdoc">Some algorithms support both an inner product version and a L2 search version. </div><div class="ttdef"><b>Definition:</b> <a href="Index_8h_source.html#l00043">Index.h:43</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_ae393840fc80403369452c25c9155e067"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#ae393840fc80403369452c25c9155e067">faiss::gpu::GpuIndexFlat::data_</a></div><div class="ttdeci">FlatIndex * data_</div><div class="ttdoc">Holds our GPU data containing the list of vectors. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8h_source.html#l00163">GpuIndexFlat.h:163</a></div></div>