<div class="line"><a name="l00006"></a><span class="lineno">    6</span> <span class="comment"> * This source code is licensed under the CC-by-NC license found in the</span></div>
<div class="line"><a name="l00007"></a><span class="lineno">    7</span> <span class="comment"> * LICENSE file in the root directory of this source tree.</span></div>
<div class="line"><a name="l00010"></a><span class="lineno">   10</span> <span class="comment">// Copyright 2004-present Facebook. All Rights Reserved.</span></div>
<div class="line"><a name="l00028"></a><span class="lineno">   28</span> <span class="comment">/// Wrapper around the GPU implementation that looks like</span></div>
<div class="line"><a name="l00029"></a><span class="lineno">   29</span> <span class="comment">/// faiss::IndexFlat; copies over centroid data from a given</span></div>
<div class="line"><a name="l00033"></a><span class="lineno">   33</span> <span class="comment"> /// Construct from a pre-existing faiss::IndexFlat instance, copying</span></div>
<div class="line"><a name="l00034"></a><span class="lineno">   34</span> <span class="comment"> /// data over to the given GPU</span></div>
<div class="line"><a name="l00040"></a><span class="lineno">   40</span> <span class="comment"> /// Construct an empty instance that can be added to</span></div>
<div class="line"><a name="l00049"></a><span class="lineno">   49</span> <span class="comment"> /// Set the minimum data size for searches (in MiB) for which we use</span></div>
<div class="line"><a name="l00050"></a><span class="lineno">   50</span> <span class="comment"> /// CPU -> GPU paging</span></div>
<div class="line"><a name="l00053"></a><span class="lineno">   53</span> <span class="comment"> /// Returns the current minimum data size for paged searches</span></div>
<div class="line"><a name="l00056"></a><span class="lineno">   56</span> <span class="comment"> /// Do we store vectors and perform math in float16?</span></div>
<div class="line"><a name="l00059"></a><span class="lineno">   59</span> <span class="comment"> /// Initialize ourselves from the given CPU index; will overwrite</span></div>
<div class="line"><a name="l00060"></a><span class="lineno">   60</span> <span class="comment"> /// all data in ourselves</span></div>
<div class="line"><a name="l00063"></a><span class="lineno">   63</span> <span class="comment"> /// Copy ourselves to the given CPU index; will overwrite all data</span></div>
<div class="line"><a name="l00064"></a><span class="lineno">   64</span> <span class="comment"> /// in the index instance</span></div>
<div class="line"><a name="l00067"></a><span class="lineno">   67</span> <span class="comment"> /// Returns the number of vectors we contain</span></div>
<div class="line"><a name="l00073"></a><span class="lineno">   73</span> <span class="comment"> /// This index is not trained, so this does nothing</span></div>
<div class="line"><a name="l00074"></a><span class="lineno">   74</span> <span class="comment"></span><span class="keywordtype">void</span> <a class="code" href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#a0e74fad76628ddfb5ae3dc1d1c69f7e8">train</a>(<a class="code" href="structfaiss_1_1Index.html#a040c6aed1f224f3ea7bf58eebc0c31a4">Index::idx_t</a> n, <span class="keyword">const</span> <span class="keywordtype">float</span>* x) <span class="keyword">override</span>;</div>
<div class="line"><a name="l00076"></a><span class="lineno">76</span> <span class="comment"> /// `x` can be resident on the CPU or any GPU; the proper copies are</span></div>
<div class="line"><a name="l00080"></a><span class="lineno">   80</span> <span class="comment"> /// `x`, `distances` and `labels` can be resident on the CPU or any</span></div>
<div class="line"><a name="l00081"></a><span class="lineno">   81</span> <span class="comment"> /// GPU; copies are performed as needed</span></div>
<div class="line"><a name="l00088"></a><span class="lineno">   88</span> <span class="comment"> /// Reconstruction methods; prefer the batch reconstruct as it will</span></div>
<div class="line"><a name="l00089"></a><span class="lineno">   89</span> <span class="comment"> /// be more efficient</span></div>
<div class="line"><a name="l00103"></a><span class="lineno">  103</span> <span class="comment"> /// Called from search when the input data is on the CPU;</span></div>
<div class="line"><a name="l00104"></a><span class="lineno">  104</span> <span class="comment"> /// potentially allows for pinned memory usage</span></div>
<div class="line"><a name="l00118"></a><span class="lineno">  118</span> <span class="comment"> /// Size above which we page copies from the CPU to GPU</span></div>
<div class="line"><a name="l00121"></a><span class="lineno">  121</span> <span class="comment"> /// Whether or not we store our vectors in float32 or float16</span></div>
<div class="line"><a name="l00124"></a><span class="lineno">  124</span> <span class="comment"> /// Holds our GPU data containing the list of vectors</span></div>
<div class="line"><a name="l00128"></a><span class="lineno">  128</span> <span class="comment">/// Wrapper around the GPU implementation that looks like</span></div>
<div class="line"><a name="l00129"></a><span class="lineno">  129</span> <span class="comment">/// faiss::IndexFlatL2; copies over centroid data from a given</span></div>
<div class="line"><a name="l00133"></a><span class="lineno">  133</span> <span class="comment"> /// Construct from a pre-existing faiss::IndexFlatL2 instance, copying</span></div>
<div class="line"><a name="l00134"></a><span class="lineno">  134</span> <span class="comment"> /// data over to the given GPU</span></div>
<div class="line"><a name="l00140"></a><span class="lineno">  140</span> <span class="comment"> /// Construct an empty instance that can be added to</span></div>
<div class="line"><a name="l00146"></a><span class="lineno">  146</span> <span class="comment"> /// Initialize ourselves from the given CPU index; will overwrite</span></div>
<div class="line"><a name="l00147"></a><span class="lineno">  147</span> <span class="comment"> /// all data in ourselves</span></div>
<div class="line"><a name="l00150"></a><span class="lineno">  150</span> <span class="comment"> /// Copy ourselves to the given CPU index; will overwrite all data</span></div>
<div class="line"><a name="l00151"></a><span class="lineno">  151</span> <span class="comment"> /// in the index instance</span></div>
<div class="line"><a name="l00155"></a><span class="lineno">  155</span> <span class="comment">/// Wrapper around the GPU implementation that looks like</span></div>
<div class="line"><a name="l00156"></a><span class="lineno">  156</span> <span class="comment">/// faiss::IndexFlatIP; copies over centroid data from a given</span></div>
<div class="line"><a name="l00160"></a><span class="lineno">  160</span> <span class="comment"> /// Construct from a pre-existing faiss::IndexFlatIP instance, copying</span></div>
<div class="line"><a name="l00161"></a><span class="lineno">  161</span> <span class="comment"> /// data over to the given GPU</span></div>
<div class="line"><a name="l00167"></a><span class="lineno">  167</span> <span class="comment"> /// Construct an empty instance that can be added to</span></div>
<div class="line"><a name="l00173"></a><span class="lineno">  173</span> <span class="comment"> /// Initialize ourselves from the given CPU index; will overwrite</span></div>
<div class="line"><a name="l00174"></a><span class="lineno">  174</span> <span class="comment"> /// all data in ourselves</span></div>
<div class="line"><a name="l00177"></a><span class="lineno">  177</span> <span class="comment"> /// Copy ourselves to the given CPU index; will overwrite all data</span></div>
<div class="line"><a name="l00178"></a><span class="lineno">  178</span> <span class="comment"> /// in the index instance</span></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_a44532c7e5165d36e5da57272d37544ae"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#a44532c7e5165d36e5da57272d37544ae">faiss::gpu::GpuIndexFlat::getMinPagingSize</a></div><div class="ttdeci">size_t getMinPagingSize() const </div><div class="ttdoc">Returns the current minimum data size for paged searches. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00071">GpuIndexFlat.cu:71</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1FlatIndex_html"><div class="ttname"><a href="classfaiss_1_1gpu_1_1FlatIndex.html">faiss::gpu::FlatIndex</a></div><div class="ttdoc">Holder of GPU resources for a particular flat index. </div><div class="ttdef"><b>Definition:</b> <a href="FlatIndex_8cuh_source.html#l00023">FlatIndex.cuh:23</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_afedeff5442b6ed94f856cecb8d8e598d"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#afedeff5442b6ed94f856cecb8d8e598d">faiss::gpu::GpuIndexFlat::searchFromCpuPaged_</a></div><div class="ttdeci">void searchFromCpuPaged_(int n, const float *x, int k, float *outDistancesData, int *outIndicesData) const </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00268">GpuIndexFlat.cu:268</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_ad09fbc6ed81eb30533c56a3ef262b022"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#ad09fbc6ed81eb30533c56a3ef262b022">faiss::gpu::GpuIndexFlat::useFloat16_</a></div><div class="ttdeci">const bool useFloat16_</div><div class="ttdoc">Whether or not we store our vectors in float32 or float16. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8h_source.html#l00122">GpuIndexFlat.h:122</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_a51b5aa0d68670ec7ccb32b0353c7387f"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#a51b5aa0d68670ec7ccb32b0353c7387f">faiss::gpu::GpuIndexFlat::getNumVecs</a></div><div class="ttdeci">size_t getNumVecs() const </div><div class="ttdoc">Returns the number of vectors we contain. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00130">GpuIndexFlat.cu:130</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_af9b5ff82eb54a6d0632d515db4ee3994"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#af9b5ff82eb54a6d0632d515db4ee3994">faiss::gpu::GpuIndexFlat::getUseFloat16</a></div><div class="ttdeci">bool getUseFloat16() const </div><div class="ttdoc">Do we store vectors and perform math in float16? </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00076">GpuIndexFlat.cu:76</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlatL2_html_a2056e4356f0628afbee8881eb915406b"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlatL2.html#a2056e4356f0628afbee8881eb915406b">faiss::gpu::GpuIndexFlatL2::GpuIndexFlatL2</a></div><div class="ttdeci">GpuIndexFlatL2(GpuResources *resources, int device, bool useFloat16, faiss::IndexFlatL2 *index)</div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00488">GpuIndexFlat.cu:488</a></div></div>
<div class="ttc" id="structfaiss_1_1Index_html_a040c6aed1f224f3ea7bf58eebc0c31a4"><div class="ttname"><a href="structfaiss_1_1Index.html#a040c6aed1f224f3ea7bf58eebc0c31a4">faiss::Index::idx_t</a></div><div class="ttdeci">long idx_t</div><div class="ttdoc">all indices are this type </div><div class="ttdef"><b>Definition:</b> <a href="Index_8h_source.html#l00064">Index.h:64</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_aca24d09a00743aa3087c1a14a66995b9"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#aca24d09a00743aa3087c1a14a66995b9">faiss::gpu::GpuIndexFlat::add</a></div><div class="ttdeci">void add(Index::idx_t n, const float *x) override</div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00149">GpuIndexFlat.cu:149</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_aad665a7b5888b4aafd47ae0f8d0e6c40"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#aad665a7b5888b4aafd47ae0f8d0e6c40">faiss::gpu::GpuIndexFlat::minPagedSize_</a></div><div class="ttdeci">size_t minPagedSize_</div><div class="ttdoc">Size above which we page copies from the CPU to GPU. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8h_source.html#l00119">GpuIndexFlat.h:119</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlatIP_html_a5e145b6277778b884508eb351efb1814"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlatIP.html#a5e145b6277778b884508eb351efb1814">faiss::gpu::GpuIndexFlatIP::GpuIndexFlatIP</a></div><div class="ttdeci">GpuIndexFlatIP(GpuResources *resources, int device, bool useFloat16, faiss::IndexFlatIP *index)</div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00516">GpuIndexFlat.cu:516</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_a67161796f274a7171a67c36bdf1ef1db"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#a67161796f274a7171a67c36bdf1ef1db">faiss::gpu::GpuIndexFlat::reset</a></div><div class="ttdeci">void reset() override</div><div class="ttdoc">Clears all vectors from this index. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00135">GpuIndexFlat.cu:135</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_a0e74fad76628ddfb5ae3dc1d1c69f7e8"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#a0e74fad76628ddfb5ae3dc1d1c69f7e8">faiss::gpu::GpuIndexFlat::train</a></div><div class="ttdeci">void train(Index::idx_t n, const float *x) override</div><div class="ttdoc">This index is not trained, so this does nothing. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8cu_source.html#l00144">GpuIndexFlat.cu:144</a></div></div>
<div class="ttc" id="namespacefaiss_html_afd12191c638da74760ff397cf319752c"><div class="ttname"><a href="namespacefaiss.html#afd12191c638da74760ff397cf319752c">faiss::MetricType</a></div><div class="ttdeci">MetricType</div><div class="ttdoc">Some algorithms support both an inner product version and an L2 search version. </div><div class="ttdef"><b>Definition:</b> <a href="Index_8h_source.html#l00044">Index.h:44</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1GpuIndexFlat_html_ae393840fc80403369452c25c9155e067"><div class="ttname"><a href="classfaiss_1_1gpu_1_1GpuIndexFlat.html#ae393840fc80403369452c25c9155e067">faiss::gpu::GpuIndexFlat::data_</a></div><div class="ttdeci">FlatIndex * data_</div><div class="ttdoc">Holds our GPU data containing the list of vectors. </div><div class="ttdef"><b>Definition:</b> <a href="GpuIndexFlat_8h_source.html#l00125">GpuIndexFlat.h:125</a></div></div>
</div><!-- fragment --></div><!-- contents -->
<!-- start footer part -->
<hr class="footer"/><address class="footer"><small>
Generated by  <a href="http://www.doxygen.org/index.html">