faiss/docs/html/Tensor_8cuh_source.html

Faiss: /data/users/hoss/faiss/gpu/utils/Tensor.cuh Source File

/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */


#pragma once

#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <initializer_list>

/// Multi-dimensional array class for CUDA device and host usage.
/// Originally from Facebook's fbcunn, since added to the Torch GPU
/// library cutorch as well.

namespace faiss { namespace gpu {

/// Our tensor type
template <typename T,
          int Dim,
          bool InnerContig,
          typename IndexT,
          template <typename U> class PtrTraits>
class Tensor;

/// Type of a subspace of a tensor
namespace detail {
template <typename TensorType,
          int SubDim,
          template <typename U> class PtrTraits>
class SubTensor;
}

namespace traits {

template <typename T>
struct RestrictPtrTraits {
  typedef T* __restrict__ PtrType;
};

template <typename T>
struct DefaultPtrTraits {
  typedef T* PtrType;
};

}

/**
   Templated multi-dimensional array that supports strided access of
   elements. Main access is through `operator[]`; e.g.,
   `tensor[x][y][z]`.

   - `T` is the contained type (e.g., `float`)
   - `Dim` is the tensor rank
   - If `InnerContig` is true, then the tensor is assumed to be innermost
     contiguous, and only operations that make sense on contiguous
     arrays are allowed (e.g., no transpose). Strides are still
     calculated, but the innermost stride is assumed to be 1.
   - `IndexT` is the integer type used for size/stride arrays, and for
     all indexing math. Default is `int`, but for large tensors, `long`
     can be used instead.
   - `PtrTraits` are traits applied to our data pointer (T*). By default,
     this is just T*, but RestrictPtrTraits can be used to apply T*
     __restrict__ for alias-free analysis.
*/
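
// A minimal usage sketch (hypothetical allocation): wrapping 128 x 64 floats
// on the GPU as a 2-D, innermost-contiguous tensor with default int indexing.
//
//   float* devicePtr = nullptr;
//   cudaMalloc(&devicePtr, 128 * 64 * sizeof(float));
//   faiss::gpu::Tensor<float, 2, true> t(devicePtr, {128, 64});
//   // t.getSize(0) == 128, t.getSize(1) == 64, t.getStride(1) == 1
//
// Passing traits::RestrictPtrTraits as the final template argument instead
// marks the underlying pointer __restrict__ for alias-free analysis.
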
template <typename T,
          int Dim,
          bool InnerContig = false,
          typename IndexT = int,
          template <typename U> class PtrTraits = traits::DefaultPtrTraits>
class Tensor {
 public:
  enum { NumDim = Dim };
  typedef T DataType;
  typedef IndexT IndexType;
  enum { IsInnerContig = InnerContig };
  typedef typename PtrTraits<T>::PtrType DataPtrType;
  typedef Tensor<T, Dim, InnerContig, IndexT, PtrTraits> TensorType;

  /// Default constructor
  __host__ __device__ Tensor();

  /// Copy constructor
  __host__ __device__ Tensor(Tensor<T, Dim, InnerContig, IndexT, PtrTraits>& t);

  /// Move constructor
  __host__ __device__ Tensor(Tensor<T, Dim, InnerContig, IndexT, PtrTraits>&& t);

  /// Assignment
  __host__ __device__ Tensor<T, Dim, InnerContig, IndexT, PtrTraits>&
  operator=(Tensor<T, Dim, InnerContig, IndexT, PtrTraits>& t);

  /// Move assignment
  __host__ __device__ Tensor<T, Dim, InnerContig, IndexT, PtrTraits>&
  operator=(Tensor<T, Dim, InnerContig, IndexT, PtrTraits>&& t);

  /// Constructor that calculates strides with no padding
  __host__ __device__ Tensor(DataPtrType data,
                             const IndexT sizes[Dim]);
  __host__ __device__ Tensor(DataPtrType data,
                             std::initializer_list<IndexT> sizes);
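
  // Sketch: the array and initializer_list forms above are equivalent;
  // strides are derived assuming a dense, non-padded layout (hypothetical
  // devicePtr):
  //
  //   int sizes[3] = {16, 8, 4};
  //   faiss::gpu::Tensor<float, 3, true> a(devicePtr, sizes);
  //   faiss::gpu::Tensor<float, 3, true> b(devicePtr, {16, 8, 4});
  //   // a.getStride(0) == 32, a.getStride(1) == 4, a.getStride(2) == 1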

  /// Constructor that takes arbitrary size/stride arrays.
  /// Errors if you attempt to pass non-contiguous strides to a
  /// contiguous tensor.
  __host__ __device__ Tensor(DataPtrType data,
                             const IndexT sizes[Dim],
                             const IndexT strides[Dim]);
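
  // Sketch: wrapping a row-padded (pitched) 2-D allocation in which each row
  // of 100 floats occupies a stride of 128 elements (hypothetical devicePtr):
  //
  //   int sizes[2]   = {64, 100};
  //   int strides[2] = {128, 1};
  //   faiss::gpu::Tensor<float, 2, true> padded(devicePtr, sizes, strides);
  //   // padded.isContiguousDim(1) == true, padded.isContiguous() == false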

  /// Copies a tensor into ourselves; sizes must match
  __host__ void copyFrom(Tensor<T, Dim, InnerContig, IndexT, PtrTraits>& t,
                         cudaStream_t stream);

  /// Copies ourselves into a tensor; sizes must match
  __host__ void copyTo(Tensor<T, Dim, InnerContig, IndexT, PtrTraits>& t,
                       cudaStream_t stream);
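
  // Sketch: copying between two identically-sized tensors; the copy is
  // enqueued on `stream` (hypothetical hostPtr, devicePtr and stream):
  //
  //   faiss::gpu::Tensor<float, 2, true> hostT(hostPtr, {128, 64});
  //   faiss::gpu::Tensor<float, 2, true> devT(devicePtr, {128, 64});
  //   devT.copyFrom(hostT, stream);  // fill the device tensor from the host
  //   devT.copyTo(hostT, stream);    // copy it back out again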

  /// Returns true if the two tensors are of the same dimensionality,
  /// size and stride.
  template <typename OtherT, int OtherDim>
  __host__ __device__ bool
  isSame(const Tensor<OtherT, OtherDim, InnerContig, IndexT, PtrTraits>& rhs) const;

  /// Returns true if the two tensors are of the same dimensionality and size
  template <typename OtherT, int OtherDim>
  __host__ __device__ bool
  isSameSize(const Tensor<OtherT, OtherDim, InnerContig, IndexT, PtrTraits>& rhs) const;

  /// Cast to a tensor of a different type of the same size and
  /// stride. U and our type T must be of the same size
  template <typename U>
  __host__ __device__ Tensor<U, Dim, InnerContig, IndexT, PtrTraits> cast();

  /// Const version of `cast`
  template <typename U>
  __host__ __device__
  const Tensor<U, Dim, InnerContig, IndexT, PtrTraits> cast() const;
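
  // Sketch (for a Tensor<float, 2, true> t): reinterpreting float data as
  // unsigned 32-bit words; sizes and strides carry over unchanged because
  // sizeof(unsigned int) == sizeof(float):
  //
  //   auto bits = t.cast<unsigned int>();
  //   // bits.isSameSize(t) == true, and the strides match as well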

  /// Cast to a tensor of a different type which is potentially a
  /// different size than our type T. Tensor must be aligned and the
  /// innermost dimension must be a size that is a multiple of
  /// sizeof(U) / sizeof(T), and the stride of the innermost dimension
  /// must be contiguous. The stride of all outer dimensions must be a
  /// multiple of sizeof(U) / sizeof(T) as well.
  template <typename U>
  __host__ __device__ Tensor<U, Dim, InnerContig, IndexT, PtrTraits> castResize();

  /// Const version of `castResize`
  template <typename U>
  __host__ __device__ const Tensor<U, Dim, InnerContig, IndexT, PtrTraits>
  castResize() const;

  /// Returns true if we can castResize() this tensor to the new type
  template <typename U>
  __host__ __device__ bool canCastResize() const;
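
  // Sketch: a guarded reinterpretation of a 2-D float tensor t as float4,
  // which shrinks the innermost dimension by a factor of 4 (requires
  // suitable alignment and an innermost size divisible by 4):
  //
  //   if (t.canCastResize<float4>()) {
  //     auto vec = t.castResize<float4>();
  //     // vec.getSize(1) == t.getSize(1) / 4
  //   }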

  /// Attempts to cast this tensor to a tensor of a different IndexT.
  /// Fails if size or stride entries are not representable in the new
  /// IndexT.
  template <typename NewIndexT>
  __host__ Tensor<T, Dim, InnerContig, NewIndexT, PtrTraits>
  castIndexType() const;

  /// Returns true if we can use this indexing type to access all
  /// elements of the tensor
  template <typename NewIndexT>
  __host__ bool canUseIndexType() const;
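
  // Sketch: indexing with long when the element count may overflow int, then
  // narrowing once it is known to fit (hypothetical devicePtr, numRows, dim):
  //
  //   faiss::gpu::Tensor<float, 2, true, long> big(devicePtr, {numRows, dim});
  //   if (big.canUseIndexType<int>()) {
  //     auto small = big.castIndexType<int>();
  //     // all size/stride bookkeeping and indexing math now use int
  //   }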

  /// Returns a raw pointer to the start of our data.
  __host__ __device__ inline DataPtrType data() {
    return data_;
  }

  /// Returns a raw pointer to the end of our data, assuming
  /// continuity
  __host__ __device__ inline DataPtrType end() {
    return data() + numElements();
  }

  /// Returns a raw pointer to the start of our data (const).
  __host__ __device__ inline
  const DataPtrType data() const {
    return data_;
  }

  /// Returns a raw pointer to the end of our data, assuming
  /// continuity (const)
  __host__ __device__ inline DataPtrType end() const {
    return data() + numElements();
  }

  /// Cast to a different datatype
  template <typename U>
  __host__ __device__ inline
  typename PtrTraits<U>::PtrType dataAs() {
    return reinterpret_cast<typename PtrTraits<U>::PtrType>(data_);
  }

  /// Cast to a different datatype
  template <typename U>
  __host__ __device__ inline
  const typename PtrTraits<const U>::PtrType dataAs() const {
    return reinterpret_cast<typename PtrTraits<const U>::PtrType>(data_);
  }

  /// Returns a read/write view of a portion of our tensor.
  __host__ __device__ inline
  detail::SubTensor<TensorType, Dim - 1, PtrTraits>
  operator[](IndexT);

  /// Returns a read/write view of a portion of our tensor (const).
  __host__ __device__ inline
  const detail::SubTensor<TensorType, Dim - 1, PtrTraits>
  operator[](IndexT) const;
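
  // Sketch: element access in a kernel via chained operator[]; each []
  // peels off one dimension until a 0-dimensional SubTensor remains, which
  // reads as a T& and accepts assignment from a value:
  //
  //   __global__ void scale(faiss::gpu::Tensor<float, 2, true> in,
  //                         faiss::gpu::Tensor<float, 2, true> out,
  //                         float alpha) {
  //     int i = blockIdx.y * blockDim.y + threadIdx.y;
  //     int j = blockIdx.x * blockDim.x + threadIdx.x;
  //     if (i < in.getSize(0) && j < in.getSize(1)) {
  //       float v = in[i][j];     // read through the 0-dim SubTensor
  //       out[i][j] = v * alpha;  // write through the 0-dim SubTensor
  //     }
  //   }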

  /// Returns the size of a given dimension, `[0, Dim - 1]`. No bounds
  /// checking.
  __host__ __device__ inline IndexT getSize(int i) const {
    return size_[i];
  }

  /// Returns the stride of a given dimension, `[0, Dim - 1]`. No bounds
  /// checking.
  __host__ __device__ inline IndexT getStride(int i) const {
    return stride_[i];
  }

  /// Returns the total number of elements contained within our data
  /// (product of `getSize(i)`)
  __host__ __device__ size_t numElements() const;

  /// If we are contiguous, returns the total size in bytes of our
  /// data
  __host__ __device__ size_t getSizeInBytes() const {
    return numElements() * sizeof(T);
  }
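
  // Sketch: for a contiguous tensor t, data() and getSizeInBytes() describe
  // the raw span, e.g. for plain CUDA API calls (hypothetical hostBuf and
  // stream):
  //
  //   if (t.isContiguous()) {
  //     cudaMemcpyAsync(hostBuf, t.data(), t.getSizeInBytes(),
  //                     cudaMemcpyDeviceToHost, stream);
  //   }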

  /// Returns the size array.
  __host__ __device__ inline const IndexT* sizes() const {
    return size_;
  }

  /// Returns the stride array.
  __host__ __device__ inline const IndexT* strides() const {
    return stride_;
  }

  /// Returns true if there is no padding within the tensor and no
  /// re-ordering of the dimensions.
  /// ~~~
  /// (stride(i) == size(i + 1) * stride(i + 1)) && stride(dim - 1) == 1
  /// ~~~
  __host__ __device__ bool isContiguous() const;
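
  // Worked example: sizes {2, 3, 4} with strides {12, 4, 1} satisfy the
  // condition above (12 == 3 * 4, 4 == 4 * 1, innermost stride == 1), so the
  // tensor is contiguous; strides {16, 4, 1} (a padded outer dimension) are
  // not contiguous.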

  /// Returns whether a given dimension has only increasing stride
  /// from the previous dimension. A tensor that was permuted by
  /// exchanging size and stride only will fail this check.
  /// If `i == 0` just check `size > 0`. Returns `false` if `stride` is `<= 0`.
  __host__ __device__ bool isConsistentlySized(int i) const;

  // Returns whether at each dimension `stride <= size`.
  // If this is not the case then iterating once over the size space will
  // touch the same memory locations multiple times.
  __host__ __device__ bool isConsistentlySized() const;

  /// Returns true if the given dimension index has no padding
  __host__ __device__ bool isContiguousDim(int i) const;

  /// Returns a tensor of the same dimension after transposing the two
  /// dimensions given. Does not actually move elements; transposition
  /// is made by permuting the size/stride arrays.
  /// If the dimensions are not valid, asserts.
  __host__ __device__ Tensor<T, Dim, InnerContig, IndexT, PtrTraits>
  transpose(int dim1, int dim2) const;
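
  // Sketch: for a Tensor<float, 2> (InnerContig = false) wrapping dense
  // 128 x 64 data, i.e. strides {64, 1}, transpose only permutes the
  // bookkeeping:
  //
  //   auto tt = t.transpose(0, 1);
  //   // tt.getSize(0) == 64,  tt.getSize(1) == 128
  //   // tt.getStride(0) == 1, tt.getStride(1) == 64
  //   // tt.data() == t.data(); no elements are moved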

  /// Upcast a tensor of dimension `D` to some tensor of dimension
  /// D' > D by padding the leading dimensions by 1
  /// e.g., upcasting a 2-d tensor `[2][3]` to a 4-d tensor `[1][1][2][3]`
  template <int NewDim>
  __host__ __device__ Tensor<T, NewDim, InnerContig, IndexT, PtrTraits>
  upcastOuter();

  /// Upcast a tensor of dimension `D` to some tensor of dimension
  /// D' > D by padding the lowest/most varying dimensions by 1
  /// e.g., upcasting a 2-d tensor `[2][3]` to a 4-d tensor `[2][3][1][1]`
  template <int NewDim>
  __host__ __device__ Tensor<T, NewDim, InnerContig, IndexT, PtrTraits>
  upcastInner();
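
  // Sketch: upcasting a 2-d tensor t2 of sizes [2][3] (hypothetical) to four
  // dimensions:
  //
  //   auto outer = t2.upcastOuter<4>();  // sizes become [1][1][2][3]
  //   auto inner = t2.upcastInner<4>();  // sizes become [2][3][1][1]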
<div class="line"><a name="l00293"></a><span class="lineno"> 293</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00294"></a><span class="lineno"> 294</span>&#160;<span class="comment"> /// Downcast a tensor of dimension `D` to some tensor of dimension</span></div>
<div class="line"><a name="l00295"></a><span class="lineno"> 295</span>&#160;<span class="comment"> /// D&#39; &lt; D by collapsing the leading dimensions. asserts if there is</span></div>
<div class="line"><a name="l00296"></a><span class="lineno"> 296</span>&#160;<span class="comment"> /// padding on the leading dimensions.</span></div>
<div class="line"><a name="l00297"></a><span class="lineno"> 297</span>&#160;<span class="comment"></span> <span class="keyword">template</span> &lt;<span class="keywordtype">int</span> NewDim&gt;</div>
<div class="line"><a name="l00298"></a><span class="lineno"> 298</span>&#160; __host__ __device__</div>
<div class="line"><a name="l00299"></a><span class="lineno"> 299</span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html">Tensor&lt;T, NewDim, InnerContig, IndexT, PtrTraits&gt;</a> <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html#a2185b0c1c2c06cc3a4dab6a88eb6d001">downcastOuter</a>();</div>
<div class="line"><a name="l00300"></a><span class="lineno"> 300</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00301"></a><span class="lineno"> 301</span>&#160;<span class="comment"> /// Downcast a tensor of dimension `D` to some tensor of dimension</span></div>
<div class="line"><a name="l00302"></a><span class="lineno"> 302</span>&#160;<span class="comment"> /// D&#39; &lt; D by collapsing the leading dimensions. asserts if there is</span></div>
<div class="line"><a name="l00303"></a><span class="lineno"> 303</span>&#160;<span class="comment"> /// padding on the leading dimensions.</span></div>
<div class="line"><a name="l00304"></a><span class="lineno"> 304</span>&#160;<span class="comment"></span> <span class="keyword">template</span> &lt;<span class="keywordtype">int</span> NewDim&gt;</div>
<div class="line"><a name="l00305"></a><span class="lineno"> 305</span>&#160; __host__ __device__</div>
<div class="line"><a name="l00306"></a><span class="lineno"> 306</span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html">Tensor&lt;T, NewDim, InnerContig, IndexT, PtrTraits&gt;</a> <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html#a6a43125c6f429f28161d59f19eb8e5c5">downcastInner</a>();</div>
<div class="line"><a name="l00307"></a><span class="lineno"> 307</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00308"></a><span class="lineno"> 308</span>&#160;<span class="comment"> /// Returns a tensor that is a view of the `SubDim`-dimensional slice</span></div>
<div class="line"><a name="l00309"></a><span class="lineno"> 309</span>&#160;<span class="comment"> /// of this tensor, starting at `at`.</span></div>
<div class="line"><a name="l00310"></a><span class="lineno"> 310</span>&#160;<span class="comment"></span> <span class="keyword">template</span> &lt;<span class="keywordtype">int</span> SubDim&gt;</div>
<div class="line"><a name="l00311"></a><span class="lineno"> 311</span>&#160; __host__ __device__ <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html">Tensor&lt;T, SubDim, InnerContig, IndexT, PtrTraits&gt;</a></div>
<div class="line"><a name="l00312"></a><span class="lineno"> 312</span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html#a35a63cfa4034a8ee14a999132d8a1828">view</a>(DataPtrType at);</div>
<div class="line"><a name="l00313"></a><span class="lineno"> 313</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00314"></a><span class="lineno"> 314</span>&#160;<span class="comment"> /// Returns a tensor that is a view of the `SubDim`-dimensional slice</span></div>
<div class="line"><a name="l00315"></a><span class="lineno"> 315</span>&#160;<span class="comment"> /// of this tensor, starting where our data begins</span></div>
<div class="line"><a name="l00316"></a><span class="lineno"> 316</span>&#160;<span class="comment"></span> <span class="keyword">template</span> &lt;<span class="keywordtype">int</span> SubDim&gt;</div>
<div class="line"><a name="l00317"></a><span class="lineno"> 317</span>&#160; __host__ __device__ <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html">Tensor&lt;T, SubDim, InnerContig, IndexT, PtrTraits&gt;</a></div>
<div class="line"><a name="l00318"></a><span class="lineno"> 318</span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html#a35a63cfa4034a8ee14a999132d8a1828">view</a>();</div>
<div class="line"><a name="l00319"></a><span class="lineno"> 319</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00320"></a><span class="lineno"> 320</span>&#160;<span class="comment"> /// Returns a tensor of the same dimension that is a view of the</span></div>
<div class="line"><a name="l00321"></a><span class="lineno"> 321</span>&#160;<span class="comment"> /// original tensor with the specified dimension restricted to the</span></div>
<div class="line"><a name="l00322"></a><span class="lineno"> 322</span>&#160;<span class="comment"> /// elements in the range [start, start + size)</span></div>
<div class="line"><a name="l00323"></a><span class="lineno"> 323</span>&#160;<span class="comment"></span> __host__ __device__ <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html">Tensor&lt;T, Dim, InnerContig, IndexT, PtrTraits&gt;</a></div>
<div class="line"><a name="l00324"></a><span class="lineno"> 324</span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html#ac2d0fc7199901a8e0788b58f0970b133">narrowOutermost</a>(IndexT start, IndexT size);</div>
<div class="line"><a name="l00325"></a><span class="lineno"> 325</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00326"></a><span class="lineno"> 326</span>&#160;<span class="comment"> /// Returns a tensor of the same dimension that is a view of the</span></div>
<div class="line"><a name="l00327"></a><span class="lineno"> 327</span>&#160;<span class="comment"> /// original tensor with the specified dimension restricted to the</span></div>
<div class="line"><a name="l00328"></a><span class="lineno"> 328</span>&#160;<span class="comment"> /// elements in the range [start, start + size).</span></div>
<div class="line"><a name="l00329"></a><span class="lineno"> 329</span>&#160;<span class="comment"> /// Can occur in an arbitrary dimension</span></div>
<div class="line"><a name="l00330"></a><span class="lineno"> 330</span>&#160;<span class="comment"></span> __host__ __device__ <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html">Tensor&lt;T, Dim, InnerContig, IndexT, PtrTraits&gt;</a></div>
<div class="line"><a name="l00331"></a><span class="lineno"> 331</span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html#ab6db6bf86dd0f7e877af3a6ae2100fe3">narrow</a>(<span class="keywordtype">int</span> dim, IndexT start, IndexT size);</div>
<div class="line"><a name="l00332"></a><span class="lineno"> 332</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00333"></a><span class="lineno"> 333</span>&#160;<span class="comment"> /// Returns a view of the given tensor expressed as a tensor of a</span></div>
<div class="line"><a name="l00334"></a><span class="lineno"> 334</span>&#160;<span class="comment"> /// different number of dimensions.</span></div>
<div class="line"><a name="l00335"></a><span class="lineno"> 335</span>&#160;<span class="comment"> /// Only works if we are contiguous.</span></div>
<div class="line"><a name="l00336"></a><span class="lineno"> 336</span>&#160;<span class="comment"></span> <span class="keyword">template</span> &lt;<span class="keywordtype">int</span> NewDim&gt;</div>
<div class="line"><a name="l00337"></a><span class="lineno"> 337</span>&#160; __host__ __device__ <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html">Tensor&lt;T, NewDim, InnerContig, IndexT, PtrTraits&gt;</a></div>
<div class="line"><a name="l00338"></a><span class="lineno"> 338</span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html#a35a63cfa4034a8ee14a999132d8a1828">view</a>(std::initializer_list&lt;IndexT&gt; <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html#abc0ecc4f882ee09632b5a06be0619adb">sizes</a>);</div>
<div class="line"><a name="l00339"></a><span class="lineno"> 339</span>&#160;</div>
<div class="line"><a name="l00340"></a><span class="lineno"> 340</span>&#160; <span class="keyword">protected</span>:<span class="comment"></span></div>
<div class="line"><a name="l00341"></a><span class="lineno"> 341</span>&#160;<span class="comment"> /// Raw pointer to where the tensor data begins</span></div>
<div class="line"><a name="l00342"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1Tensor.html#a2ec506a25e46cf7001060a6ba5ae3b94"> 342</a></span>&#160;<span class="comment"></span> DataPtrType <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html#a2ec506a25e46cf7001060a6ba5ae3b94">data_</a>;</div>
<div class="line"><a name="l00343"></a><span class="lineno"> 343</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00344"></a><span class="lineno"> 344</span>&#160;<span class="comment"> /// Array of strides (in sizeof(T) terms) per each dimension</span></div>
<div class="line"><a name="l00345"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1Tensor.html#af4b8fe4b632cdca51ee7972ed93fc3fa"> 345</a></span>&#160;<span class="comment"></span> IndexT <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html#af4b8fe4b632cdca51ee7972ed93fc3fa">stride_</a>[Dim];</div>
<div class="line"><a name="l00346"></a><span class="lineno"> 346</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00347"></a><span class="lineno"> 347</span>&#160;<span class="comment"> /// Size per each dimension</span></div>
<div class="line"><a name="l00348"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1Tensor.html#ad96fbf0f5e7c06a1031b8b18f7fc01d7"> 348</a></span>&#160;<span class="comment"></span> IndexT <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html#ad96fbf0f5e7c06a1031b8b18f7fc01d7">size_</a>[Dim];</div>
<div class="line"><a name="l00349"></a><span class="lineno"> 349</span>&#160;};</div>
<div class="line"><a name="l00350"></a><span class="lineno"> 350</span>&#160;</div>
<div class="line"><a name="l00351"></a><span class="lineno"> 351</span>&#160;<span class="comment">// Utilities for checking a collection of tensors</span></div>
<div class="line"><a name="l00352"></a><span class="lineno"> 352</span>&#160;<span class="keyword">namespace </span>detail {</div>
<div class="line"><a name="l00353"></a><span class="lineno"> 353</span>&#160;</div>
<div class="line"><a name="l00354"></a><span class="lineno"> 354</span>&#160;<span class="keyword">template</span> &lt;<span class="keyword">typename</span> IndexType&gt;</div>
<div class="line"><a name="l00355"></a><span class="lineno"> 355</span>&#160;<span class="keywordtype">bool</span> canUseIndexType() {</div>
<div class="line"><a name="l00356"></a><span class="lineno"> 356</span>&#160; <span class="keywordflow">return</span> <span class="keyword">true</span>;</div>
<div class="line"><a name="l00357"></a><span class="lineno"> 357</span>&#160;}</div>
<div class="line"><a name="l00358"></a><span class="lineno"> 358</span>&#160;</div>
<div class="line"><a name="l00359"></a><span class="lineno"> 359</span>&#160;<span class="keyword">template</span> &lt;<span class="keyword">typename</span> IndexType, <span class="keyword">typename</span> T, <span class="keyword">typename</span>... U&gt;</div>
<div class="line"><a name="l00360"></a><span class="lineno"> 360</span>&#160;<span class="keywordtype">bool</span> canUseIndexType(<span class="keyword">const</span> T&amp; arg, <span class="keyword">const</span> U&amp;... args) {</div>
<div class="line"><a name="l00361"></a><span class="lineno"> 361</span>&#160; <span class="keywordflow">return</span> arg.template canUseIndexType&lt;IndexType&gt;() &amp;&amp;</div>
<div class="line"><a name="l00362"></a><span class="lineno"> 362</span>&#160; canUseIndexType(args...);</div>
<div class="line"><a name="l00363"></a><span class="lineno"> 363</span>&#160;}</div>
<div class="line"><a name="l00364"></a><span class="lineno"> 364</span>&#160;</div>
<div class="line"><a name="l00365"></a><span class="lineno"> 365</span>&#160;} <span class="comment">// namespace detail</span></div>
<div class="line"><a name="l00366"></a><span class="lineno"> 366</span>&#160;</div>
<div class="line"><a name="l00367"></a><span class="lineno"> 367</span>&#160;<span class="keyword">template</span> &lt;<span class="keyword">typename</span> IndexType, <span class="keyword">typename</span>... T&gt;</div>
<div class="line"><a name="l00368"></a><span class="lineno"> 368</span>&#160;<span class="keywordtype">bool</span> canUseIndexType(<span class="keyword">const</span> T&amp;... args) {</div>
<div class="line"><a name="l00369"></a><span class="lineno"> 369</span>&#160; <span class="keywordflow">return</span> detail::canUseIndexType(args...);</div>
<div class="line"><a name="l00370"></a><span class="lineno"> 370</span>&#160;}</div>
<div class="line"><a name="l00371"></a><span class="lineno"> 371</span>&#160;</div>
<div class="line"><a name="l00372"></a><span class="lineno"> 372</span>&#160;<span class="keyword">namespace </span>detail {</div>
<div class="line"><a name="l00373"></a><span class="lineno"> 373</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00374"></a><span class="lineno"> 374</span>&#160;<span class="comment">/// Specialization for a view of a single value (0-dimensional)</span></div>
<div class="line"><a name="l00375"></a><span class="lineno"> 375</span>&#160;<span class="comment"></span><span class="keyword">template</span> &lt;<span class="keyword">typename</span> TensorType, <span class="keyword">template</span> &lt;<span class="keyword">typename</span> U&gt; <span class="keyword">class </span>PtrTraits&gt;</div>
<div class="line"><a name="l00376"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html"> 376</a></span>&#160;<span class="keyword">class </span><a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html">SubTensor</a>&lt;TensorType, 0, PtrTraits&gt; {</div>
<div class="line"><a name="l00377"></a><span class="lineno"> 377</span>&#160; <span class="keyword">public</span>:</div>
<div class="line"><a name="l00378"></a><span class="lineno"> 378</span>&#160; __host__ __device__ <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html">SubTensor&lt;TensorType, 0, PtrTraits&gt;</a></div>
<div class="line"><a name="l00379"></a><span class="lineno"> 379</span>&#160; operator=(<span class="keyword">typename</span> TensorType::DataType val) {</div>
<div class="line"><a name="l00380"></a><span class="lineno"> 380</span>&#160; *<a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a> = val;</div>
<div class="line"><a name="l00381"></a><span class="lineno"> 381</span>&#160; <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div>
<div class="line"><a name="l00382"></a><span class="lineno"> 382</span>&#160; }</div>
<div class="line"><a name="l00383"></a><span class="lineno"> 383</span>&#160;</div>
<div class="line"><a name="l00384"></a><span class="lineno"> 384</span>&#160; <span class="comment">// operator T&amp;</span></div>
<div class="line"><a name="l00385"></a><span class="lineno"> 385</span>&#160; __host__ __device__ <span class="keyword">operator</span> <span class="keyword">typename</span> TensorType::DataType&amp;() {</div>
<div class="line"><a name="l00386"></a><span class="lineno"> 386</span>&#160; <span class="keywordflow">return</span> *<a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>;</div>
<div class="line"><a name="l00387"></a><span class="lineno"> 387</span>&#160; }</div>
<div class="line"><a name="l00388"></a><span class="lineno"> 388</span>&#160;</div>
<div class="line"><a name="l00389"></a><span class="lineno"> 389</span>&#160; <span class="comment">// const operator T&amp; returning const T&amp;</span></div>
<div class="line"><a name="l00390"></a><span class="lineno"> 390</span>&#160; __host__ __device__ <span class="keyword">operator</span> <span class="keyword">const</span> <span class="keyword">typename</span> TensorType::DataType&amp;() <span class="keyword">const</span> {</div>
<div class="line"><a name="l00391"></a><span class="lineno"> 391</span>&#160; <span class="keywordflow">return</span> *<a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>;</div>
<div class="line"><a name="l00392"></a><span class="lineno"> 392</span>&#160; }</div>
<div class="line"><a name="l00393"></a><span class="lineno"> 393</span>&#160;</div>
<div class="line"><a name="l00394"></a><span class="lineno"> 394</span>&#160; <span class="comment">// operator&amp; returning T*</span></div>
<div class="line"><a name="l00395"></a><span class="lineno"> 395</span>&#160; __host__ __device__ <span class="keyword">typename</span> TensorType::DataType* operator&amp;() {</div>
<div class="line"><a name="l00396"></a><span class="lineno"> 396</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>;</div>
<div class="line"><a name="l00397"></a><span class="lineno"> 397</span>&#160; }</div>
<div class="line"><a name="l00398"></a><span class="lineno"> 398</span>&#160;</div>
<div class="line"><a name="l00399"></a><span class="lineno"> 399</span>&#160; <span class="comment">// const operator&amp; returning const T*</span></div>
<div class="line"><a name="l00400"></a><span class="lineno"> 400</span>&#160; __host__ __device__ <span class="keyword">const</span> <span class="keyword">typename</span> TensorType::DataType* operator&amp;()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00401"></a><span class="lineno"> 401</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>;</div>
<div class="line"><a name="l00402"></a><span class="lineno"> 402</span>&#160; }</div>
<div class="line"><a name="l00403"></a><span class="lineno"> 403</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00404"></a><span class="lineno"> 404</span>&#160;<span class="comment"> /// Returns a raw accessor to our slice.</span></div>
<div class="line"><a name="l00405"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#aae8c90b402493f5656f94701157a7417"> 405</a></span>&#160;<span class="comment"></span> __host__ __device__ <span class="keyword">inline</span> <span class="keyword">typename</span> TensorType::DataPtrType <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#aae8c90b402493f5656f94701157a7417">data</a>() {</div>
<div class="line"><a name="l00406"></a><span class="lineno"> 406</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>;</div>
<div class="line"><a name="l00407"></a><span class="lineno"> 407</span>&#160; }</div>
<div class="line"><a name="l00408"></a><span class="lineno"> 408</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00409"></a><span class="lineno"> 409</span>&#160;<span class="comment"> /// Returns a raw accessor to our slice (const).</span></div>
<div class="line"><a name="l00410"></a><span class="lineno"> 410</span>&#160;<span class="comment"></span> __host__ __device__ <span class="keyword">inline</span></div>
<div class="line"><a name="l00411"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#a750047ff919799af43b4861b580c82e3"> 411</a></span>&#160; <span class="keyword">const</span> <span class="keyword">typename</span> TensorType::DataPtrType <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#a750047ff919799af43b4861b580c82e3">data</a>()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00412"></a><span class="lineno"> 412</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>;</div>
<div class="line"><a name="l00413"></a><span class="lineno"> 413</span>&#160; }</div>
<div class="line"><a name="l00414"></a><span class="lineno"> 414</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00415"></a><span class="lineno"> 415</span>&#160;<span class="comment"> /// Cast to a different datatype.</span></div>
<div class="line"><a name="l00416"></a><span class="lineno"> 416</span>&#160;<span class="comment"></span> <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a name="l00417"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#a67bfa92466e03834b7f007cb9cdf8d50"> 417</a></span>&#160; __host__ __device__ T&amp; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#a67bfa92466e03834b7f007cb9cdf8d50">as</a>() {</div>
<div class="line"><a name="l00418"></a><span class="lineno"> 418</span>&#160; <span class="keywordflow">return</span> *dataAs&lt;T&gt;();</div>
<div class="line"><a name="l00419"></a><span class="lineno"> 419</span>&#160; }</div>
<div class="line"><a name="l00420"></a><span class="lineno"> 420</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00421"></a><span class="lineno"> 421</span>&#160;<span class="comment"> /// Cast to a different datatype (const).</span></div>
<div class="line"><a name="l00422"></a><span class="lineno"> 422</span>&#160;<span class="comment"></span> <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a name="l00423"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#ad1d375e64756991dadeb5a1e63ed2cfd"> 423</a></span>&#160; __host__ __device__ <span class="keyword">const</span> T&amp; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#ad1d375e64756991dadeb5a1e63ed2cfd">as</a>()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00424"></a><span class="lineno"> 424</span>&#160; <span class="keywordflow">return</span> *dataAs&lt;T&gt;();</div>
<div class="line"><a name="l00425"></a><span class="lineno"> 425</span>&#160; }</div>
<div class="line"><a name="l00426"></a><span class="lineno"> 426</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00427"></a><span class="lineno"> 427</span>&#160;<span class="comment"> /// Cast to a different datatype</span></div>
<div class="line"><a name="l00428"></a><span class="lineno"> 428</span>&#160;<span class="comment"></span> <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a name="l00429"></a><span class="lineno"> 429</span>&#160; __host__ __device__ <span class="keyword">inline</span></div>
<div class="line"><a name="l00430"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#a23e80555a443797d60ae16d605dacd23"> 430</a></span>&#160; <span class="keyword">typename</span> PtrTraits&lt;T&gt;::PtrType <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#a23e80555a443797d60ae16d605dacd23">dataAs</a>() {</div>
<div class="line"><a name="l00431"></a><span class="lineno"> 431</span>&#160; <span class="keywordflow">return</span> <span class="keyword">reinterpret_cast&lt;</span>typename PtrTraits&lt;T&gt;::PtrType<span class="keyword">&gt;</span>(<a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>);</div>
<div class="line"><a name="l00432"></a><span class="lineno"> 432</span>&#160; }</div>
<div class="line"><a name="l00433"></a><span class="lineno"> 433</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00434"></a><span class="lineno"> 434</span>&#160;<span class="comment"> /// Cast to a different datatype (const)</span></div>
<div class="line"><a name="l00435"></a><span class="lineno"> 435</span>&#160;<span class="comment"></span> <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a name="l00436"></a><span class="lineno"> 436</span>&#160; __host__ __device__ <span class="keyword">inline</span></div>
<div class="line"><a name="l00437"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#a0ec3a48f265de627490e7cdf540e9fc5"> 437</a></span>&#160; <span class="keyword">typename</span> PtrTraits&lt;const T&gt;::PtrType <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#a0ec3a48f265de627490e7cdf540e9fc5">dataAs</a>()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00438"></a><span class="lineno"> 438</span>&#160; <span class="keywordflow">return</span> <span class="keyword">reinterpret_cast&lt;</span>typename PtrTraits&lt;const T&gt;::PtrType<span class="keyword">&gt;</span>(<a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>);</div>
<div class="line"><a name="l00439"></a><span class="lineno"> 439</span>&#160; }</div>
<div class="line"><a name="l00440"></a><span class="lineno"> 440</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00441"></a><span class="lineno"> 441</span>&#160;<span class="comment"> /// Use the texture cache for reads</span></div>
<div class="line"><a name="l00442"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#ac44400045b113c527d6ed59a910f885c"> 442</a></span>&#160;<span class="comment"></span> __device__ <span class="keyword">inline</span> <span class="keyword">typename</span> TensorType::DataType <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#ac44400045b113c527d6ed59a910f885c">ldg</a>()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00443"></a><span class="lineno"> 443</span>&#160;<span class="preprocessor">#if __CUDA_ARCH__ &gt;= 350</span></div>
<div class="line"><a name="l00444"></a><span class="lineno"> 444</span>&#160;<span class="preprocessor"></span> <span class="keywordflow">return</span> __ldg(<a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>);</div>
<div class="line"><a name="l00445"></a><span class="lineno"> 445</span>&#160;<span class="preprocessor">#else</span></div>
<div class="line"><a name="l00446"></a><span class="lineno"> 446</span>&#160;<span class="preprocessor"></span> <span class="keywordflow">return</span> *<a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>;</div>
<div class="line"><a name="l00447"></a><span class="lineno"> 447</span>&#160;<span class="preprocessor">#endif</span></div>
<div class="line"><a name="l00448"></a><span class="lineno"> 448</span>&#160;<span class="preprocessor"></span> }</div>
<div class="line"><a name="l00449"></a><span class="lineno"> 449</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00450"></a><span class="lineno"> 450</span>&#160;<span class="comment"> /// Use the texture cache for reads; cast as a particular type</span></div>
<div class="line"><a name="l00451"></a><span class="lineno"> 451</span>&#160;<span class="comment"></span> <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a name="l00452"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#aa56767066e40a4758e37b26e43449f1d"> 452</a></span>&#160; __device__ <span class="keyword">inline</span> T <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#aa56767066e40a4758e37b26e43449f1d">ldgAs</a>()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00453"></a><span class="lineno"> 453</span>&#160;<span class="preprocessor">#if __CUDA_ARCH__ &gt;= 350</span></div>
<div class="line"><a name="l00454"></a><span class="lineno"> 454</span>&#160;<span class="preprocessor"></span> <span class="keywordflow">return</span> __ldg(dataAs&lt;T&gt;());</div>
<div class="line"><a name="l00455"></a><span class="lineno"> 455</span>&#160;<span class="preprocessor">#else</span></div>
<div class="line"><a name="l00456"></a><span class="lineno"> 456</span>&#160;<span class="preprocessor"></span> <span class="keywordflow">return</span> as&lt;T&gt;();</div>
<div class="line"><a name="l00457"></a><span class="lineno"> 457</span>&#160;<span class="preprocessor">#endif</span></div>
<div class="line"><a name="l00458"></a><span class="lineno"> 458</span>&#160;<span class="preprocessor"></span> }</div>
<div class="line"><a name="l00459"></a><span class="lineno"> 459</span>&#160;</div>
<div class="line"><a name="l00460"></a><span class="lineno"> 460</span>&#160; <span class="keyword">protected</span>:<span class="comment"></span></div>
<div class="line"><a name="l00461"></a><span class="lineno"> 461</span>&#160;<span class="comment"> /// One dimension greater can create us</span></div>
<div class="line"><a name="l00462"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#a5aa8367d35e6c281c29855d7cf24bd6d"> 462</a></span>&#160;<span class="comment"></span> <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html">SubTensor</a>&lt;TensorType, 1, PtrTraits&gt;;</div>
<div class="line"><a name="l00463"></a><span class="lineno"> 463</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00464"></a><span class="lineno"> 464</span>&#160;<span class="comment"> /// Our parent tensor can create us</span></div>
<div class="line"><a name="l00465"></a><span class="lineno"> 465</span>&#160;<span class="comment"></span> <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="classfaiss_1_1gpu_1_1Tensor.html">Tensor</a>&lt;typename TensorType::DataType,</div>
<div class="line"><a name="l00466"></a><span class="lineno"> 466</span>&#160; 1,</div>
<div class="line"><a name="l00467"></a><span class="lineno"> 467</span>&#160; TensorType::IsInnerContig,</div>
<div class="line"><a name="l00468"></a><span class="lineno"> 468</span>&#160; typename TensorType::IndexType,</div>
<div class="line"><a name="l00469"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#af1086b6201fb3ed3bfbbf8a38a3d2913"> 469</a></span>&#160; PtrTraits&gt;;</div>
<div class="line"><a name="l00470"></a><span class="lineno"> 470</span>&#160;</div>
<div class="line"><a name="l00471"></a><span class="lineno"> 471</span>&#160; __host__ __device__ inline <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html">SubTensor</a>(</div>
<div class="line"><a name="l00472"></a><span class="lineno"> 472</span>&#160; TensorType&amp; t,</div>
<div class="line"><a name="l00473"></a><span class="lineno"> 473</span>&#160; typename TensorType::DataPtrType data)</div>
<div class="line"><a name="l00474"></a><span class="lineno"> 474</span>&#160; : <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a2d4e62fb08c180dfe2bde8d47361d61f">tensor_</a>(t),</div>
<div class="line"><a name="l00475"></a><span class="lineno"> 475</span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>(data) {</div>
<div class="line"><a name="l00476"></a><span class="lineno"> 476</span>&#160; }</div>
<div class="line"><a name="l00477"></a><span class="lineno"> 477</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00478"></a><span class="lineno"> 478</span>&#160;<span class="comment"> /// The tensor we&#39;re referencing</span></div>
<div class="line"><a name="l00479"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#a6e0f585a739cd1474ec24f56609d6501"> 479</a></span>&#160;<span class="comment"></span> TensorType&amp; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#a6e0f585a739cd1474ec24f56609d6501">tensor_</a>;</div>
<div class="line"><a name="l00480"></a><span class="lineno"> 480</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00481"></a><span class="lineno"> 481</span>&#160;<span class="comment"> /// Where our value is located</span></div>
<div class="line"><a name="l00482"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#a6a548d4edb57d072be52cd827f055d6d"> 482</a></span>&#160;<span class="comment"></span> <span class="keyword">typename</span> TensorType::DataPtrType <span class="keyword">const</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#a6a548d4edb57d072be52cd827f055d6d">data_</a>;</div>
<div class="line"><a name="l00483"></a><span class="lineno"> 483</span>&#160;};</div>
<div class="line"><a name="l00484"></a><span class="lineno"> 484</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00485"></a><span class="lineno"> 485</span>&#160;<span class="comment">/// A `SubDim`-rank slice of a parent Tensor</span></div>
<div class="line"><a name="l00486"></a><span class="lineno"> 486</span>&#160;<span class="comment"></span><span class="keyword">template</span> &lt;<span class="keyword">typename</span> TensorType,</div>
<div class="line"><a name="l00487"></a><span class="lineno"> 487</span>&#160; <span class="keywordtype">int</span> SubDim,</div>
<div class="line"><a name="l00488"></a><span class="lineno"> 488</span>&#160; <span class="keyword">template</span> &lt;<span class="keyword">typename</span> U&gt; <span class="keyword">class </span>PtrTraits&gt;</div>
<div class="line"><a name="l00489"></a><span class="lineno"> 489</span>&#160;<span class="keyword">class </span><a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html">SubTensor</a> {</div>
<div class="line"><a name="l00490"></a><span class="lineno"> 490</span>&#160; <span class="keyword">public</span>:<span class="comment"></span></div>
<div class="line"><a name="l00491"></a><span class="lineno"> 491</span>&#160;<span class="comment"> /// Returns a view of the data located at our offset (the dimension</span></div>
<div class="line"><a name="l00492"></a><span class="lineno"> 492</span>&#160;<span class="comment"> /// `SubDim` - 1 tensor).</span></div>
<div class="line"><a name="l00493"></a><span class="lineno"> 493</span>&#160;<span class="comment"></span> __host__ __device__ <span class="keyword">inline</span></div>
<div class="line"><a name="l00494"></a><span class="lineno"> 494</span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html">SubTensor</a>&lt;TensorType, SubDim - 1, PtrTraits&gt;</div>
<div class="line"><a name="l00495"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#ac722ca465d06da122898a07ce38276e2"> 495</a></span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#ac722ca465d06da122898a07ce38276e2">operator[]</a>(<span class="keyword">typename</span> TensorType::IndexType index) {</div>
<div class="line"><a name="l00496"></a><span class="lineno"> 496</span>&#160; <span class="keywordflow">if</span> (TensorType::IsInnerContig &amp;&amp; SubDim == 1) {</div>
<div class="line"><a name="l00497"></a><span class="lineno"> 497</span>&#160; <span class="comment">// Innermost dimension is stride 1 for contiguous arrays</span></div>
<div class="line"><a name="l00498"></a><span class="lineno"> 498</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html">SubTensor</a>&lt;TensorType, SubDim - 1, PtrTraits&gt;(</div>
<div class="line"><a name="l00499"></a><span class="lineno"> 499</span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a2d4e62fb08c180dfe2bde8d47361d61f">tensor_</a>, <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a> + index);</div>
<div class="line"><a name="l00500"></a><span class="lineno"> 500</span>&#160; } <span class="keywordflow">else</span> {</div>
<div class="line"><a name="l00501"></a><span class="lineno"> 501</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html">SubTensor</a>&lt;TensorType, SubDim - 1, PtrTraits&gt;(</div>
<div class="line"><a name="l00502"></a><span class="lineno"> 502</span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a2d4e62fb08c180dfe2bde8d47361d61f">tensor_</a>,</div>
<div class="line"><a name="l00503"></a><span class="lineno"> 503</span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a> + index * tensor_.getStride(TensorType::NumDim - SubDim));</div>
<div class="line"><a name="l00504"></a><span class="lineno"> 504</span>&#160; }</div>
<div class="line"><a name="l00505"></a><span class="lineno"> 505</span>&#160; }</div>
<div class="line"><a name="l00506"></a><span class="lineno"> 506</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00507"></a><span class="lineno"> 507</span>&#160;<span class="comment"> /// Returns a view of the data located at our offset (the dimension</span></div>
<div class="line"><a name="l00508"></a><span class="lineno"> 508</span>&#160;<span class="comment"> /// `SubDim` - 1 tensor) (const).</span></div>
<div class="line"><a name="l00509"></a><span class="lineno"> 509</span>&#160;<span class="comment"></span> __host__ __device__ <span class="keyword">inline</span></div>
<div class="line"><a name="l00510"></a><span class="lineno"> 510</span>&#160; <span class="keyword">const</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html">SubTensor</a>&lt;TensorType, SubDim - 1, PtrTraits&gt;</div>
<div class="line"><a name="l00511"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a4c3006fcd82c301b11505620e3e96378"> 511</a></span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a4c3006fcd82c301b11505620e3e96378">operator[]</a>(<span class="keyword">typename</span> TensorType::IndexType index)<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00512"></a><span class="lineno"> 512</span>&#160; <span class="keywordflow">if</span> (TensorType::IsInnerContig &amp;&amp; SubDim == 1) {</div>
<div class="line"><a name="l00513"></a><span class="lineno"> 513</span>&#160; <span class="comment">// Innermost dimension is stride 1 for contiguous arrays</span></div>
<div class="line"><a name="l00514"></a><span class="lineno"> 514</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html">SubTensor</a>&lt;TensorType, SubDim - 1, PtrTraits&gt;(</div>
<div class="line"><a name="l00515"></a><span class="lineno"> 515</span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a2d4e62fb08c180dfe2bde8d47361d61f">tensor_</a>, <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a> + index);</div>
<div class="line"><a name="l00516"></a><span class="lineno"> 516</span>&#160; } <span class="keywordflow">else</span> {</div>
<div class="line"><a name="l00517"></a><span class="lineno"> 517</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html">SubTensor</a>&lt;TensorType, SubDim - 1, PtrTraits&gt;(</div>
<div class="line"><a name="l00518"></a><span class="lineno"> 518</span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a2d4e62fb08c180dfe2bde8d47361d61f">tensor_</a>,</div>
<div class="line"><a name="l00519"></a><span class="lineno"> 519</span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a> + index * tensor_.getStride(TensorType::NumDim - SubDim));</div>
<div class="line"><a name="l00520"></a><span class="lineno"> 520</span>&#160; }</div>
<div class="line"><a name="l00521"></a><span class="lineno"> 521</span>&#160; }</div>
<div class="line"><a name="l00522"></a><span class="lineno"> 522</span>&#160;</div>
<div class="line"><a name="l00523"></a><span class="lineno"> 523</span>&#160; <span class="comment">// operator&amp; returning T*</span></div>
<div class="line"><a name="l00524"></a><span class="lineno"> 524</span>&#160; __host__ __device__ <span class="keyword">typename</span> TensorType::DataType* operator&amp;() {</div>
<div class="line"><a name="l00525"></a><span class="lineno"> 525</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>;</div>
<div class="line"><a name="l00526"></a><span class="lineno"> 526</span>&#160; }</div>
<div class="line"><a name="l00527"></a><span class="lineno"> 527</span>&#160;</div>
<div class="line"><a name="l00528"></a><span class="lineno"> 528</span>&#160; <span class="comment">// const operator&amp; returning const T*</span></div>
<div class="line"><a name="l00529"></a><span class="lineno"> 529</span>&#160; __host__ __device__ <span class="keyword">const</span> <span class="keyword">typename</span> TensorType::DataType* operator&amp;()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00530"></a><span class="lineno"> 530</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>;</div>
<div class="line"><a name="l00531"></a><span class="lineno"> 531</span>&#160; }</div>
<div class="line"><a name="l00532"></a><span class="lineno"> 532</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00533"></a><span class="lineno"> 533</span>&#160;<span class="comment"> /// Returns a raw accessor to our slice.</span></div>
<div class="line"><a name="l00534"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a30dff4e7bea94cd894e17f6bdd7a7eb1"> 534</a></span>&#160;<span class="comment"></span> __host__ __device__ <span class="keyword">inline</span> <span class="keyword">typename</span> TensorType::DataPtrType <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a30dff4e7bea94cd894e17f6bdd7a7eb1">data</a>() {</div>
<div class="line"><a name="l00535"></a><span class="lineno"> 535</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>;</div>
<div class="line"><a name="l00536"></a><span class="lineno"> 536</span>&#160; }</div>
<div class="line"><a name="l00537"></a><span class="lineno"> 537</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00538"></a><span class="lineno"> 538</span>&#160;<span class="comment"> /// Returns a raw accessor to our slice (const).</span></div>
<div class="line"><a name="l00539"></a><span class="lineno"> 539</span>&#160;<span class="comment"></span> __host__ __device__ <span class="keyword">inline</span></div>
<div class="line"><a name="l00540"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a6e7097578ba17c10895ec0dafa385901"> 540</a></span>&#160; <span class="keyword">const</span> <span class="keyword">typename</span> TensorType::DataPtrType <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a6e7097578ba17c10895ec0dafa385901">data</a>()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00541"></a><span class="lineno"> 541</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>;</div>
<div class="line"><a name="l00542"></a><span class="lineno"> 542</span>&#160; }</div>
<div class="line"><a name="l00543"></a><span class="lineno"> 543</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00544"></a><span class="lineno"> 544</span>&#160;<span class="comment"> /// Cast to a different datatype.</span></div>
<div class="line"><a name="l00545"></a><span class="lineno"> 545</span>&#160;<span class="comment"></span> <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a name="l00546"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a0d32586e8f6f22f5f90bca566d901d0b"> 546</a></span>&#160; __host__ __device__ T&amp; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a0d32586e8f6f22f5f90bca566d901d0b">as</a>() {</div>
<div class="line"><a name="l00547"></a><span class="lineno"> 547</span>&#160; <span class="keywordflow">return</span> *dataAs&lt;T&gt;();</div>
<div class="line"><a name="l00548"></a><span class="lineno"> 548</span>&#160; }</div>
<div class="line"><a name="l00549"></a><span class="lineno"> 549</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00550"></a><span class="lineno"> 550</span>&#160;<span class="comment"> /// Cast to a different datatype (const).</span></div>
<div class="line"><a name="l00551"></a><span class="lineno"> 551</span>&#160;<span class="comment"></span> <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a name="l00552"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aefdafcf236e5c49ad3bce1646797f8f2"> 552</a></span>&#160; __host__ __device__ <span class="keyword">const</span> T&amp; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aefdafcf236e5c49ad3bce1646797f8f2">as</a>()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00553"></a><span class="lineno"> 553</span>&#160; <span class="keywordflow">return</span> *dataAs&lt;T&gt;();</div>
<div class="line"><a name="l00554"></a><span class="lineno"> 554</span>&#160; }</div>
<div class="line"><a name="l00555"></a><span class="lineno"> 555</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00556"></a><span class="lineno"> 556</span>&#160;<span class="comment"> /// Cast to a different datatype</span></div>
<div class="line"><a name="l00557"></a><span class="lineno"> 557</span>&#160;<span class="comment"></span> <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a name="l00558"></a><span class="lineno"> 558</span>&#160; __host__ __device__ <span class="keyword">inline</span></div>
<div class="line"><a name="l00559"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a9825bed624c3abb6337a1ab7654d7db7"> 559</a></span>&#160; <span class="keyword">typename</span> PtrTraits&lt;T&gt;::PtrType <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a9825bed624c3abb6337a1ab7654d7db7">dataAs</a>() {</div>
<div class="line"><a name="l00560"></a><span class="lineno"> 560</span>&#160; <span class="keywordflow">return</span> <span class="keyword">reinterpret_cast&lt;</span>typename PtrTraits&lt;T&gt;::PtrType<span class="keyword">&gt;</span>(<a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>);</div>
<div class="line"><a name="l00561"></a><span class="lineno"> 561</span>&#160; }</div>
<div class="line"><a name="l00562"></a><span class="lineno"> 562</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00563"></a><span class="lineno"> 563</span>&#160;<span class="comment"> /// Cast to a different datatype (const)</span></div>
<div class="line"><a name="l00564"></a><span class="lineno"> 564</span>&#160;<span class="comment"></span> <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a name="l00565"></a><span class="lineno"> 565</span>&#160; __host__ __device__ <span class="keyword">inline</span></div>
<div class="line"><a name="l00566"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a485abbadb5b5de23e88822366857a78f"> 566</a></span>&#160; <span class="keyword">typename</span> PtrTraits&lt;const T&gt;::PtrType <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a485abbadb5b5de23e88822366857a78f">dataAs</a>()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00567"></a><span class="lineno"> 567</span>&#160; <span class="keywordflow">return</span> <span class="keyword">reinterpret_cast&lt;</span>typename PtrTraits&lt;const T&gt;::PtrType<span class="keyword">&gt;</span>(<a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>);</div>
<div class="line"><a name="l00568"></a><span class="lineno"> 568</span>&#160; }</div>
<div class="line"><a name="l00569"></a><span class="lineno"> 569</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00570"></a><span class="lineno"> 570</span>&#160;<span class="comment"> /// Use the texture cache for reads</span></div>
<div class="line"><a name="l00571"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a760782118b71504348d073ca1c92843a"> 571</a></span>&#160;<span class="comment"></span> __device__ <span class="keyword">inline</span> <span class="keyword">typename</span> TensorType::DataType <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a760782118b71504348d073ca1c92843a">ldg</a>()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00572"></a><span class="lineno"> 572</span>&#160;<span class="preprocessor">#if __CUDA_ARCH__ &gt;= 350</span></div>
<div class="line"><a name="l00573"></a><span class="lineno"> 573</span>&#160;<span class="preprocessor"></span> <span class="keywordflow">return</span> __ldg(<a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>);</div>
<div class="line"><a name="l00574"></a><span class="lineno"> 574</span>&#160;<span class="preprocessor">#else</span></div>
<div class="line"><a name="l00575"></a><span class="lineno"> 575</span>&#160;<span class="preprocessor"></span> <span class="keywordflow">return</span> *<a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>;</div>
<div class="line"><a name="l00576"></a><span class="lineno"> 576</span>&#160;<span class="preprocessor">#endif</span></div>
<div class="line"><a name="l00577"></a><span class="lineno"> 577</span>&#160;<span class="preprocessor"></span> }</div>
<div class="line"><a name="l00578"></a><span class="lineno"> 578</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00579"></a><span class="lineno"> 579</span>&#160;<span class="comment"> /// Use the texture cache for reads; cast as a particular type</span></div>
<div class="line"><a name="l00580"></a><span class="lineno"> 580</span>&#160;<span class="comment"></span> <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a name="l00581"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a3f29fab81a72a8bdd93901851af98ec7"> 581</a></span>&#160; __device__ <span class="keyword">inline</span> T <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a3f29fab81a72a8bdd93901851af98ec7">ldgAs</a>()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00582"></a><span class="lineno"> 582</span>&#160;<span class="preprocessor">#if __CUDA_ARCH__ &gt;= 350</span></div>
<div class="line"><a name="l00583"></a><span class="lineno"> 583</span>&#160;<span class="preprocessor"></span> <span class="keywordflow">return</span> __ldg(dataAs&lt;T&gt;());</div>
<div class="line"><a name="l00584"></a><span class="lineno"> 584</span>&#160;<span class="preprocessor">#else</span></div>
<div class="line"><a name="l00585"></a><span class="lineno"> 585</span>&#160;<span class="preprocessor"></span> <span class="keywordflow">return</span> as&lt;T&gt;();</div>
<div class="line"><a name="l00586"></a><span class="lineno"> 586</span>&#160;<span class="preprocessor">#endif</span></div>
<div class="line"><a name="l00587"></a><span class="lineno"> 587</span>&#160;<span class="preprocessor"></span> }</div>
<div class="line"><a name="l00588"></a><span class="lineno"> 588</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00589"></a><span class="lineno"> 589</span>&#160;<span class="comment"> /// Returns a tensor that is a view of the SubDim-dimensional slice</span></div>
<div class="line"><a name="l00590"></a><span class="lineno"> 590</span>&#160;<span class="comment"> /// of this tensor, starting where our data begins</span></div>
<div class="line"><a name="l00591"></a><span class="lineno"> 591</span>&#160;<span class="comment"></span> <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html">Tensor</a>&lt;<span class="keyword">typename</span> TensorType::DataType,</div>
<div class="line"><a name="l00592"></a><span class="lineno"> 592</span>&#160; SubDim,</div>
<div class="line"><a name="l00593"></a><span class="lineno"> 593</span>&#160; TensorType::IsInnerContig,</div>
<div class="line"><a name="l00594"></a><span class="lineno"> 594</span>&#160; <span class="keyword">typename</span> TensorType::IndexType,</div>
<div class="line"><a name="l00595"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a62aa5465abe64321c40763f74cfb028a"> 595</a></span>&#160; PtrTraits&gt; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a62aa5465abe64321c40763f74cfb028a">view</a>() {</div>
<div class="line"><a name="l00596"></a><span class="lineno"> 596</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a2d4e62fb08c180dfe2bde8d47361d61f">tensor_</a>.template view&lt;SubDim&gt;(<a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>);</div>
<div class="line"><a name="l00597"></a><span class="lineno"> 597</span>&#160; }</div>
<div class="line"><a name="l00598"></a><span class="lineno"> 598</span>&#160;</div>
<div class="line"><a name="l00599"></a><span class="lineno"> 599</span>&#160; <span class="keyword">protected</span>:<span class="comment"></span></div>
<div class="line"><a name="l00600"></a><span class="lineno"> 600</span>&#160;<span class="comment"> /// One dimension greater can create us</span></div>
<div class="line"><a name="l00601"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a33929e7387099e4e49be139ba467ebfc"> 601</a></span>&#160;<span class="comment"></span> <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html">SubTensor</a>&lt;TensorType, SubDim + 1, PtrTraits&gt;;</div>
<div class="line"><a name="l00602"></a><span class="lineno"> 602</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00603"></a><span class="lineno"> 603</span>&#160;<span class="comment"> /// Our parent tensor can create us</span></div>
<div class="line"><a name="l00604"></a><span class="lineno"> 604</span>&#160;<span class="comment"></span> <span class="keyword">friend</span> <span class="keyword">class</span></div>
<div class="line"><a name="l00605"></a><span class="lineno"> 605</span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html">Tensor</a>&lt;<span class="keyword">typename</span> TensorType::DataType,</div>
<div class="line"><a name="l00606"></a><span class="lineno"> 606</span>&#160; TensorType::NumDim,</div>
<div class="line"><a name="l00607"></a><span class="lineno"> 607</span>&#160; TensorType::IsInnerContig,</div>
<div class="line"><a name="l00608"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#ab7d47a355bc7a671447c9bc86919c2bc"> 608</a></span>&#160; <span class="keyword">typename</span> TensorType::IndexType,</div>
<div class="line"><a name="l00609"></a><span class="lineno"> 609</span>&#160; PtrTraits&gt;;</div>
<div class="line"><a name="l00610"></a><span class="lineno"> 610</span>&#160;</div>
<div class="line"><a name="l00611"></a><span class="lineno"> 611</span>&#160; __host__ __device__ <span class="keyword">inline</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html">SubTensor</a>(</div>
<div class="line"><a name="l00612"></a><span class="lineno"> 612</span>&#160; TensorType&amp; t,</div>
<div class="line"><a name="l00613"></a><span class="lineno"> 613</span>&#160; <span class="keyword">typename</span> TensorType::DataPtrType <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a30dff4e7bea94cd894e17f6bdd7a7eb1">data</a>)</div>
<div class="line"><a name="l00614"></a><span class="lineno"> 614</span>&#160; : <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a2d4e62fb08c180dfe2bde8d47361d61f">tensor_</a>(t),</div>
<div class="line"><a name="l00615"></a><span class="lineno"> 615</span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>(data) {</div>
<div class="line"><a name="l00616"></a><span class="lineno"> 616</span>&#160; }</div>
<div class="line"><a name="l00617"></a><span class="lineno"> 617</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00618"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a2d4e62fb08c180dfe2bde8d47361d61f"> 618</a></span>&#160;<span class="comment"> /// The tensor we&#39;re referencing</span></div>
<div class="line"><a name="l00619"></a><span class="lineno"> 619</span>&#160;<span class="comment"></span> TensorType&amp; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a2d4e62fb08c180dfe2bde8d47361d61f">tensor_</a>;</div>
<div class="line"><a name="l00620"></a><span class="lineno"> 620</span>&#160;<span class="comment"></span></div>
<div class="line"><a name="l00621"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36"> 621</a></span>&#160;<span class="comment"> /// The start of our sub-region</span></div>
<div class="line"><a name="l00622"></a><span class="lineno"> 622</span>&#160;<span class="comment"></span> <span class="keyword">typename</span> TensorType::DataPtrType <span class="keyword">const</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">data_</a>;</div>
<div class="line"><a name="l00623"></a><span class="lineno"> 623</span>&#160;};</div>
<div class="line"><a name="l00624"></a><span class="lineno"> 624</span>&#160;</div>
<div class="line"><a name="l00625"></a><span class="lineno"> 625</span>&#160;} <span class="comment">// namespace detail</span></div>
<div class="line"><a name="l00626"></a><span class="lineno"> 626</span>&#160;</div>
<div class="line"><a name="l00627"></a><span class="lineno"> 627</span>&#160;<span class="keyword">template</span> &lt;<span class="keyword">typename</span> T, <span class="keywordtype">int</span> Dim, <span class="keywordtype">bool</span> InnerContig,</div>
<div class="line"><a name="l00628"></a><span class="lineno"> 628</span>&#160; <span class="keyword">typename</span> IndexT, <span class="keyword">template</span> &lt;<span class="keyword">typename</span> U&gt; <span class="keyword">class </span>PtrTraits&gt;</div>
<div class="line"><a name="l00629"></a><span class="lineno"> 629</span>&#160;__host__ __device__ <span class="keyword">inline</span></div>
<div class="line"><a name="l00630"></a><span class="lineno"> 630</span>&#160;<a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html">detail::SubTensor&lt;Tensor&lt;T, Dim, InnerContig, IndexT, PtrTraits&gt;</a>,</div>
<div class="line"><a name="l00631"></a><span class="lineno"> 631</span>&#160; Dim - 1, PtrTraits&gt;</div>
<div class="line"><a name="l00632"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1Tensor.html#a7926dc43f0fa998d16b9497676e118e6"> 632</a></span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html#a7926dc43f0fa998d16b9497676e118e6">Tensor&lt;T, Dim, InnerContig, IndexT, PtrTraits&gt;::operator[]</a>(IndexT index) {</div>
<div class="line"><a name="l00633"></a><span class="lineno"> 633</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html">detail::SubTensor</a>&lt;<a class="code" href="classfaiss_1_1gpu_1_1Tensor.html">TensorType</a>, Dim - 1, PtrTraits&gt;(</div>
<div class="line"><a name="l00634"></a><span class="lineno"> 634</span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html">detail::SubTensor&lt;TensorType, Dim, PtrTraits&gt;</a>(</div>
<div class="line"><a name="l00635"></a><span class="lineno"> 635</span>&#160; *<span class="keyword">this</span>, data_)[index]);</div>
<div class="line"><a name="l00636"></a><span class="lineno"> 636</span>&#160;}</div>
<div class="line"><a name="l00637"></a><span class="lineno"> 637</span>&#160;</div>
<div class="line"><a name="l00638"></a><span class="lineno"> 638</span>&#160;<span class="keyword">template</span> &lt;<span class="keyword">typename</span> T, <span class="keywordtype">int</span> Dim, <span class="keywordtype">bool</span> InnerContig,</div>
<div class="line"><a name="l00639"></a><span class="lineno"> 639</span>&#160; <span class="keyword">typename</span> IndexT, <span class="keyword">template</span> &lt;<span class="keyword">typename</span> U&gt; <span class="keyword">class </span>PtrTraits&gt;</div>
<div class="line"><a name="l00640"></a><span class="lineno"> 640</span>&#160;__host__ __device__ <span class="keyword">inline</span></div>
<div class="line"><a name="l00641"></a><span class="lineno"> 641</span>&#160;<span class="keyword">const</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html">detail::SubTensor&lt;Tensor&lt;T, Dim, InnerContig, IndexT, PtrTraits&gt;</a>,</div>
<div class="line"><a name="l00642"></a><span class="lineno"> 642</span>&#160; Dim - 1, PtrTraits&gt;</div>
<div class="line"><a name="l00643"></a><span class="lineno"><a class="line" href="classfaiss_1_1gpu_1_1Tensor.html#a0c8ec0ba81275d369caac6f0324d80bd"> 643</a></span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1Tensor.html#a7926dc43f0fa998d16b9497676e118e6">Tensor&lt;T, Dim, InnerContig, IndexT, PtrTraits&gt;::operator[]</a>(IndexT index)<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00644"></a><span class="lineno"> 644</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html">detail::SubTensor</a>&lt;<a class="code" href="classfaiss_1_1gpu_1_1Tensor.html">TensorType</a>, Dim - 1, PtrTraits&gt;(</div>
<div class="line"><a name="l00645"></a><span class="lineno"> 645</span>&#160; <a class="code" href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html">detail::SubTensor&lt;TensorType, Dim, PtrTraits&gt;</a>(</div>
<div class="line"><a name="l00646"></a><span class="lineno"> 646</span>&#160; <span class="keyword">const_cast&lt;</span>TensorType&amp;<span class="keyword">&gt;</span>(*this), data_)[index]);</div>
<div class="line"><a name="l00647"></a><span class="lineno"> 647</span>&#160;}</div>
<div class="line"><a name="l00648"></a><span class="lineno"> 648</span>&#160;</div>
<div class="line"><a name="l00649"></a><span class="lineno"> 649</span>&#160;} } <span class="comment">// namespace</span></div>
<div class="line"><a name="l00650"></a><span class="lineno"> 650</span>&#160;</div>
<div class="line"><a name="l00651"></a><span class="lineno"> 651</span>&#160;<span class="preprocessor">#include &quot;Tensor-inl.cuh&quot;</span></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a309eb97e9c6dbfdecf383343c072d38c"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a309eb97e9c6dbfdecf383343c072d38c">faiss::gpu::Tensor::upcastOuter</a></div><div class="ttdeci">__host__ __device__ Tensor&lt; T, NewDim, InnerContig, IndexT, PtrTraits &gt; upcastOuter()</div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00488">Tensor-inl.cuh:488</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a7926dc43f0fa998d16b9497676e118e6"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a7926dc43f0fa998d16b9497676e118e6">faiss::gpu::Tensor::operator[]</a></div><div class="ttdeci">__host__ __device__ detail::SubTensor&lt; TensorType, Dim-1, PtrTraits &gt; operator[](IndexT)</div><div class="ttdoc">Returns a read/write view of a portion of our tensor. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00632">Tensor.cuh:632</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a9f0c817e9751fe02926c2346a97f0350"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a9f0c817e9751fe02926c2346a97f0350">faiss::gpu::Tensor::castIndexType</a></div><div class="ttdeci">__host__ Tensor&lt; T, Dim, InnerContig, NewIndexT, PtrTraits &gt; castIndexType() const </div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00339">Tensor-inl.cuh:339</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_html_a62aa5465abe64321c40763f74cfb028a"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a62aa5465abe64321c40763f74cfb028a">faiss::gpu::detail::SubTensor::view</a></div><div class="ttdeci">Tensor&lt; typename TensorType::DataType, SubDim, TensorType::IsInnerContig, typename TensorType::IndexType, PtrTraits &gt; view()</div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00595">Tensor.cuh:595</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a3f4e3c6afdf4a03308756b6ae6462c38"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a3f4e3c6afdf4a03308756b6ae6462c38">faiss::gpu::Tensor::isContiguousDim</a></div><div class="ttdeci">__host__ __device__ bool isContiguousDim(int i) const </div><div class="ttdoc">Returns true if the given dimension index has no padding. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00445">Tensor-inl.cuh:445</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a2894f8fdfab8ec3245364a6f9e8a5259"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a2894f8fdfab8ec3245364a6f9e8a5259">faiss::gpu::Tensor::cast</a></div><div class="ttdeci">__host__ __device__ Tensor&lt; U, Dim, InnerContig, IndexT, PtrTraits &gt; cast()</div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00251">Tensor-inl.cuh:251</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a0ba9ab7c1676b7a41a6e6b2e5a490d2f"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a0ba9ab7c1676b7a41a6e6b2e5a490d2f">faiss::gpu::Tensor::numElements</a></div><div class="ttdeci">__host__ __device__ size_t numElements() const </div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00386">Tensor-inl.cuh:386</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a2185b0c1c2c06cc3a4dab6a88eb6d001"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a2185b0c1c2c06cc3a4dab6a88eb6d001">faiss::gpu::Tensor::downcastOuter</a></div><div class="ttdeci">__host__ __device__ Tensor&lt; T, NewDim, InnerContig, IndexT, PtrTraits &gt; downcastOuter()</div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00544">Tensor-inl.cuh:544</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_html_a485abbadb5b5de23e88822366857a78f"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a485abbadb5b5de23e88822366857a78f">faiss::gpu::detail::SubTensor::dataAs</a></div><div class="ttdeci">__host__ __device__ PtrTraits&lt; const T &gt;::PtrType dataAs() const </div><div class="ttdoc">Cast to a different datatype (const) </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00566">Tensor.cuh:566</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a76383e7f62a826ba55955bd3d1dddce7"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a76383e7f62a826ba55955bd3d1dddce7">faiss::gpu::Tensor::dataAs</a></div><div class="ttdeci">__host__ __device__ const PtrTraits&lt; const U &gt;::PtrType dataAs() const </div><div class="ttdoc">Cast to a different datatype. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00206">Tensor.cuh:206</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a38adf20225c9f8f764aafe273c4ee122"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a38adf20225c9f8f764aafe273c4ee122">faiss::gpu::Tensor::dataAs</a></div><div class="ttdeci">__host__ __device__ PtrTraits&lt; U &gt;::PtrType dataAs()</div><div class="ttdoc">Cast to a different datatype. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00199">Tensor.cuh:199</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_html_a3f29fab81a72a8bdd93901851af98ec7"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a3f29fab81a72a8bdd93901851af98ec7">faiss::gpu::detail::SubTensor::ldgAs</a></div><div class="ttdeci">__device__ T ldgAs() const </div><div class="ttdoc">Use the texture cache for reads; cast as a particular type. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00581">Tensor.cuh:581</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a7fbbf51f8ef6bea9cc863a86e20d994e"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a7fbbf51f8ef6bea9cc863a86e20d994e">faiss::gpu::Tensor::canCastResize</a></div><div class="ttdeci">__host__ __device__ bool canCastResize() const </div><div class="ttdoc">Returns true if we can castResize() this tensor to the new type. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00307">Tensor-inl.cuh:307</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a2ec506a25e46cf7001060a6ba5ae3b94"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a2ec506a25e46cf7001060a6ba5ae3b94">faiss::gpu::Tensor::data_</a></div><div class="ttdeci">DataPtrType data_</div><div class="ttdoc">Raw pointer to where the tensor data begins. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00342">Tensor.cuh:342</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_html_a9825bed624c3abb6337a1ab7654d7db7"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a9825bed624c3abb6337a1ab7654d7db7">faiss::gpu::detail::SubTensor::dataAs</a></div><div class="ttdeci">__host__ __device__ PtrTraits&lt; T &gt;::PtrType dataAs()</div><div class="ttdoc">Cast to a different datatype. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00559">Tensor.cuh:559</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a8ae7b3f95991125a5648c3b78afd40bd"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a8ae7b3f95991125a5648c3b78afd40bd">faiss::gpu::Tensor::Tensor</a></div><div class="ttdeci">__host__ __device__ Tensor()</div><div class="ttdoc">Default constructor. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00018">Tensor-inl.cuh:18</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a7e6b9cd8cc3cc0bfe39bd3fed7733e51"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a7e6b9cd8cc3cc0bfe39bd3fed7733e51">faiss::gpu::Tensor::end</a></div><div class="ttdeci">__host__ __device__ DataPtrType end() const </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00192">Tensor.cuh:192</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4_html_a0ec3a48f265de627490e7cdf540e9fc5"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#a0ec3a48f265de627490e7cdf540e9fc5">faiss::gpu::detail::SubTensor&lt; TensorType, 0, PtrTraits &gt;::dataAs</a></div><div class="ttdeci">__host__ __device__ PtrTraits&lt; const T &gt;::PtrType dataAs() const </div><div class="ttdoc">Cast to a different datatype (const) </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00437">Tensor.cuh:437</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_aee5cf46d16344e2a055cf63adb07d24a"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#aee5cf46d16344e2a055cf63adb07d24a">faiss::gpu::Tensor::upcastInner</a></div><div class="ttdeci">__host__ __device__ Tensor&lt; T, NewDim, InnerContig, IndexT, PtrTraits &gt; upcastInner()</div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00517">Tensor-inl.cuh:517</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_html_a6e7097578ba17c10895ec0dafa385901"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a6e7097578ba17c10895ec0dafa385901">faiss::gpu::detail::SubTensor::data</a></div><div class="ttdeci">__host__ __device__ const TensorType::DataPtrType data() const </div><div class="ttdoc">Returns a raw accessor to our slice (const). </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00540">Tensor.cuh:540</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4_html_ac44400045b113c527d6ed59a910f885c"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#ac44400045b113c527d6ed59a910f885c">faiss::gpu::detail::SubTensor&lt; TensorType, 0, PtrTraits &gt;::ldg</a></div><div class="ttdeci">__device__ TensorType::DataType ldg() const </div><div class="ttdoc">Use the texture cache for reads. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00442">Tensor.cuh:442</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_ac2d0fc7199901a8e0788b58f0970b133"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#ac2d0fc7199901a8e0788b58f0970b133">faiss::gpu::Tensor::narrowOutermost</a></div><div class="ttdeci">__host__ __device__ Tensor&lt; T, Dim, InnerContig, IndexT, PtrTraits &gt; narrowOutermost(IndexT start, IndexT size)</div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00659">Tensor-inl.cuh:659</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_af4b8fe4b632cdca51ee7972ed93fc3fa"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#af4b8fe4b632cdca51ee7972ed93fc3fa">faiss::gpu::Tensor::stride_</a></div><div class="ttdeci">IndexT stride_[Dim]</div><div class="ttdoc">Array of strides (in sizeof(T) terms) per each dimension. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00345">Tensor.cuh:345</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4_html_a67bfa92466e03834b7f007cb9cdf8d50"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#a67bfa92466e03834b7f007cb9cdf8d50">faiss::gpu::detail::SubTensor&lt; TensorType, 0, PtrTraits &gt;::as</a></div><div class="ttdeci">__host__ __device__ T &amp; as()</div><div class="ttdoc">Cast to a different datatype. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00417">Tensor.cuh:417</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_html_a0d32586e8f6f22f5f90bca566d901d0b"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a0d32586e8f6f22f5f90bca566d901d0b">faiss::gpu::detail::SubTensor::as</a></div><div class="ttdeci">__host__ __device__ T &amp; as()</div><div class="ttdoc">Cast to a different datatype. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00546">Tensor.cuh:546</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4_html_a6e0f585a739cd1474ec24f56609d6501"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#a6e0f585a739cd1474ec24f56609d6501">faiss::gpu::detail::SubTensor&lt; TensorType, 0, PtrTraits &gt;::tensor_</a></div><div class="ttdeci">TensorType &amp; tensor_</div><div class="ttdoc">The tensor we&amp;#39;re referencing. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00479">Tensor.cuh:479</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a09019c54911db891c9321fd3b34509c2"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a09019c54911db891c9321fd3b34509c2">faiss::gpu::Tensor::isContiguous</a></div><div class="ttdeci">__host__ __device__ bool isContiguous() const </div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00399">Tensor-inl.cuh:399</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_afde15195e51318fd1811ea402f63c1ab"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#afde15195e51318fd1811ea402f63c1ab">faiss::gpu::Tensor::data</a></div><div class="ttdeci">__host__ __device__ const DataPtrType data() const </div><div class="ttdoc">Returns a raw pointer to the start of our data (const). </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00186">Tensor.cuh:186</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_html_a760782118b71504348d073ca1c92843a"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a760782118b71504348d073ca1c92843a">faiss::gpu::detail::SubTensor::ldg</a></div><div class="ttdeci">__device__ TensorType::DataType ldg() const </div><div class="ttdoc">Use the texture cache for reads. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00571">Tensor.cuh:571</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_abc0ecc4f882ee09632b5a06be0619adb"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#abc0ecc4f882ee09632b5a06be0619adb">faiss::gpu::Tensor::sizes</a></div><div class="ttdeci">__host__ __device__ const IndexT * sizes() const </div><div class="ttdoc">Returns the size array. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00243">Tensor.cuh:243</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_html_aa4aa193f6140219872839ad8665b5d36"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aa4aa193f6140219872839ad8665b5d36">faiss::gpu::detail::SubTensor::data_</a></div><div class="ttdeci">TensorType::DataPtrType const data_</div><div class="ttdoc">The start of our sub-region. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00621">Tensor.cuh:621</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a6dc00c182a92389b74c89ba7fcab40d3"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a6dc00c182a92389b74c89ba7fcab40d3">faiss::gpu::Tensor::copyFrom</a></div><div class="ttdeci">__host__ void copyFrom(Tensor&lt; T, Dim, InnerContig, IndexT, PtrTraits &gt; &amp;t, cudaStream_t stream)</div><div class="ttdoc">Copies a tensor into ourselves; sizes must match. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00130">Tensor-inl.cuh:130</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_ad96fbf0f5e7c06a1031b8b18f7fc01d7"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#ad96fbf0f5e7c06a1031b8b18f7fc01d7">faiss::gpu::Tensor::size_</a></div><div class="ttdeci">IndexT size_[Dim]</div><div class="ttdoc">Size per each dimension. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00348">Tensor.cuh:348</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a0d831a352531281e06250cc6fe52a38a"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a0d831a352531281e06250cc6fe52a38a">faiss::gpu::Tensor::operator=</a></div><div class="ttdeci">__host__ __device__ Tensor&lt; T, Dim, InnerContig, IndexT, PtrTraits &gt; &amp; operator=(Tensor&lt; T, Dim, InnerContig, IndexT, PtrTraits &gt; &amp;t)</div><div class="ttdoc">Assignment. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00048">Tensor-inl.cuh:48</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4_html_aa56767066e40a4758e37b26e43449f1d"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#aa56767066e40a4758e37b26e43449f1d">faiss::gpu::detail::SubTensor&lt; TensorType, 0, PtrTraits &gt;::ldgAs</a></div><div class="ttdeci">__device__ T ldgAs() const </div><div class="ttdoc">Use the texture cache for reads; cast as a particular type. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00452">Tensor.cuh:452</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_html_a4c3006fcd82c301b11505620e3e96378"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a4c3006fcd82c301b11505620e3e96378">faiss::gpu::detail::SubTensor::operator[]</a></div><div class="ttdeci">__host__ __device__ const SubTensor&lt; TensorType, SubDim-1, PtrTraits &gt; operator[](typename TensorType::IndexType index) const </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00511">Tensor.cuh:511</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a87a777247486756e99060547a3cc833a"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a87a777247486756e99060547a3cc833a">faiss::gpu::Tensor::strides</a></div><div class="ttdeci">__host__ __device__ const IndexT * strides() const </div><div class="ttdoc">Returns the stride array. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00248">Tensor.cuh:248</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a6699c311648457f257afa340c61f417c"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a6699c311648457f257afa340c61f417c">faiss::gpu::Tensor::getSize</a></div><div class="ttdeci">__host__ __device__ IndexT getSize(int i) const </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00222">Tensor.cuh:222</a></div></div>
<div class="ttc" id="structfaiss_1_1gpu_1_1traits_1_1RestrictPtrTraits_html"><div class="ttname"><a href="structfaiss_1_1gpu_1_1traits_1_1RestrictPtrTraits.html">faiss::gpu::traits::RestrictPtrTraits</a></div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00041">Tensor.cuh:41</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a22c1e45f81f7f9e5427e2eed19f9cd11"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a22c1e45f81f7f9e5427e2eed19f9cd11">faiss::gpu::Tensor::isSameSize</a></div><div class="ttdeci">__host__ __device__ bool isSameSize(const Tensor&lt; OtherT, OtherDim, InnerContig, IndexT, PtrTraits &gt; &amp;rhs) const </div><div class="ttdoc">Returns true if the two tensors are of the same dimensionality and size. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00232">Tensor-inl.cuh:232</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4_html_a6a548d4edb57d072be52cd827f055d6d"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#a6a548d4edb57d072be52cd827f055d6d">faiss::gpu::detail::SubTensor&lt; TensorType, 0, PtrTraits &gt;::data_</a></div><div class="ttdeci">TensorType::DataPtrType const data_</div><div class="ttdoc">Where our value is located. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00482">Tensor.cuh:482</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a6a43125c6f429f28161d59f19eb8e5c5"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a6a43125c6f429f28161d59f19eb8e5c5">faiss::gpu::Tensor::downcastInner</a></div><div class="ttdeci">__host__ __device__ Tensor&lt; T, NewDim, InnerContig, IndexT, PtrTraits &gt; downcastInner()</div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00589">Tensor-inl.cuh:589</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_ab6db6bf86dd0f7e877af3a6ae2100fe3"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#ab6db6bf86dd0f7e877af3a6ae2100fe3">faiss::gpu::Tensor::narrow</a></div><div class="ttdeci">__host__ __device__ Tensor&lt; T, Dim, InnerContig, IndexT, PtrTraits &gt; narrow(int dim, IndexT start, IndexT size)</div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00667">Tensor-inl.cuh:667</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a50411ce4d0fa32ef715e3321b6e33212"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a50411ce4d0fa32ef715e3321b6e33212">faiss::gpu::Tensor::data</a></div><div class="ttdeci">__host__ __device__ DataPtrType data()</div><div class="ttdoc">Returns a raw pointer to the start of our data. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00174">Tensor.cuh:174</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a6cc21376070a03d77661d6e333972c6a"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a6cc21376070a03d77661d6e333972c6a">faiss::gpu::Tensor::copyTo</a></div><div class="ttdeci">__host__ void copyTo(Tensor&lt; T, Dim, InnerContig, IndexT, PtrTraits &gt; &amp;t, cudaStream_t stream)</div><div class="ttdoc">Copies ourselves into a tensor; sizes must match. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00169">Tensor-inl.cuh:169</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html">faiss::gpu::Tensor</a></div><div class="ttdoc">Our tensor type. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00028">Tensor.cuh:28</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a2ac9dc9fa8d81f2651a1be486c14ba62"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a2ac9dc9fa8d81f2651a1be486c14ba62">faiss::gpu::Tensor::canUseIndexType</a></div><div class="ttdeci">__host__ bool canUseIndexType() const </div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00359">Tensor-inl.cuh:359</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a82a3484a6458e3e95bb91d320f2c6731"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a82a3484a6458e3e95bb91d320f2c6731">faiss::gpu::Tensor::transpose</a></div><div class="ttdeci">__host__ __device__ Tensor&lt; T, Dim, InnerContig, IndexT, PtrTraits &gt; transpose(int dim1, int dim2) const </div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00454">Tensor-inl.cuh:454</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a0b8bba630f7a1fa217f90b20d298420a"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a0b8bba630f7a1fa217f90b20d298420a">faiss::gpu::Tensor::getStride</a></div><div class="ttdeci">__host__ __device__ IndexT getStride(int i) const </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00228">Tensor.cuh:228</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4_html"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html">faiss::gpu::detail::SubTensor&lt; TensorType, 0, PtrTraits &gt;</a></div><div class="ttdoc">Specialization for a view of a single value (0-dimensional) </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00376">Tensor.cuh:376</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a1afd11b16869df9d352ee8ab1f8c7a1f"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a1afd11b16869df9d352ee8ab1f8c7a1f">faiss::gpu::Tensor::end</a></div><div class="ttdeci">__host__ __device__ DataPtrType end()</div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00180">Tensor.cuh:180</a></div></div>
<div class="ttc" id="structfaiss_1_1gpu_1_1traits_1_1DefaultPtrTraits_html"><div class="ttname"><a href="structfaiss_1_1gpu_1_1traits_1_1DefaultPtrTraits.html">faiss::gpu::traits::DefaultPtrTraits</a></div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00046">Tensor.cuh:46</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_html_a2d4e62fb08c180dfe2bde8d47361d61f"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a2d4e62fb08c180dfe2bde8d47361d61f">faiss::gpu::detail::SubTensor::tensor_</a></div><div class="ttdeci">TensorType &amp; tensor_</div><div class="ttdoc">The tensor we&amp;#39;re referencing. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00618">Tensor.cuh:618</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4_html_a750047ff919799af43b4861b580c82e3"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#a750047ff919799af43b4861b580c82e3">faiss::gpu::detail::SubTensor&lt; TensorType, 0, PtrTraits &gt;::data</a></div><div class="ttdeci">__host__ __device__ const TensorType::DataPtrType data() const </div><div class="ttdoc">Returns a raw accessor to our slice (const). </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00411">Tensor.cuh:411</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_html_ac722ca465d06da122898a07ce38276e2"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#ac722ca465d06da122898a07ce38276e2">faiss::gpu::detail::SubTensor::operator[]</a></div><div class="ttdeci">__host__ __device__ SubTensor&lt; TensorType, SubDim-1, PtrTraits &gt; operator[](typename TensorType::IndexType index)</div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00495">Tensor.cuh:495</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_html_aefdafcf236e5c49ad3bce1646797f8f2"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#aefdafcf236e5c49ad3bce1646797f8f2">faiss::gpu::detail::SubTensor::as</a></div><div class="ttdeci">__host__ __device__ const T &amp; as() const </div><div class="ttdoc">Cast to a different datatype (const). </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00552">Tensor.cuh:552</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_html"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html">faiss::gpu::detail::SubTensor</a></div><div class="ttdoc">A SubDim-rank slice of a parent Tensor. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00035">Tensor.cuh:35</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4_html_a23e80555a443797d60ae16d605dacd23"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#a23e80555a443797d60ae16d605dacd23">faiss::gpu::detail::SubTensor&lt; TensorType, 0, PtrTraits &gt;::dataAs</a></div><div class="ttdeci">__host__ __device__ PtrTraits&lt; T &gt;::PtrType dataAs()</div><div class="ttdoc">Cast to a different datatype. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00430">Tensor.cuh:430</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_html_a30dff4e7bea94cd894e17f6bdd7a7eb1"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor.html#a30dff4e7bea94cd894e17f6bdd7a7eb1">faiss::gpu::detail::SubTensor::data</a></div><div class="ttdeci">__host__ __device__ TensorType::DataPtrType data()</div><div class="ttdoc">Returns a raw accessor to our slice. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00534">Tensor.cuh:534</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a6c9640c365134ccc33cdb2695b016eb3"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a6c9640c365134ccc33cdb2695b016eb3">faiss::gpu::Tensor::castResize</a></div><div class="ttdeci">__host__ __device__ Tensor&lt; U, Dim, InnerContig, IndexT, PtrTraits &gt; castResize()</div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00273">Tensor-inl.cuh:273</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4_html_aae8c90b402493f5656f94701157a7417"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#aae8c90b402493f5656f94701157a7417">faiss::gpu::detail::SubTensor&lt; TensorType, 0, PtrTraits &gt;::data</a></div><div class="ttdeci">__host__ __device__ TensorType::DataPtrType data()</div><div class="ttdoc">Returns a raw accessor to our slice. </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00405">Tensor.cuh:405</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4_html_ad1d375e64756991dadeb5a1e63ed2cfd"><div class="ttname"><a href="classfaiss_1_1gpu_1_1detail_1_1SubTensor_3_01TensorType_00_010_00_01PtrTraits_01_4.html#ad1d375e64756991dadeb5a1e63ed2cfd">faiss::gpu::detail::SubTensor&lt; TensorType, 0, PtrTraits &gt;::as</a></div><div class="ttdeci">__host__ __device__ const T &amp; as() const </div><div class="ttdoc">Cast to a different datatype (const). </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00423">Tensor.cuh:423</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a8220da958d022c322b80b0539c99f8d4"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a8220da958d022c322b80b0539c99f8d4">faiss::gpu::Tensor::getSizeInBytes</a></div><div class="ttdeci">__host__ __device__ size_t getSizeInBytes() const </div><div class="ttdef"><b>Definition:</b> <a href="Tensor_8cuh_source.html#l00238">Tensor.cuh:238</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a35a63cfa4034a8ee14a999132d8a1828"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a35a63cfa4034a8ee14a999132d8a1828">faiss::gpu::Tensor::view</a></div><div class="ttdeci">__host__ __device__ Tensor&lt; T, SubDim, InnerContig, IndexT, PtrTraits &gt; view()</div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00652">Tensor-inl.cuh:652</a></div></div>
<div class="ttc" id="classfaiss_1_1gpu_1_1Tensor_html_a3067941f8f8f09fc73e2f06243699825"><div class="ttname"><a href="classfaiss_1_1gpu_1_1Tensor.html#a3067941f8f8f09fc73e2f06243699825">faiss::gpu::Tensor::isSame</a></div><div class="ttdeci">__host__ __device__ bool isSame(const Tensor&lt; OtherT, OtherDim, InnerContig, IndexT, PtrTraits &gt; &amp;rhs) const </div><div class="ttdef"><b>Definition:</b> <a href="Tensor-inl_8cuh_source.html#l00209">Tensor-inl.cuh:209</a></div></div>
</div><!-- fragment --></div><!-- contents -->
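<div class="contents">
<p>Usage sketch (illustrative only; this block is not part of <code>Tensor.cuh</code>). Assuming a hypothetical kernel <code>exampleKernel</code> that receives a 3-d, inner-contiguous <code>float</code> tensor by value, it shows how each <code>operator[]</code> peels one dimension through <code>detail::SubTensor</code>, how the 0-dimensional specialization reads through the texture cache via <code>ldg()</code>, and how a partially indexed <code>SubTensor</code> can be re-wrapped as a lower-rank <code>Tensor</code> with <code>view()</code>. The kernel name and launch geometry are assumptions, not part of the library.</p>
<pre class="fragment">// Illustrative sketch only; exampleKernel is hypothetical.
__global__ void exampleKernel(faiss::gpu::Tensor&lt;float, 3, true&gt; t) {
  // Each operator[] returns a detail::SubTensor of one lower rank;
  // three applications reach the 0-dimensional specialization, which
  // converts to a reference to the underlying float.
  float v = t[blockIdx.x][threadIdx.y][threadIdx.x];

  // ldg() on the 0-d SubTensor performs the final read through the
  // texture cache (__ldg) where supported, otherwise a plain load.
  float w = t[blockIdx.x][threadIdx.y][threadIdx.x].ldg();

  // A partially indexed SubTensor keeps a pointer to the start of its
  // sub-region; view() re-wraps it as a rank-2 Tensor over that data.
  faiss::gpu::Tensor&lt;float, 2, true&gt; row = t[blockIdx.x].view();

  (void) v; (void) w; (void) row; // silence unused-variable warnings
}
</pre>
</div>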
<!-- start footer part -->
<hr class="footer"/><address class="footer"><small>
Generated by &#160;<a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/>
</a> 1.8.5
</small></address>
</body>
</html>