From 222f38075b8dfd5f11b56b7918f67fd983de7e28 Mon Sep 17 00:00:00 2001
From: Zaida Zhou <58739961+zhouzaida@users.noreply.github.com>
Date: Thu, 9 Dec 2021 22:15:52 +0800
Subject: [PATCH] [Docs] Fix the format of the docstring (#1573)

* [Docs] Fix the format of docstring

* fix format
---
 mmcv/cnn/bricks/activation.py       |  1 +
 mmcv/cnn/bricks/norm.py             |  6 +--
 mmcv/cnn/bricks/plugin.py           | 10 ++--
 mmcv/cnn/bricks/upsample.py         |  2 +-
 mmcv/cnn/utils/flops_counter.py     | 20 ++++----
 mmcv/fileio/file_client.py          |  2 +-
 mmcv/image/colorspace.py            |  8 ++--
 mmcv/image/geometric.py             | 24 +++++-----
 mmcv/ops/group_points.py            |  2 +-
 mmcv/ops/nms.py                     | 10 ++--
 mmcv/ops/scatter_points.py          | 27 +++++------
 mmcv/runner/base_module.py          | 13 +++---
 mmcv/runner/base_runner.py          | 10 ++--
 mmcv/runner/hooks/ema.py            |  4 +-
 mmcv/runner/hooks/logger/neptune.py | 26 ++++++-----
 mmcv/utils/config.py                |  4 +-
 mmcv/utils/path.py                  |  2 +-
 mmcv/utils/registry.py              | 10 ++--
 mmcv/utils/timer.py                 | 72 ++++++++++++++---------------
 mmcv/video/io.py                    | 19 ++++----
 mmcv/video/optflow.py               |  2 +-
 21 files changed, 136 insertions(+), 138 deletions(-)

diff --git a/mmcv/cnn/bricks/activation.py b/mmcv/cnn/bricks/activation.py
index 79f198838..26be59581 100644
--- a/mmcv/cnn/bricks/activation.py
+++ b/mmcv/cnn/bricks/activation.py
@@ -83,6 +83,7 @@ def build_activation_layer(cfg):
 
     Args:
         cfg (dict): The activation layer config, which should contain:
+
             - type (str): Layer type.
             - layer args: Args needed to instantiate an activation layer.
 
diff --git a/mmcv/cnn/bricks/norm.py b/mmcv/cnn/bricks/norm.py
index cfb326bdb..51efdc184 100644
--- a/mmcv/cnn/bricks/norm.py
+++ b/mmcv/cnn/bricks/norm.py
@@ -83,9 +83,9 @@ def build_norm_layer(cfg, num_features, postfix=''):
             to create named layer.
 
     Returns:
-        (str, nn.Module): The first element is the layer name consisting of
-            abbreviation and postfix, e.g., bn1, gn. The second element is the
-            created norm layer.
+        tuple[str, nn.Module]: The first element is the layer name consisting
+            of abbreviation and postfix, e.g., bn1, gn. The second element is the
+            created norm layer.
     """
     if not isinstance(cfg, dict):
         raise TypeError('cfg must be a dict')
diff --git a/mmcv/cnn/bricks/plugin.py b/mmcv/cnn/bricks/plugin.py
index 07c010d40..82a876f01 100644
--- a/mmcv/cnn/bricks/plugin.py
+++ b/mmcv/cnn/bricks/plugin.py
@@ -57,15 +57,15 @@ def build_plugin_layer(cfg, postfix='', **kwargs):
 
     Args:
         cfg (None or dict): cfg should contain:
-            type (str): identify plugin layer type.
-            layer args: args needed to instantiate a plugin layer.
+
+            - type (str): identify plugin layer type.
+            - layer args: args needed to instantiate a plugin layer.
         postfix (int, str): appended into norm abbreviation to create named
             layer. Default: ''.
 
     Returns:
-        tuple[str, nn.Module]:
-            name (str): abbreviation + postfix
-            layer (nn.Module): created plugin layer
+        tuple[str, nn.Module]: The first one is the concatenation of
+            abbreviation and postfix. The second is the created plugin layer.
     """
     if not isinstance(cfg, dict):
         raise TypeError('cfg must be a dict')
diff --git a/mmcv/cnn/bricks/upsample.py b/mmcv/cnn/bricks/upsample.py
index a1a353767..0fd21fbf9 100644
--- a/mmcv/cnn/bricks/upsample.py
+++ b/mmcv/cnn/bricks/upsample.py
@@ -55,7 +55,7 @@ def build_upsample_layer(cfg, *args, **kwargs):
 
             - type (str): Layer type.
             - scale_factor (int): Upsample ratio, which is not applicable to
-                deconv.
+              deconv.
             - layer args: Args needed to instantiate a upsample layer.
         args (argument list): Arguments passed to the ``__init__``
             method of the corresponding conv layer.
diff --git a/mmcv/cnn/utils/flops_counter.py b/mmcv/cnn/utils/flops_counter.py
index dceeb398b..878a67735 100644
--- a/mmcv/cnn/utils/flops_counter.py
+++ b/mmcv/cnn/utils/flops_counter.py
@@ -48,16 +48,16 @@ def get_model_complexity_info(model,
     Supported layers are listed as below:
 
         - Convolutions: ``nn.Conv1d``, ``nn.Conv2d``, ``nn.Conv3d``.
-        - Activations: ``nn.ReLU``, ``nn.PReLU``, ``nn.ELU``, ``nn.LeakyReLU``,
-          ``nn.ReLU6``.
+        - Activations: ``nn.ReLU``, ``nn.PReLU``, ``nn.ELU``,
+          ``nn.LeakyReLU``, ``nn.ReLU6``.
         - Poolings: ``nn.MaxPool1d``, ``nn.MaxPool2d``, ``nn.MaxPool3d``,
-        ``nn.AvgPool1d``, ``nn.AvgPool2d``, ``nn.AvgPool3d``,
-        ``nn.AdaptiveMaxPool1d``, ``nn.AdaptiveMaxPool2d``,
-        ``nn.AdaptiveMaxPool3d``, ``nn.AdaptiveAvgPool1d``,
-        ``nn.AdaptiveAvgPool2d``, ``nn.AdaptiveAvgPool3d``.
+          ``nn.AvgPool1d``, ``nn.AvgPool2d``, ``nn.AvgPool3d``,
+          ``nn.AdaptiveMaxPool1d``, ``nn.AdaptiveMaxPool2d``,
+          ``nn.AdaptiveMaxPool3d``, ``nn.AdaptiveAvgPool1d``,
+          ``nn.AdaptiveAvgPool2d``, ``nn.AdaptiveAvgPool3d``.
         - BatchNorms: ``nn.BatchNorm1d``, ``nn.BatchNorm2d``,
-        ``nn.BatchNorm3d``, ``nn.GroupNorm``, ``nn.InstanceNorm1d``,
-        ``InstanceNorm2d``, ``InstanceNorm3d``, ``nn.LayerNorm``.
+          ``nn.BatchNorm3d``, ``nn.GroupNorm``, ``nn.InstanceNorm1d``,
+          ``InstanceNorm2d``, ``InstanceNorm3d``, ``nn.LayerNorm``.
         - Linear: ``nn.Linear``.
         - Deconvolution: ``nn.ConvTranspose2d``.
         - Upsample: ``nn.Upsample``.
@@ -78,8 +78,8 @@ def get_model_complexity_info(model,
 
     Returns:
         tuple[float | str]: If ``as_strings`` is set to True, it will return
-        FLOPs and parameter counts in a string format. otherwise, it will
-        return those in a float number format.
+            FLOPs and parameter counts in a string format. otherwise, it will
+            return those in a float number format.
     """
     assert type(input_shape) is tuple
     assert len(input_shape) >= 1
diff --git a/mmcv/fileio/file_client.py b/mmcv/fileio/file_client.py
index 8c2ce0238..ba77f81e3 100644
--- a/mmcv/fileio/file_client.py
+++ b/mmcv/fileio/file_client.py
@@ -1072,7 +1072,7 @@ class FileClient:
 
         Returns:
             bool: Return ``True`` if ``filepath`` points to a file, ``False``
-            otherwise.
+                otherwise.
         """
         return self.client.isfile(filepath)
 
diff --git a/mmcv/image/colorspace.py b/mmcv/image/colorspace.py
index 814533952..4337720ea 100644
--- a/mmcv/image/colorspace.py
+++ b/mmcv/image/colorspace.py
@@ -160,7 +160,7 @@ def rgb2ycbcr(img, y_only=False):
 
     Returns:
         ndarray: The converted YCbCr image. The output image has the same type
-        and range as input image.
+            and range as input image.
     """
     img_type = img.dtype
     img = _convert_input_type_range(img)
@@ -194,7 +194,7 @@ def bgr2ycbcr(img, y_only=False):
 
     Returns:
         ndarray: The converted YCbCr image. The output image has the same type
-        and range as input image.
+            and range as input image.
     """
     img_type = img.dtype
     img = _convert_input_type_range(img)
@@ -227,7 +227,7 @@ def ycbcr2rgb(img):
 
     Returns:
         ndarray: The converted RGB image. The output image has the same type
-        and range as input image.
+            and range as input image.
     """
     img_type = img.dtype
     img = _convert_input_type_range(img) * 255
@@ -259,7 +259,7 @@ def ycbcr2bgr(img):
 
     Returns:
         ndarray: The converted BGR image. The output image has the same type
-        and range as input image.
+            and range as input image.
     """
     img_type = img.dtype
     img = _convert_input_type_range(img) * 255
diff --git a/mmcv/image/geometric.py b/mmcv/image/geometric.py
index cf97c201c..75cee48d9 100644
--- a/mmcv/image/geometric.py
+++ b/mmcv/image/geometric.py
@@ -70,7 +70,7 @@ def imresize(img,
 
     Returns:
         tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or
-        `resized_img`.
+            `resized_img`.
     """
     h, w = img.shape[:2]
     if backend is None:
@@ -130,7 +130,7 @@ def imresize_to_multiple(img,
 
     Returns:
         tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or
-        `resized_img`.
+            `resized_img`.
     """
     h, w = img.shape[:2]
     if size is not None and scale_factor is not None:
@@ -175,7 +175,7 @@ def imresize_like(img,
 
     Returns:
         tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
-        `resized_img`.
+            `resized_img`.
     """
     h, w = dst_img.shape[:2]
     return imresize(img, (w, h), return_scale, interpolation, backend=backend)
@@ -462,16 +462,16 @@ def impad(img,
             reflect or symmetric. Default: constant.
 
             - constant: pads with a constant value, this value is specified
-                with pad_val.
+              with pad_val.
             - edge: pads with the last value at the edge of the image.
-            - reflect: pads with reflection of image without repeating the
-                last value on the edge. For example, padding [1, 2, 3, 4]
-                with 2 elements on both sides in reflect mode will result
-                in [3, 2, 1, 2, 3, 4, 3, 2].
-            - symmetric: pads with reflection of image repeating the last
-                value on the edge. For example, padding [1, 2, 3, 4] with
-                2 elements on both sides in symmetric mode will result in
-                [2, 1, 1, 2, 3, 4, 4, 3]
+            - reflect: pads with reflection of image without repeating the last
+              value on the edge. For example, padding [1, 2, 3, 4] with 2
+              elements on both sides in reflect mode will result in
+              [3, 2, 1, 2, 3, 4, 3, 2].
+            - symmetric: pads with reflection of image repeating the last value
+              on the edge. For example, padding [1, 2, 3, 4] with 2 elements on
+              both sides in symmetric mode will result in
+              [2, 1, 1, 2, 3, 4, 4, 3]
 
     Returns:
         ndarray: The padded image.
diff --git a/mmcv/ops/group_points.py b/mmcv/ops/group_points.py
index 0ece8a163..80c7d294f 100644
--- a/mmcv/ops/group_points.py
+++ b/mmcv/ops/group_points.py
@@ -76,7 +76,7 @@ class QueryAndGroup(nn.Module):
 
         Returns:
             torch.Tensor: (B, 3 + C, npoint, sample_num) Grouped
-            concatenated coordinates and features of points.
+                concatenated coordinates and features of points.
         """
         # if self.max_radius is None, we will perform kNN instead of ball query
         # idx is of shape [B, npoint, sample_num]
diff --git a/mmcv/ops/nms.py b/mmcv/ops/nms.py
index 4cf2d9872..303c5facb 100644
--- a/mmcv/ops/nms.py
+++ b/mmcv/ops/nms.py
@@ -277,11 +277,11 @@ def batched_nms(boxes, scores, idxs, nms_cfg, class_agnostic=False):
 
             - iou_thr (float): IoU threshold used for NMS.
            - split_thr (float): threshold number of boxes. In some cases the
-                number of boxes is large (e.g., 200k). To avoid OOM during
-                training, the users could set `split_thr` to a small value.
-                If the number of boxes is greater than the threshold, it will
-                perform NMS on each group of boxes separately and sequentially.
-                Defaults to 10000.
+              number of boxes is large (e.g., 200k). To avoid OOM during
+              training, the users could set `split_thr` to a small value.
+              If the number of boxes is greater than the threshold, it will
+              perform NMS on each group of boxes separately and sequentially.
+              Defaults to 10000.
         class_agnostic (bool): if true, nms is class agnostic,
             i.e. IoU thresholding happens over all boxes,
             regardless of the predicted class.
diff --git a/mmcv/ops/scatter_points.py b/mmcv/ops/scatter_points.py
index 06c04e686..c1c1187ef 100644
--- a/mmcv/ops/scatter_points.py
+++ b/mmcv/ops/scatter_points.py
@@ -25,11 +25,10 @@ class _DynamicScatter(Function):
                 'mean'. Default: 'max'.
 
         Returns:
-            tuple[torch.Tensor]: tuple[torch.Tensor]: A tuple contains two
-                elements. The first one is the voxel features with shape [M, C]
-                which are respectively reduced from input features that share
-                the same voxel coordinates . The second is voxel coordinates
-                with shape [M, ndim].
+            tuple[torch.Tensor]: A tuple contains two elements. The first one
+            is the voxel features with shape [M, C] which are respectively
+            reduced from input features that share the same voxel coordinates.
+            The second is voxel coordinates with shape [M, ndim].
         """
         results = ext_module.dynamic_point_to_voxel_forward(
             feats, coors, reduce_type)
@@ -89,11 +88,10 @@ class DynamicScatter(nn.Module):
                 multi-dim voxel index) of each points.
 
         Returns:
-            tuple[torch.Tensor]: tuple[torch.Tensor]: A tuple contains two
-                elements. The first one is the voxel features with shape [M, C]
-                which are respectively reduced from input features that share
-                the same voxel coordinates . The second is voxel coordinates
-                with shape [M, ndim].
+            tuple[torch.Tensor]: A tuple contains two elements. The first one
+            is the voxel features with shape [M, C] which are respectively
+            reduced from input features that share the same voxel coordinates.
+            The second is voxel coordinates with shape [M, ndim].
         """
         reduce = 'mean' if self.average_points else 'max'
         return dynamic_scatter(points.contiguous(), coors.contiguous(), reduce)
@@ -107,11 +105,10 @@ class DynamicScatter(nn.Module):
                 multi-dim voxel index) of each points.
 
         Returns:
-            tuple[torch.Tensor]:tuple[torch.Tensor]: A tuple contains two
-                elements. The first one is the voxel features with shape [M, C]
-                which are respectively reduced from input features that share
-                the same voxel coordinates . The second is voxel coordinates
-                with shape [M, ndim].
+            tuple[torch.Tensor]: A tuple contains two elements. The first one
+            is the voxel features with shape [M, C] which are respectively
+            reduced from input features that share the same voxel coordinates.
+            The second is voxel coordinates with shape [M, ndim].
         """
         if coors.size(-1) == 3:
             return self.forward_single(points, coors)
diff --git a/mmcv/runner/base_module.py b/mmcv/runner/base_module.py
index f754ee651..7937eca37 100644
--- a/mmcv/runner/base_module.py
+++ b/mmcv/runner/base_module.py
@@ -18,13 +18,12 @@ class BaseModule(nn.Module, metaclass=ABCMeta):
     functionality of parameter initialization. Compared with
     ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes.
 
-        - ``init_cfg``: the config to control the initialization.
-        - ``init_weights``: The function of parameter
-            initialization and recording initialization
-            information.
-        - ``_params_init_info``: Used to track the parameter
-            initialization information. This attribute only
-            exists during executing the ``init_weights``.
+    - ``init_cfg``: the config to control the initialization.
+    - ``init_weights``: The function of parameter initialization and recording
+      initialization information.
+    - ``_params_init_info``: Used to track the parameter initialization
+      information. This attribute only exists during executing the
+      ``init_weights``.
 
     Args:
         init_cfg (dict, optional): Initialization config dict.
diff --git a/mmcv/runner/base_runner.py b/mmcv/runner/base_runner.py
index 25cd98f51..b533c7199 100644
--- a/mmcv/runner/base_runner.py
+++ b/mmcv/runner/base_runner.py
@@ -207,8 +207,8 @@ class BaseRunner(metaclass=ABCMeta):
 
         Returns:
             list[float] | dict[str, list[float]]: Current learning rates of all
-                param groups. If the runner has a dict of optimizers, this
-                method will return a dict.
+                param groups. If the runner has a dict of optimizers, this method
+                will return a dict.
         """
         if isinstance(self.optimizer, torch.optim.Optimizer):
             lr = [group['lr'] for group in self.optimizer.param_groups]
@@ -226,8 +226,8 @@ class BaseRunner(metaclass=ABCMeta):
 
         Returns:
             list[float] | dict[str, list[float]]: Current momentums of all
-                param groups. If the runner has a dict of optimizers, this
-                method will return a dict.
+                param groups. If the runner has a dict of optimizers, this method
+                will return a dict.
         """
 
         def _get_momentum(optimizer):
@@ -287,7 +287,7 @@ class BaseRunner(metaclass=ABCMeta):
             hook_cfg (dict): Hook config. It should have at least keys 'type'
                 and 'priority' indicating its type and priority.
 
-        Notes:
+        Note:
             The specific hook class to register should not use 'type' and
             'priority' arguments during initialization.
         """
diff --git a/mmcv/runner/hooks/ema.py b/mmcv/runner/hooks/ema.py
index 15c7e6808..6ed77b84e 100644
--- a/mmcv/runner/hooks/ema.py
+++ b/mmcv/runner/hooks/ema.py
@@ -13,8 +13,8 @@ class EMAHook(Hook):
 
         .. math::
 
-            \text{Xema\_{t+1}} = (1 - \text{momentum}) \times
-            \text{Xema\_{t}} + \text{momentum} \times X_t
+            Xema\_{t+1} = (1 - \text{momentum}) \times
+            Xema\_{t} + \text{momentum} \times X_t
 
     Args:
         momentum (float): The momentum used for updating ema parameter.
diff --git a/mmcv/runner/hooks/logger/neptune.py b/mmcv/runner/hooks/logger/neptune.py
index 7a38772b0..abd83ccfc 100644
--- a/mmcv/runner/hooks/logger/neptune.py
+++ b/mmcv/runner/hooks/logger/neptune.py
@@ -12,19 +12,21 @@ class NeptuneLoggerHook(LoggerHook):
 
     Args:
         init_kwargs (dict): a dict contains the initialization keys as below:
+
             - project (str): Name of a project in a form of
-                namespace/project_name. If None, the value of
-                NEPTUNE_PROJECT environment variable will be taken.
-            - api_token (str): User’s API token.
-                If None, the value of NEPTUNE_API_TOKEN environment
-                variable will be taken. Note: It is strongly recommended
-                to use NEPTUNE_API_TOKEN environment variable rather than
-                placing your API token in plain text in your source code.
-            - name (str, optional, default is 'Untitled'): Editable name of
-                the run. Name is displayed in the run's Details and in
-                Runs table as a column.
-            Check https://docs.neptune.ai/api-reference/neptune#init for
-            more init arguments.
+              namespace/project_name. If None, the value of NEPTUNE_PROJECT
+              environment variable will be taken.
+            - api_token (str): User’s API token. If None, the value of
+              NEPTUNE_API_TOKEN environment variable will be taken. Note: It is
+              strongly recommended to use NEPTUNE_API_TOKEN environment
+              variable rather than placing your API token in plain text in your
+              source code.
+            - name (str, optional, default is 'Untitled'): Editable name of the
+              run. Name is displayed in the run's Details and in Runs table as
+              a column.
+
+        Check https://docs.neptune.ai/api-reference/neptune#init for more
+        init arguments.
         interval (int): Logging interval (every k iterations).
         ignore_last (bool): Ignore the log of last iterations in each epoch
             if less than `interval`.
diff --git a/mmcv/utils/config.py b/mmcv/utils/config.py
index c71377c07..5b9dfbb5a 100644
--- a/mmcv/utils/config.py
+++ b/mmcv/utils/config.py
@@ -344,7 +344,7 @@ class Config:
                 config str. Only py/yml/yaml/json type are supported now!
 
         Returns:
-            obj:`Config`: Config obj.
+            :obj:`Config`: Config obj.
         """
         if file_format not in ['.py', '.json', '.yaml', '.yml']:
             raise IOError('Only py/yml/yaml/json type are supported now!')
@@ -561,7 +561,7 @@ class Config:
             >>> assert cfg_dict == dict(
             ...     model=dict(backbone=dict(depth=50, with_cp=True)))
 
-            # Merge list element
+            >>> # Merge list element
             >>> cfg = Config(dict(pipeline=[
             ...     dict(type='LoadImage'), dict(type='LoadAnnotations')]))
             >>> options = dict(pipeline={'0': dict(type='SelfLoadImage')})
diff --git a/mmcv/utils/path.py b/mmcv/utils/path.py
index 7dab4b304..568081837 100644
--- a/mmcv/utils/path.py
+++ b/mmcv/utils/path.py
@@ -40,7 +40,7 @@ def scandir(dir_path, suffix=None, recursive=False, case_sensitive=True):
     """Scan a directory to find the interested files.
 
     Args:
-        dir_path (str | obj:`Path`): Path of the directory.
+        dir_path (str | :obj:`Path`): Path of the directory.
         suffix (str | tuple(str), optional): File suffix that we are
             interested in. Default: None.
         recursive (bool, optional): If set to True, recursively scan the
diff --git a/mmcv/utils/registry.py b/mmcv/utils/registry.py
index fa9df39bc..44db8b99c 100644
--- a/mmcv/utils/registry.py
+++ b/mmcv/utils/registry.py
@@ -59,6 +59,7 @@ class Registry:
     """A registry to map strings to classes.
 
     Registered object could be built from registry.
+
     Example:
         >>> MODELS = Registry('models')
         >>> @MODELS.register_module()
@@ -128,16 +129,15 @@ class Registry:
         The name of the package where registry is defined will be returned.
 
         Example:
-            # in mmdet/models/backbone/resnet.py
+            >>> # in mmdet/models/backbone/resnet.py
             >>> MODELS = Registry('models')
             >>> @MODELS.register_module()
             >>> class ResNet:
             >>>     pass
 
             The scope of ``ResNet`` will be ``mmdet``.
-
         Returns:
-            scope (str): The inferred scope name.
+            str: The inferred scope name.
         """
         # inspect.stack() trace where this function is called, the index-2
         # indicates the frame where `infer_scope()` is called
@@ -158,8 +158,8 @@ class Registry:
             None, 'ResNet'
 
         Return:
-            scope (str, None): The first scope.
-            key (str): The remaining key.
+            tuple[str | None, str]: The former element is the first scope of
+            the key, which can be ``None``. The latter is the remaining key.
         """
         split_index = key.find('.')
         if split_index != -1:
diff --git a/mmcv/utils/timer.py b/mmcv/utils/timer.py
index 66d4a78a8..02e96e537 100644
--- a/mmcv/utils/timer.py
+++ b/mmcv/utils/timer.py
@@ -12,27 +12,26 @@ class TimerError(Exception):
 class Timer:
     """A flexible Timer class.
 
-    :Example:
-
-    >>> import time
-    >>> import mmcv
-    >>> with mmcv.Timer():
-    >>> # simulate a code block that will run for 1s
-    >>> time.sleep(1)
-    1.000
-    >>> with mmcv.Timer(print_tmpl='it takes {:.1f} seconds'):
-    >>> # simulate a code block that will run for 1s
-    >>> time.sleep(1)
-    it takes 1.0 seconds
-    >>> timer = mmcv.Timer()
-    >>> time.sleep(0.5)
-    >>> print(timer.since_start())
-    0.500
-    >>> time.sleep(0.5)
-    >>> print(timer.since_last_check())
-    0.500
-    >>> print(timer.since_start())
-    1.000
+    Examples:
+        >>> import time
+        >>> import mmcv
+        >>> with mmcv.Timer():
+        >>> # simulate a code block that will run for 1s
+        >>> time.sleep(1)
+        1.000
+        >>> with mmcv.Timer(print_tmpl='it takes {:.1f} seconds'):
+        >>> # simulate a code block that will run for 1s
+        >>> time.sleep(1)
+        it takes 1.0 seconds
+        >>> timer = mmcv.Timer()
+        >>> time.sleep(0.5)
+        >>> print(timer.since_start())
+        0.500
+        >>> time.sleep(0.5)
+        >>> print(timer.since_last_check())
+        0.500
+        >>> print(timer.since_start())
+        1.000
     """
 
     def __init__(self, start=True, print_tmpl=None):
@@ -64,7 +63,8 @@ class Timer:
     def since_start(self):
         """Total time since the timer is started.
 
-        Returns (float): Time in seconds.
+        Returns:
+            float: Time in seconds.
         """
         if not self._is_running:
             raise TimerError('timer is not running')
@@ -77,7 +77,8 @@ class Timer:
         Either :func:`since_start` or :func:`since_last_check` is a checking
         operation.
 
-        Returns (float): Time in seconds.
+        Returns:
+            float: Time in seconds.
         """
         if not self._is_running:
             raise TimerError('timer is not running')
@@ -95,21 +96,20 @@ def check_time(timer_id):
     This method is suitable for running a task on a list of items. A timer
     will be registered when the method is called for the first time.
 
-    :Example:
-
-    >>> import time
-    >>> import mmcv
-    >>> for i in range(1, 6):
-    >>> # simulate a code block
-    >>> time.sleep(i)
-    >>> mmcv.check_time('task1')
-    2.000
-    3.000
-    4.000
-    5.000
+    Examples:
+        >>> import time
+        >>> import mmcv
+        >>> for i in range(1, 6):
+        >>> # simulate a code block
+        >>> time.sleep(i)
+        >>> mmcv.check_time('task1')
+        2.000
+        3.000
+        4.000
+        5.000
 
     Args:
-        timer_id (str): Timer identifier.
+        str: Timer identifier.
     """
     if timer_id not in _g_timers:
         _g_timers[timer_id] = Timer()
diff --git a/mmcv/video/io.py b/mmcv/video/io.py
index 30971a15a..8d9bf9d3a 100644
--- a/mmcv/video/io.py
+++ b/mmcv/video/io.py
@@ -50,15 +50,14 @@ class VideoReader:
     the second time, there is no need to decode again if it is stored in
     the cache.
 
-    :Example:
-
-    >>> import mmcv
-    >>> v = mmcv.VideoReader('sample.mp4')
-    >>> len(v)  # get the total frame number with `len()`
-    120
-    >>> for img in v:  # v is iterable
-    >>>     mmcv.imshow(img)
-    >>> v[5]  # get the 6th frame
+    Examples:
+        >>> import mmcv
+        >>> v = mmcv.VideoReader('sample.mp4')
+        >>> len(v)  # get the total frame number with `len()`
+        120
+        >>> for img in v:  # v is iterable
+        >>>     mmcv.imshow(img)
+        >>> v[5]  # get the 6th frame
     """
 
     def __init__(self, filename, cache_capacity=10):
@@ -189,7 +188,7 @@ class VideoReader:
 
         Returns:
             ndarray or None: If the video is fresh, return None, otherwise
-            return the frame.
+                return the frame.
         """
         if self._position == 0:
             return None
diff --git a/mmcv/video/optflow.py b/mmcv/video/optflow.py
index c246f5b47..2e6518900 100644
--- a/mmcv/video/optflow.py
+++ b/mmcv/video/optflow.py
@@ -242,7 +242,7 @@ def sparse_flow_from_bytes(content):
 
     Returns:
         Tuple(ndarray, ndarray): Loaded optical flow with the shape (H, W, 2)
-        and flow valid mask with the shape (H, W).
+            and flow valid mask with the shape (H, W).
     """  # nopa
     content = np.frombuffer(content, np.uint8)