mirror of https://github.com/open-mmlab/mmocr.git
[Docs] Fix the misleading description in test.py (#908)
* [Docs] Fix misleading description in test.py * update docs
parent
a9309d83b6
commit
4c57bd35ac
|
@ -25,21 +25,21 @@ CUDA_VISIBLE_DEVICES= python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [AR
|
|||
|
||||
:::
|
||||
|
||||
| ARGS | Type | Description |
|
||||
| ------------------ | --------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `--out` | str | Output result file in pickle format. |
|
||||
| `--fuse-conv-bn`   | bool                              | Whether to fuse the conv and bn layers of the model, which slightly increases the inference speed.                                                                                                                                                                                                                                                                                       |
|
||||
| `--format-only` | bool | Format the output results without performing evaluation. It is useful when you want to format the results to a specific format and submit them to the test server. |
|
||||
| `--gpu-id` | int | GPU id to use. Only applicable to non-distributed training. |
|
||||
| `--eval` | 'hmean-ic13', 'hmean-iou', 'acc' | The evaluation metrics, which depends on the task. For text detection, the metric should be either 'hmean-ic13' or 'hmean-iou'. For text recognition, the metric should be 'acc'. |
|
||||
| `--show` | bool | Whether to show results. |
|
||||
| `--show-dir` | str | Directory where the output images will be saved. |
|
||||
| `--show-score-thr` | float | Score threshold (default: 0.3). |
|
||||
| `--gpu-collect` | bool | Whether to use gpu to collect results. |
|
||||
| `--tmpdir` | str | The tmp directory used for collecting results from multiple workers, available when gpu-collect is not specified. |
|
||||
| `--cfg-options` | str | Override some settings in the used config, the key-value pair in xxx=yyy format will be merged into the config file. If the value to be overwritten is a list, it should be of the form of either key="[a,b]" or key=a,b. The argument also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]". Note that the quotation marks are necessary and that no white space is allowed. |
|
||||
| `--eval-options` | str | Custom options for evaluation, the key-value pair in xxx=yyy format will be kwargs for dataset.evaluate() function. |
|
||||
| `--launcher` | 'none', 'pytorch', 'slurm', 'mpi' | Options for job launcher. |
|
||||
| ARGS | Type | Description |
|
||||
| ------------------ | -------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `--out` | str | Output result file in pickle format. |
|
||||
| `--fuse-conv-bn`   | bool                                         | Whether to fuse the conv and bn layers of the model, which slightly increases the inference speed.                                                                                                                                                                                                                                                                                       |
|
||||
| `--format-only` | bool | Format the output results without performing evaluation. It is useful when you want to format the results to a specific format and submit them to the test server. |
|
||||
| `--gpu-id` | int | GPU id to use. Only applicable to non-distributed training. |
|
||||
| `--eval` | 'hmean-ic13', 'hmean-iou', 'acc', 'macro-f1' | The evaluation metrics. Options: 'hmean-ic13', 'hmean-iou' for text detection tasks, 'acc' for text recognition tasks, and 'macro-f1' for key information extraction tasks. |
|
||||
| `--show` | bool | Whether to show results. |
|
||||
| `--show-dir` | str | Directory where the output images will be saved. |
|
||||
| `--show-score-thr` | float | Score threshold (default: 0.3). |
|
||||
| `--gpu-collect` | bool | Whether to use gpu to collect results. |
|
||||
| `--tmpdir` | str | The tmp directory used for collecting results from multiple workers, available when gpu-collect is not specified. |
|
||||
| `--cfg-options` | str | Override some settings in the used config, the key-value pair in xxx=yyy format will be merged into the config file. If the value to be overwritten is a list, it should be of the form of either key="[a,b]" or key=a,b. The argument also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]". Note that the quotation marks are necessary and that no white space is allowed. |
|
||||
| `--eval-options` | str | Custom options for evaluation, the key-value pair in xxx=yyy format will be kwargs for dataset.evaluate() function. |
|
||||
| `--launcher` | 'none', 'pytorch', 'slurm', 'mpi' | Options for job launcher. |
|
||||
|
||||
## Testing on Multiple GPUs
|
||||
|
||||
|
@ -51,13 +51,13 @@ You can use the following command to test a dataset with multiple GPUs.
|
|||
[PORT={PORT}] ./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [PY_ARGS]
|
||||
```
|
||||
|
||||
| Arguments | Type | Description |
|
||||
| --------- | ---- | -------------------------------------------------------------------------------- |
|
||||
| `PORT` | int | The master port that will be used by the machine with rank 0. Defaults to 29500. |
|
||||
| `CONFIG_FILE` | str | The path to config. |
|
||||
| `CHECKPOINT_FILE` | str | The path to the checkpoint. |
|
||||
| `GPU_NUM` | int | The number of GPUs to be used per node. Defaults to 8. |
|
||||
| `PY_ARGS` | str | Arguments to be parsed by `tools/test.py`. |
|
||||
| Arguments | Type | Description |
|
||||
| ----------------- | ---- | -------------------------------------------------------------------------------- |
|
||||
| `PORT` | int | The master port that will be used by the machine with rank 0. Defaults to 29500. |
|
||||
| `CONFIG_FILE` | str | The path to config. |
|
||||
| `CHECKPOINT_FILE` | str | The path to the checkpoint. |
|
||||
| `GPU_NUM` | int | The number of GPUs to be used per node. Defaults to 8. |
|
||||
| `PY_ARGS` | str | Arguments to be parsed by `tools/test.py`. |
|
||||
|
||||
For example,
|
||||
|
||||
|
@ -73,16 +73,16 @@ You can launch a task on multiple machines connected to the same network.
|
|||
NNODES=${NNODES} NODE_RANK=${NODE_RANK} PORT=${MASTER_PORT} MASTER_ADDR=${MASTER_ADDR} ./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [PY_ARGS]
|
||||
```
|
||||
|
||||
| Arguments | Type | Description |
|
||||
| --------------- | ---- | ----------------------------------------------------------------------------------------------------------- |
|
||||
| `NNODES` | int | The number of nodes. |
|
||||
| `NODE_RANK` | int | The rank of current node. |
|
||||
| `PORT` | int | The master port that will be used by rank 0 node. Defaults to 29500. |
|
||||
| `MASTER_ADDR` | str | The address of rank 0 node. Defaults to "127.0.0.1". |
|
||||
| `CONFIG_FILE` | str | The path to config. |
|
||||
| `CHECKPOINT_FILE` | str | The path to the checkpoint. |
|
||||
| `GPU_NUM` | int | The number of GPUs to be used per node. Defaults to 8. |
|
||||
| `PY_ARGS` | str | Arguments to be parsed by `tools/test.py`. |
|
||||
| Arguments | Type | Description |
|
||||
| ----------------- | ---- | -------------------------------------------------------------------- |
|
||||
| `NNODES` | int | The number of nodes. |
|
||||
| `NODE_RANK` | int | The rank of current node. |
|
||||
| `PORT` | int | The master port that will be used by rank 0 node. Defaults to 29500. |
|
||||
| `MASTER_ADDR` | str | The address of rank 0 node. Defaults to "127.0.0.1". |
|
||||
| `CONFIG_FILE` | str | The path to config. |
|
||||
| `CHECKPOINT_FILE` | str | The path to the checkpoint. |
|
||||
| `GPU_NUM` | int | The number of GPUs to be used per node. Defaults to 8. |
|
||||
| `PY_ARGS` | str | Arguments to be parsed by `tools/test.py`. |
|
||||
|
||||
:::{note}
|
||||
MMOCR relies on torch.distributed package for distributed testing. Find more information at PyTorch’s [launch utility](https://pytorch.org/docs/stable/distributed.html#launch-utility).
|
||||
|
|
|
@ -48,9 +48,9 @@ def parse_args():
|
|||
'--eval',
|
||||
type=str,
|
||||
nargs='+',
|
||||
help='The evaluation metrics, which depends on the dataset, e.g.,'
|
||||
'"bbox", "seg", "proposal" for COCO, and "mAP", "recall" for'
|
||||
'PASCAL VOC.')
|
||||
help='The evaluation metrics. Options: \'hmean-ic13\', \'hmean-iou'
|
||||
'\' for text detection tasks, \'acc\' for text recognition tasks, and '
|
||||
'\'macro-f1\' for key information extraction tasks.')
|
||||
parser.add_argument('--show', action='store_true', help='Show results.')
|
||||
parser.add_argument(
|
||||
'--show-dir', help='Directory where the output images will be saved.')
|
||||
|
|
Loading…
Reference in New Issue