Mirror of https://github.com/open-mmlab/mmpretrain.git, synced 2025-06-03 14:59:18 +08:00
* hivit added * Update hivit.py * Update hivit.py * Add files via upload * Update __init__.py * Add files via upload * Update __init__.py * Add files via upload * Update hivit.py * Add files via upload * Add files via upload * Add files via upload * Add files via upload * Update itpn.py * Add files via upload * Update __init__.py * Update mae_hivit-base-p16.py * Delete mim_itpn-base-p16.py * Add files via upload * Update itpn_hivit-base-p16.py * Update itpn.py * Update hivit.py * Update __init__.py * Update mae.py * Delete hivit.py * Update __init__.py * Delete configs/itpn directory * Add files via upload * Add files via upload * Delete configs/hivit directory * Add files via upload * refactor and add metafile and readme * update clip * add ut * update ut * update * update docstring * update model.rst --------- Co-authored-by: 田运杰 <48153283+sunsmarterjie@users.noreply.github.com>
34 lines
884 B
Python
# model settings
model = dict(
    type='iTPN',
    backbone=dict(
        type='iTPNHiViT',
        arch='base',
        reconstruction_type='pixel',
        # MAE-style masked image modeling: mask 75% of the input patches
        mask_ratio=0.75),
    neck=dict(
        type='iTPNPretrainDecoder',
        # 196 = (224 / 16)^2 patches for 224x224 inputs
        num_patches=196,
        patch_size=16,
        in_chans=3,
        embed_dim=512,
        decoder_embed_dim=512,
        decoder_depth=6,
        decoder_num_heads=16,
        mlp_ratio=4.,
        reconstruction_type='pixel',
        # transformer pyramid (feature pyramid built on the decoder)
        fpn_dim=256,
        fpn_depth=2,
        num_outs=3,
    ),
    head=dict(
        type='MAEPretrainHead',
        # normalize target pixels per patch before computing the loss
        norm_pix=True,
        patch_size=16,
        loss=dict(type='PixelReconstructionLoss', criterion='L2')),
    init_cfg=[
        dict(type='Xavier', layer='Linear', distribution='uniform'),
        dict(type='Constant', layer='LayerNorm', val=1.0, bias=0.0)
    ])
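For context, a base model config like this is normally not run on its own: a full pretraining config inherits it through MMEngine's `_base_` mechanism and overrides only the fields that differ. Below is a minimal sketch of such a config; the relative `_base_` paths and the override value are illustrative assumptions rather than files or settings confirmed by this page.

# Hypothetical pretraining config reusing the base model settings shown above.
# The _base_ paths and the mask_ratio override are assumptions for illustration;
# check the repository's configs/ directory for the actual file names.
_base_ = [
    '../_base_/models/itpn_hivit-base-p16.py',   # the file shown above (path assumed)
    '../_base_/datasets/imagenet_bs512_mae.py',  # assumed dataset base config
    '../_base_/default_runtime.py',
]

# Override a single field from the inherited model dict, e.g. the mask ratio.
model = dict(backbone=dict(mask_ratio=0.6))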