Fix whitespace and other code-style issues
parent f83ff59c97
commit 009f347d64

@@ -33,13 +33,13 @@
 <td>pytorch version</td>
 <td></td>
 <td></td>
 <td></td>
 <td>95.54</td>
 </tr>
 <tr>
 <td>paddle version</td>
 <td></td>
 <td></td>
 <td></td>
 <td>95.61</td>
 </tr>
 </table>

 On cifar10, the configuration files and trained models for the paddle version are listed in the table below.

@@ -14,8 +14,6 @@

 import copy
 import importlib
-from pyexpat import features

 import paddle.nn as nn
 from paddle.jit import to_static
 from paddle.static import InputSpec

@@ -235,4 +235,4 @@ def WideResNet(depth,
 num_classes=num_classes,
 proj=proj,
 low_dim=low_dim,
-**kwargs)
+**kwargs)

@@ -30,8 +30,8 @@ Arch:
 name: WideResNet
 widen_factor: 8
 depth: 28
-dropout: 0 # called drop_rate in CCSSL
-num_classes: &sign_num_classes 100
+dropout: 0
+num_classes: 100
 low_dim: 64
 proj: true
 proj_after: false
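
The Arch block above is consumed by a config-driven builder. A minimal sketch of how such a builder could map these keys onto the WideResNet factory shown in the previous hunk is given below; the module path "ppcls.arch.backbone" and the helper name build_arch are assumptions for illustration, not the repository's actual API.

    import importlib

    # Keys mirror the Arch block above; "name" selects the factory, the rest become kwargs.
    arch_cfg = {
        "name": "WideResNet",
        "widen_factor": 8,
        "depth": 28,
        "dropout": 0,
        "num_classes": 100,
        "low_dim": 64,
        "proj": True,
        "proj_after": False,
    }

    def build_arch(cfg, module_path="ppcls.arch.backbone"):
        cfg = dict(cfg)                  # avoid mutating the caller's config
        name = cfg.pop("name")           # e.g. "WideResNet"
        factory = getattr(importlib.import_module(module_path), name)
        return factory(**cfg)

    # model = build_arch(arch_cfg)       # only meaningful inside the repository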

@@ -59,14 +59,6 @@ UnLabelLoss:
 - SoftSupConLoss:
 weight: 1.0
 temperature: 0.07
-# - CCSSLLoss:
-# CELoss:
-# weight: 1.0
-# reduction: "none"
-# SoftSupConLoss:
-# weight: 1.0
-# temperature: 0.07
-# weight: 1.

 Optimizer:
 name: Momentum

@@ -80,8 +72,8 @@ Optimizer:
 num_training_steps: 524800

 DataLoader:
-mean: &sign_mean [0.5071, 0.4867, 0.4408]
-std: &sign_std [0.2675, 0.2565, 0.2761]
+mean: [0.5071, 0.4867, 0.4408]
+std: [0.2675, 0.2565, 0.2761]
 Train:
 dataset:
 name: CIFAR100SSL
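
This hunk (and the ones below) replaces YAML anchors and aliases (&sign_mean / *sign_mean) with inlined literal lists. Both forms parse to identical values; a quick PyYAML check with a pair of illustrative keys (eval_mean is hypothetical, used only for the comparison) shows the equivalence.

    import yaml  # PyYAML, used here only to demonstrate anchor/alias resolution

    with_anchor = "mean: &sign_mean [0.5071, 0.4867, 0.4408]\neval_mean: *sign_mean\n"
    inlined = "mean: [0.5071, 0.4867, 0.4408]\neval_mean: [0.5071, 0.4867, 0.4408]\n"

    # The alias expands to the same list, so both documents load to equal dicts.
    assert yaml.safe_load(with_anchor) == yaml.safe_load(inlined)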

@@ -99,11 +91,11 @@ DataLoader:
 padding_mode: "reflect"
 - ToTensor:
 - Normalize:
-mean: *sign_mean
-std: *sign_std
+mean: [0.5071, 0.4867, 0.4408]
+std: [0.2675, 0.2565, 0.2761]

 sampler:
-name: DistributedBatchSampler # DistributedBatchSampler
+name: DistributedBatchSampler
 batch_size: 16
 drop_last: true
 shuffle: true

@@ -111,8 +103,6 @@ DataLoader:
 num_workers: 4
 use_shared_memory: true

-
-
 UnLabelTrain:
 dataset:
 name: CIFAR100SSL

@@ -129,8 +119,8 @@ DataLoader:
 padding_mode: 'reflect'
 - ToTensor:
 - Normalize:
-mean: *sign_mean
-std: *sign_std
+mean: [0.5071, 0.4867, 0.4408]
+std: [0.2675, 0.2565, 0.2761]

 transform_s1:
 - RandomHorizontalFlip:

@@ -144,8 +134,8 @@ DataLoader:
 m: 10
 - ToTensor:
 - Normalize:
-mean: *sign_mean
-std: *sign_std
+mean: [0.5071, 0.4867, 0.4408]
+std: [0.2675, 0.2565, 0.2761]

 transform_s2:
 - RandomResizedCrop:

@@ -163,12 +153,9 @@ DataLoader:
 - RandomGrayscale:
 p: 0.2
 - ToTensor:
-# - Normalize:
-# mean: *sign_mean
-# std: *sign_std

 sampler:
-name: DistributedBatchSampler # DistributedBatchSampler
+name: DistributedBatchSampler
 batch_size: 112
 drop_last: true
 shuffle: true

@@ -185,8 +172,8 @@ DataLoader:
 transform_ops:
 - ToTensor:
 - Normalize:
-mean: *sign_mean
-std: *sign_std
+mean: [0.5071, 0.4867, 0.4408]
+std: [0.2675, 0.2565, 0.2761]
 sampler:
 name: DistributedBatchSampler
 batch_size: 16
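
The Eval pipeline above is simply ToTensor followed by Normalize with the usual CIFAR-100 channel statistics. A rough paddle.vision equivalent is sketched below for reference; the real pipeline is assembled from the ppcls preprocess ops, so this is an approximation, not the code the config actually drives.

    from paddle.vision import transforms as T

    eval_tf = T.Compose([
        T.ToTensor(),                                # HWC uint8 -> CHW float32 in [0, 1]
        T.Normalize(mean=[0.5071, 0.4867, 0.4408],   # per-channel (x - mean) / std
                    std=[0.2675, 0.2565, 0.2761]),
    ])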

@@ -196,8 +183,6 @@ DataLoader:
 num_workers: 4
 use_shared_memory: true

-
-
 Metric:
 Eval:
 - TopkAcc:

@@ -7,7 +7,7 @@ Global:
 eval_during_train: true
 eval_interval: 1
 epochs: 1024
-iter_per_epoch: 40
+iter_per_epoch: 1024
 print_batch_step: 20
 use_visualdl: false
 use_dali: false
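
With this change the schedule runs epochs × iter_per_epoch = 1024 × 1024 = 1,048,576 iterations in total, instead of 1024 × 40 = 40,960, assuming one optimizer step per iteration.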

@@ -196,8 +196,6 @@ DataLoader:
 num_workers: 4
 use_shared_memory: true

-
-
 Metric:
 Eval:
 - TopkAcc:

@@ -51,13 +51,10 @@ from paddle.vision.transforms import Pad as Pad_paddle_vision
 from ppcls.data.preprocess.batch_ops.batch_operators import MixupOperator, CutmixOperator, OpSampler, FmixOperator
 from ppcls.data.preprocess.batch_ops.batch_operators import MixupCutmixHybrid



 import numpy as np
 from PIL import Image
 import random


 def transform(data, ops=[]):
 """ transform """
 for op in ops:

@@ -120,5 +117,4 @@ class TimmAutoAugment(RawTimmAutoAugment):
 if isinstance(img, Image.Image):
 img = np.asarray(img)

-return img
-
+return img

@@ -1,7 +1,4 @@
-
-
 from __future__ import absolute_import, division, print_function

 import time
-from turtle import update
 import paddle

@@ -11,11 +8,11 @@ from ppcls.utils import profiler
 from paddle.nn import functional as F
 import numpy as np
 import paddle
-# from reprod_log import ReprodLogger


 def train_epoch_fixmatch_ccssl(engine, epoch_id, print_batch_step):

-print(engine.model.state_dict().keys())
-assert 1==0
 tic = time.time()
 if not hasattr(engine, 'train_dataloader_iter'):
 engine.train_dataloader_iter = iter(engine.train_dataloader)
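
The last two context lines show the cached-iterator pattern this trainer relies on. A minimal self-contained sketch is below; "engine" is assumed to be any object that can carry the iterator between calls, which is an assumption for illustration rather than a copy of the ppcls engine API.

    def next_labeled_batch(engine, loader_name="train_dataloader"):
        # Create the iterator once and cache it on the engine, as in the hunk above.
        iter_name = loader_name + "_iter"
        if not hasattr(engine, iter_name):
            setattr(engine, iter_name, iter(getattr(engine, loader_name)))
        try:
            return next(getattr(engine, iter_name))
        except StopIteration:
            # The labeled loader is typically much shorter than the unlabeled one,
            # so restart it and keep training instead of ending the epoch here.
            setattr(engine, iter_name, iter(getattr(engine, loader_name)))
            return next(getattr(engine, iter_name))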

@@ -135,5 +132,4 @@ def get_loss(engine,
 loss_dict[k] = v
 loss_dict['loss'] = loss_dict_label['loss'] + unlabel_loss['loss']

-return loss_dict, logits_x
-
+return loss_dict, logits_x
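
The lines above merge the labeled and unlabeled loss dictionaries and sum their scalar totals. A simplified sketch of that merge, using only the key names visible in the hunk, might look like this:

    def merge_losses(loss_dict_label, unlabel_loss):
        # Keep every named loss term, then sum the two scalar "loss" entries.
        loss_dict = {}
        for part in (loss_dict_label, unlabel_loss):
            for k, v in part.items():
                if k != "loss":
                    loss_dict[k] = v
        loss_dict["loss"] = loss_dict_label["loss"] + unlabel_loss["loss"]
        return loss_dict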

@@ -1,7 +1,16 @@
-"""
-CCSSL loss
-author: zhhike
-"""
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

 import paddle
 import paddle.nn as nn

@@ -65,5 +74,4 @@ class SoftSupConLoss(nn.Layer):
 if reduction == 'mean':
 loss = loss.mean()

-return {"SoftSupConLoss": loss}
-
+return {"SoftSupConLoss": loss}