<!doctype html>
<html lang="zh" class="no-js">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width,initial-scale=1">
<meta name="description" content="Awesome multilingual OCR toolkits based on PaddlePaddle (practical ultra lightweight OCR system, support 80+ languages recognition, provide data annotation and synthesis tools, support training and deployment among server, mobile, embedded and IoT devices)">
<meta name="author" content="PaddleOCR PMC">
<link rel="canonical" href="https://paddlepaddle.github.io/PaddleOCR/v2.9/FAQ.html">
<link rel="prev" href="datasets/kie_datasets.html">
<link rel="next" href="community/community_contribution.html">
<link rel="icon" href="static/images/logo.jpg">
<meta name="generator" content="mkdocs-1.6.1, mkdocs-material-9.5.48">
<title>FAQ - PaddleOCR 文档</title>
<link rel="stylesheet" href="assets/stylesheets/main.6f8fc17f.min.css">
<link rel="stylesheet" href="assets/stylesheets/palette.06af60db.min.css">
<style>:root{--md-admonition-icon--note:url('data:image/svg+xml;charset=utf-8,<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16"><path d="M1 7.775V2.75C1 1.784 1.784 1 2.75 1h5.025c.464 0 .91.184 1.238.513l6.25 6.25a1.75 1.75 0 0 1 0 2.474l-5.026 5.026a1.75 1.75 0 0 1-2.474 0l-6.25-6.25A1.75 1.75 0 0 1 1 7.775m1.5 0c0 .066.026.13.073.177l6.25 6.25a.25.25 0 0 0 .354 0l5.025-5.025a.25.25 0 0 0 0-.354l-6.25-6.25a.25.25 0 0 0-.177-.073H2.75a.25.25 0 0 0-.25.25ZM6 5a1 1 0 1 1 0 2 1 1 0 0 1 0-2"/></svg>');--md-admonition-icon--abstract:url('data:image/svg+xml;charset=utf-8,<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16"><path d="M2.5 1.75v11.5c0 .138.112.25.25.25h3.17a.75.75 0 0 1 0 1.5H2.75A1.75 1.75 0 0 1 1 13.25V1.75C1 .784 1.784 0 2.75 0h8.5C12.216 0 13 .784 13 1.75v7.736a.75.75 0 0 1-1.5 0V1.75a.25.25 0 0 0-.25-.25h-8.5a.25.25 0 0 0-.25.25m13.274 9.537zl-4.557 4.45a.75.75 0 0 1-1.055-.008l-1.943-1.95a.75.75 0 0 1 1.062-1.058l1.419 1.425 4.026-3.932a.75.75 0 1 1 1.048 1.074M4.75 4h4.5a.75.75 0 0 1 0 1.5h-4.5a.75.75 0 0 1 0-1.5M4 7.75A.75.75 0 0 1 4.75 7h2a.75.75 0 0 1 0 1.5h-2A.75.75 0 0 1 4 7.75"/></svg>');--md-admonition-icon--info:url('data:image/svg+xml;charset=utf-8,<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16"><path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8m8-6.5a6.5 6.5 0 1 0 0 13 6.5 6.5 0 0 0 0-13M6.5 7.75A.75.75 0 0 1 7.25 7h1a.75.75 0 0 1 .75.75v2.75h.25a.75.75 0 0 1 0 1.5h-2a.75.75 0 0 1 0-1.5h.25v-2h-.25a.75.75 0 0 1-.75-.75M8 6a1 1 0 1 1 0-2 1 1 0 0 1 0 2"/></svg>');--md-admonition-icon--tip:url('data:image/svg+xml;charset=utf-8,<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16"><path d="M3.499.75a.75.75 0 0 1 1.5 0v.996C5.9 2.903 6.793 3.65 7.662 4.376l.24.202c-.036-.694.055-1.422.426-2.163C9.1.873 10.794-.045 12.622.26 14.408.558 16 1.94 16 4.25c0 1.278-.954 2.575-2.44 2.734l.146.508.065.22c.203.701.412 1.455.476 2.226.142 1.707-.4 3.03-1.487 3.898C11.714 14.671 10.27 15 8.75 15h-6a.75.75 0 0 1 0-1.5h1.376a4.5 4.5 0 0 1-.563-1.191 3.84 3.84 0 0 1-.05-2.063 4.65 4.65 0 0 1-2.025-.293.75.75 0 0 1 .525-1.406c1.357.507 2.376-.006 2.698-.318l.009-.01a.747.747 0 0 1 1.06 0 .75.75 0 0 1-.012 1.074c-.912.92-.992 1.835-.768 2.586.221.74.745 1.337 1.196 1.621H8.75c1.343 0 2.398-.296 3.074-.836.635-.507 1.036-1.31.928-2.602-.05-.603-.216-1.224-.422-1.93l-.064-.221c-.12-.407-.246-.84-.353-1.29a2.4 2.4 0 0 1-.507-.441 3.1 3.1 0 0 1-.633-1.248.75.75 0 0 1 1.455-.364c.046.185.144.436.31.627.146.168.353.305.712.305.738 0 1.25-.615 1.25-1.25 0-1.47-.95-2.315-2.123-2.51-1.172-.196-2.227.387-2.706 1.345-.46.92-.27 1.774.019 3.062l.042.19.01.05c.348.443.666.949.94 1.553a.75.75 0 1 1-1.365.62c-.553-1.217-1.32-1.94-2.3-2.768L6.7 5.527c-.814-.68-1.75-1.462-2.692-2.619a3.7 3.7 0 0 0-1.023.88c-.406.495-.663 1.036-.722 1.508.116.122.306.21.591.239.388.038.797-.06 1.032-.19a.75.75 0 0 1 .728 1.31c-.515.287-1.23.439-1.906.373-.682-.067-1.473-.38-1.879-1.193L.75 5.677V5.5c0-.984.48-1.94 1.077-2.664.46-.559 1.05-1.055 1.673-1.353z"/></svg>');--md-admonition-icon--success:url('data:image/svg+xml;charset=utf-8,<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16"><path d="M13.78 4.22a.75.75 0 0 1 0 1.06l-7.25 7.25a.75.75 0 0 1-1.06 0L2.22 9.28a.75.75 0 0 1 .018-1.042.75.75 0 0 1 1.042-.018L6 10.94l6.72-6.72a.75.75 0 0 1 1.06 0"/></svg>');--md-admonition-icon--question:url('data:image/svg+xml;charset=utf-8,<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16"><path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8m8-6.5a6.5 6.5 0 1 0 0 13 6.5 6.5 0 0 0 0-13M6.92 6.085h.001a.749.749 0 1 1-1.342-.67c.169-.339.436-.701.849-.977C6.845 4.16 7.369 4 8 4a2.76 2.76 0 0 1 1.637.525c.503.377.863.965.863 1.725 0 .448-.115.83-.329 1.15-.205.307-.47.513-.692.662-.109.072-.22.138-.313.195l-.006.004a6 6 0 0 0-.26.16 1 1 0 0 0-.276.245.75.75 0 0 1-1.248-.832c.184-.264.42-.489.692-.661q.154-.1.313-.195l.007-.004c.1-.061.182-.11.258-.161a1 1 0 0 0 .277-.245C8.96 6.514 9 6.427 9 6.25a.61.61 0 0 0-.262-.525A1.27 1.27 0 0 0 8 5.5c-.369 0-.595.09-.74.187a1 1 0 0 0-.34.398M9 11a1 1 0 1 1-2 0 1 1 0 0 1 2 0"/></svg>');--md-
<style>.md-tag.md-tag--default-tag{--md-tag-icon:url('data:image/svg+xml;charset=utf-8,<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><!--! Font Awesome Free 6.7.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2024 Fonticons, Inc.--><path d="M0 80v149.5c0 17 6.7 33.3 18.7 45.3l176 176c25 25 65.5 25 90.5 0l133.5-133.5c25-25 25-65.5 0-90.5l-176-176c-12-12-28.3-18.7-45.3-18.7L48 32C21.5 32 0 53.5 0 80m112 32a32 32 0 1 1 0 64 32 32 0 1 1 0-64"/></svg>');}.md-tag.md-tag--hardware-tag{--md-tag-icon:url('data:image/svg+xml;charset=utf-8,<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.7.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2024 Fonticons, Inc.--><path d="M176 24c0-13.3-10.7-24-24-24s-24 10.7-24 24v40c-35.3 0-64 28.7-64 64H24c-13.3 0-24 10.7-24 24s10.7 24 24 24h40v56H24c-13.3 0-24 10.7-24 24s10.7 24 24 24h40v56H24c-13.3 0-24 10.7-24 24s10.7 24 24 24h40c0 35.3 28.7 64 64 64v40c0 13.3 10.7 24 24 24s24-10.7 24-24v-40h56v40c0 13.3 10.7 24 24 24s24-10.7 24-24v-40h56v40c0 13.3 10.7 24 24 24s24-10.7 24-24v-40c35.3 0 64-28.7 64-64h40c13.3 0 24-10.7 24-24s-10.7-24-24-24h-40v-56h40c13.3 0 24-10.7 24-24s-10.7-24-24-24h-40v-56h40c13.3 0 24-10.7 24-24s-10.7-24-24-24h-40c0-35.3-28.7-64-64-64V24c0-13.3-10.7-24-24-24s-24 10.7-24 24v40h-56V24c0-13.3-10.7-24-24-24s-24 10.7-24 24v40h-56zm-16 104h192c17.7 0 32 14.3 32 32v192c0 17.7-14.3 32-32 32H160c-17.7 0-32-14.3-32-32V160c0-17.7 14.3-32 32-32m192 32H160v192h192z"/></svg>');}.md-tag.md-tag--software-tag{--md-tag-icon:url('data:image/svg+xml;charset=utf-8,<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 640 512"><!--! Font Awesome Free 6.7.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2024 Fonticons, Inc.--><path d="M64 96c0-35.3 28.7-64 64-64h384c35.3 0 64 28.7 64 64v256h-64V96H128v256H64zM0 403.2C0 392.6 8.6 384 19.2 384h601.6c10.6 0 19.2 8.6 19.2 19.2 0 42.4-34.4 76.8-76.8 76.8H76.8C34.4 480 0 445.6 0 403.2M281 209l-31 31 31 31c9.4 9.4 9.4 24.6 0 33.9s-24.6 9.4-33.9 0l-48-48c-9.4-9.4-9.4-24.6 0-33.9l48-48c9.4-9.4 24.6-9.4 33.9 0s9.4 24.6 0 33.9zm112-34 48 48c9.4 9.4 9.4 24.6 0 33.9l-48 48c-9.4 9.4-24.6 9.4-33.9 0s-9.4-24.6 0-33.9l31-31-31-31c-9.4-9.4-9.4-24.6 0-33.9s24.6-9.4 33.9 0z"/></svg>');}</style>
<script src="https://unpkg.com/iframe-worker/shim"></script>
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,300i,400,400i,700,700i%7CRoboto+Mono:400,400i,700,700i&display=fallback">
<style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
<link rel="stylesheet" href="https://unpkg.com/katex@0/dist/katex.min.css">
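<!-- Theme state helpers (description of the script below, not part of the original page): __md_get/__md_set read and write JSON values in Web Storage under keys prefixed with the site's base path (__md_scope), which keeps state separate when several doc versions are served from one host; __md_hash is a simple 31-based string hash. -->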
<script>__md_scope=new URL(".",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
</head>
< body dir = "ltr" data-md-color-scheme = "default" data-md-color-primary = "indigo" data-md-color-accent = "indigo" >
< input class = "md-toggle" data-md-toggle = "drawer" type = "checkbox" id = "__drawer" autocomplete = "off" >
< input class = "md-toggle" data-md-toggle = "search" type = "checkbox" id = "__search" autocomplete = "off" >
< label class = "md-overlay" for = "__drawer" > < / label >
< div data-md-component = "skip" >
< a href = "#1" class = "md-skip" >
跳转至
< / a >
< / div >
< div data-md-component = "announce" >
< / div >
< div data-md-color-scheme = "default" data-md-component = "outdated" hidden >
< aside class = "md-banner md-banner--warning" >
< div class = "md-banner__inner md-grid md-typeset" >
You're not viewing the latest version.
< a href = "../." >
< strong > Click here to go to latest.< / strong >
< / a >
< / div >
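<!-- The script below un-hides the version warning banner above when sessionStorage holds a truthy __outdated flag (the flag itself is presumably written elsewhere by the theme's version check). -->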
<script>var el=document.querySelector("[data-md-component=outdated]"),outdated=__md_get("__outdated",sessionStorage);!0===outdated&&el&&(el.hidden=!1)</script>
< / aside >
< / div >
< header class = "md-header md-header--shadow md-header--lifted" data-md-component = "header" >
< nav class = "md-header__inner md-grid" aria-label = "页眉" >
< a href = "index.html" title = "PaddleOCR 文档" class = "md-header__button md-logo" aria-label = "PaddleOCR 文档" data-md-component = "logo" >
< img src = "static/images/logo.jpg" alt = "logo" >
< / a >
< label class = "md-header__button md-icon" for = "__drawer" >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 24 24" > < path d = "M3 6h18v2H3zm0 5h18v2H3zm0 5h18v2H3z" / > < / svg >
< / label >
< div class = "md-header__title" data-md-component = "header-title" >
< div class = "md-header__ellipsis" >
< div class = "md-header__topic" >
< span class = "md-ellipsis" >
PaddleOCR 文档
< / span >
< / div >
< div class = "md-header__topic" data-md-component = "header-topic" >
< span class = "md-ellipsis" >
FAQ
< / span >
< / div >
< / div >
< / div >
< form class = "md-header__option" data-md-component = "palette" >
< input class = "md-option" data-md-color-media = "(prefers-color-scheme: light)" data-md-color-scheme = "default" data-md-color-primary = "indigo" data-md-color-accent = "indigo" aria-label = "Switch to dark mode" type = "radio" name = "__palette" id = "__palette_0" >
< label class = "md-header__button md-icon" title = "Switch to dark mode" for = "__palette_1" hidden >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 24 24" > < path d = "M12 8a4 4 0 0 0-4 4 4 4 0 0 0 4 4 4 4 0 0 0 4-4 4 4 0 0 0-4-4m0 10a6 6 0 0 1-6-6 6 6 0 0 1 6-6 6 6 0 0 1 6 6 6 6 0 0 1-6 6m8-9.31V4h-4.69L12 .69 8.69 4H4v4.69L.69 12 4 15.31V20h4.69L12 23.31 15.31 20H20v-4.69L23.31 12z" / > < / svg >
< / label >
< input class = "md-option" data-md-color-media = "(prefers-color-scheme: dark)" data-md-color-scheme = "slate" data-md-color-primary = "black" data-md-color-accent = "indigo" aria-label = "Switch to system preference" type = "radio" name = "__palette" id = "__palette_1" >
< label class = "md-header__button md-icon" title = "Switch to system preference" for = "__palette_0" hidden >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 24 24" > < path d = "M12 18c-.89 0-1.74-.2-2.5-.55C11.56 16.5 13 14.42 13 12s-1.44-4.5-3.5-5.45C10.26 6.2 11.11 6 12 6a6 6 0 0 1 6 6 6 6 0 0 1-6 6m8-9.31V4h-4.69L12 .69 8.69 4H4v4.69L.69 12 4 15.31V20h4.69L12 23.31 15.31 20H20v-4.69L23.31 12z" / > < / svg >
< / label >
< / form >
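<!-- The script below restores a previously chosen palette: it reads __md_get("__palette"), resolves a stored "(prefers-color-scheme)" system preference against the matching hidden radio input, then mirrors the scheme, primary, and accent values onto data-md-color-* attributes of the body element. -->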
<script>var palette=__md_get("__palette");if(palette&&palette.color){if("(prefers-color-scheme)"===palette.color.media){var media=matchMedia("(prefers-color-scheme: light)"),input=document.querySelector(media.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");palette.color.media=input.getAttribute("data-md-color-media"),palette.color.scheme=input.getAttribute("data-md-color-scheme"),palette.color.primary=input.getAttribute("data-md-color-primary"),palette.color.accent=input.getAttribute("data-md-color-accent")}for(var [key,value] of Object.entries(palette.color))document.body.setAttribute("data-md-color-"+key,value)}</script>
< div class = "md-header__option" >
< div class = "md-select" >
< button class = "md-header__button md-icon" aria-label = "选择当前语言" >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 24 24" > < path d = "m12.87 15.07-2.54-2.51.03-.03A17.5 17.5 0 0 0 14.07 6H17V4h-7V2H8v2H1v2h11.17C11.5 7.92 10.44 9.75 9 11.35 8.07 10.32 7.3 9.19 6.69 8h-2c.73 1.63 1.73 3.17 2.98 4.56l-5.09 5.02L4 19l5-5 3.11 3.11zM18.5 10h-2L12 22h2l1.12-3h4.75L21 22h2zm-2.62 7 1.62-4.33L19.12 17z" / > < / svg >
< / button >
< div class = "md-select__inner" >
< ul class = "md-select__list" >
< li class = "md-select__item" >
< a href = "FAQ.html" hreflang = "zh" class = "md-select__link" >
简体中文
< / a >
< / li >
< li class = "md-select__item" >
< a href = "en/FAQ.html" hreflang = "en" class = "md-select__link" >
English
< / a >
< / li >
< li class = "md-select__item" >
< a href = "https://github.com/PaddlePaddle/PaddleOCR/discussions/13374" hreflang = "null" class = "md-select__link" >
Help translating
< / a >
< / li >
< / ul >
< / div >
< / div >
< / div >
< label class = "md-header__button md-icon" for = "__search" >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 24 24" > < path d = "M9.5 3A6.5 6.5 0 0 1 16 9.5c0 1.61-.59 3.09-1.56 4.23l.27.27h.79l5 5-1.5 1.5-5-5v-.79l-.27-.27A6.52 6.52 0 0 1 9.5 16 6.5 6.5 0 0 1 3 9.5 6.5 6.5 0 0 1 9.5 3m0 2C7 5 5 7 5 9.5S7 14 9.5 14 14 12 14 9.5 12 5 9.5 5" / > < / svg >
< / label >
< div class = "md-search" data-md-component = "search" role = "dialog" >
< label class = "md-search__overlay" for = "__search" > < / label >
< div class = "md-search__inner" role = "search" >
< form class = "md-search__form" name = "search" >
< input type = "text" class = "md-search__input" name = "query" aria-label = "搜索" placeholder = "搜索" autocapitalize = "off" autocorrect = "off" autocomplete = "off" spellcheck = "false" data-md-component = "search-query" required >
< label class = "md-search__icon md-icon" for = "__search" >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 24 24" > < path d = "M9.5 3A6.5 6.5 0 0 1 16 9.5c0 1.61-.59 3.09-1.56 4.23l.27.27h.79l5 5-1.5 1.5-5-5v-.79l-.27-.27A6.52 6.52 0 0 1 9.5 16 6.5 6.5 0 0 1 3 9.5 6.5 6.5 0 0 1 9.5 3m0 2C7 5 5 7 5 9.5S7 14 9.5 14 14 12 14 9.5 12 5 9.5 5" / > < / svg >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 320 512" > <!-- ! Font Awesome Free 6.7.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2024 Fonticons, Inc. --> < path d = "M41.4 233.4c-12.5 12.5-12.5 32.8 0 45.3l160 160c12.5 12.5 32.8 12.5 45.3 0s12.5-32.8 0-45.3L109.3 256l137.3-137.4c12.5-12.5 12.5-32.8 0-45.3s-32.8-12.5-45.3 0l-160 160z" / > < / svg >
< / label >
< nav class = "md-search__options" aria-label = "查找" >
< a href = "javascript:void(0)" class = "md-search__icon md-icon" title = "分享" aria-label = "分享" data-clipboard data-clipboard-text = "" data-md-component = "search-share" tabindex = "-1" >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 24 24" > < path d = "M18 16.08c-.76 0-1.44.3-1.96.77L8.91 12.7c.05-.23.09-.46.09-.7s-.04-.47-.09-.7l7.05-4.11c.54.5 1.25.81 2.04.81a3 3 0 0 0 3-3 3 3 0 0 0-3-3 3 3 0 0 0-3 3c0 .24.04.47.09.7L8.04 9.81C7.5 9.31 6.79 9 6 9a3 3 0 0 0-3 3 3 3 0 0 0 3 3c.79 0 1.5-.31 2.04-.81l7.12 4.15c-.05.21-.08.43-.08.66 0 1.61 1.31 2.91 2.92 2.91s2.92-1.3 2.92-2.91A2.92 2.92 0 0 0 18 16.08" / > < / svg >
< / a >
< button type = "reset" class = "md-search__icon md-icon" title = "清空当前内容" aria-label = "清空当前内容" tabindex = "-1" >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 24 24" > < path d = "M19 6.41 17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12z" / > < / svg >
< / button >
< / nav >
< div class = "md-search__suggest" data-md-component = "search-suggest" > < / div >
< / form >
< div class = "md-search__output" >
< div class = "md-search__scrollwrap" tabindex = "0" data-md-scrollfix >
< div class = "md-search-result" data-md-component = "search-result" >
< div class = "md-search-result__meta" >
正在初始化搜索引擎
< / div >
< ol class = "md-search-result__list" role = "presentation" > < / ol >
< / div >
< / div >
< / div >
< / div >
< / div >
< div class = "md-header__source" >
< a href = "https://github.com/PaddlePaddle/PaddleOCR" title = "前往仓库" class = "md-source" data-md-component = "source" >
< div class = "md-source__icon md-icon" >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 496 512" > <!-- ! Font Awesome Free 6.7.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2024 Fonticons, Inc. --> < path d = "M165.9 397.4c0 2-2.3 3.6-5.2 3.6-3.3.3-5.6-1.3-5.6-3.6 0-2 2.3-3.6 5.2-3.6 3-.3 5.6 1.3 5.6 3.6m-31.1-4.5c-.7 2 1.3 4.3 4.3 4.9 2.6 1 5.6 0 6.2-2s-1.3-4.3-4.3-5.2c-2.6-.7-5.5.3-6.2 2.3m44.2-1.7c-2.9.7-4.9 2.6-4.6 4.9.3 2 2.9 3.3 5.9 2.6 2.9-.7 4.9-2.6 4.6-4.6-.3-1.9-3-3.2-5.9-2.9M244.8 8C106.1 8 0 113.3 0 252c0 110.9 69.8 205.8 169.5 239.2 12.8 2.3 17.3-5.6 17.3-12.1 0-6.2-.3-40.4-.3-61.4 0 0-70 15-84.7-29.8 0 0-11.4-29.1-27.8-36.6 0 0-22.9-15.7 1.6-15.4 0 0 24.9 2 38.6 25.8 21.9 38.6 58.6 27.5 72.9 20.9 2.3-16 8.8-27.1 16-33.7-55.9-6.2-112.3-14.3-112.3-110.5 0-27.5 7.6-41.3 23.6-58.9-2.6-6.5-11.1-33.3 2.6-67.9 20.9-6.5 69 27 69 27 20-5.6 41.5-8.5 62.8-8.5s42.8 2.9 62.8 8.5c0 0 48.1-33.6 69-27 13.7 34.7 5.2 61.4 2.6 67.9 16 17.7 25.8 31.5 25.8 58.9 0 96.5-58.9 104.2-114.8 110.5 9.2 7.9 17 22.9 17 46.4 0 33.7-.3 75.4-.3 83.6 0 6.5 4.6 14.4 17.3 12.1C428.2 457.8 496 362.9 496 252 496 113.3 383.5 8 244.8 8M97.2 352.9c-1.3 1-1 3.3.7 5.2 1.6 1.6 3.9 2.3 5.2 1 1.3-1 1-3.3-.7-5.2-1.6-1.6-3.9-2.3-5.2-1m-10.8-8.1c-.7 1.3.3 2.9 2.3 3.9 1.6 1 3.6.7 4.3-.7.7-1.3-.3-2.9-2.3-3.9-2-.6-3.6-.3-4.3.7m32.4 35.6c-1.6 1.3-1 4.3 1.3 6.2 2.3 2.3 5.2 2.6 6.5 1 1.3-1.3.7-4.3-1.3-6.2-2.2-2.3-5.2-2.6-6.5-1m-11.4-14.7c-1.6 1-1.6 3.6 0 5.9s4.3 3.3 5.6 2.3c1.6-1.3 1.6-3.9 0-6.2-1.4-2.3-4-3.3-5.6-2" / > < / svg >
< / div >
< div class = "md-source__repository" >
PaddlePaddle/PaddleOCR
< / div >
< / a >
< / div >
< / nav >
< nav class = "md-tabs" aria-label = "标签" data-md-component = "tabs" >
< div class = "md-grid" >
< ul class = "md-tabs__list" >
< li class = "md-tabs__item" >
< a href = "index.html" class = "md-tabs__link" >
Home
< / a >
< / li >
< li class = "md-tabs__item" >
< a href = "quick_start.html" class = "md-tabs__link" >
快速开始
< / a >
< / li >
< li class = "md-tabs__item" >
< a href = "update.html" class = "md-tabs__link" >
近期更新
< / a >
< / li >
< li class = "md-tabs__item" >
< a href = "paddlex/overview.html" class = "md-tabs__link" >
低代码全流程开发
< / a >
< / li >
< li class = "md-tabs__item" >
< a href = "model/index.html" class = "md-tabs__link" >
模型
< / a >
< / li >
< li class = "md-tabs__item" >
< a href = "ppocr/overview.html" class = "md-tabs__link" >
PP-OCR 文本检测识别
< / a >
< / li >
< li class = "md-tabs__item" >
< a href = "ppstructure/overview.html" class = "md-tabs__link" >
PP-Structure文档分析
< / a >
< / li >
< li class = "md-tabs__item" >
< a href = "algorithm/overview.html" class = "md-tabs__link" >
前沿算法与模型
< / a >
< / li >
< li class = "md-tabs__item" >
< a href = "applications/overview.html" class = "md-tabs__link" >
场景应用
< / a >
< / li >
< li class = "md-tabs__item" >
< a href = "data_anno_synth/overview.html" class = "md-tabs__link" >
数据标注与合成
< / a >
< / li >
< li class = "md-tabs__item" >
< a href = "datasets/datasets.html" class = "md-tabs__link" >
数据集
< / a >
< / li >
< li class = "md-tabs__item md-tabs__item--active" >
< a href = "FAQ.html" class = "md-tabs__link" >
FAQ
< / a >
< / li >
< li class = "md-tabs__item" >
< a href = "community/community_contribution.html" class = "md-tabs__link" >
社区
< / a >
< / li >
< / ul >
< / div >
< / nav >
< / header >
< div class = "md-container" data-md-component = "container" >
< main class = "md-main" data-md-component = "main" >
< div class = "md-main__inner md-grid" >
< div class = "md-sidebar md-sidebar--primary" data-md-component = "sidebar" data-md-type = "navigation" hidden >
< div class = "md-sidebar__scrollwrap" >
< div class = "md-sidebar__inner" >
< nav class = "md-nav md-nav--primary md-nav--lifted" aria-label = "导航栏" data-md-level = "0" >
< label class = "md-nav__title" for = "__drawer" >
< a href = "index.html" title = "PaddleOCR 文档" class = "md-nav__button md-logo" aria-label = "PaddleOCR 文档" data-md-component = "logo" >
< img src = "static/images/logo.jpg" alt = "logo" >
< / a >
PaddleOCR 文档
< / label >
< div class = "md-nav__source" >
< a href = "https://github.com/PaddlePaddle/PaddleOCR" title = "前往仓库" class = "md-source" data-md-component = "source" >
< div class = "md-source__icon md-icon" >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 496 512" > <!-- ! Font Awesome Free 6.7.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2024 Fonticons, Inc. --> < path d = "M165.9 397.4c0 2-2.3 3.6-5.2 3.6-3.3.3-5.6-1.3-5.6-3.6 0-2 2.3-3.6 5.2-3.6 3-.3 5.6 1.3 5.6 3.6m-31.1-4.5c-.7 2 1.3 4.3 4.3 4.9 2.6 1 5.6 0 6.2-2s-1.3-4.3-4.3-5.2c-2.6-.7-5.5.3-6.2 2.3m44.2-1.7c-2.9.7-4.9 2.6-4.6 4.9.3 2 2.9 3.3 5.9 2.6 2.9-.7 4.9-2.6 4.6-4.6-.3-1.9-3-3.2-5.9-2.9M244.8 8C106.1 8 0 113.3 0 252c0 110.9 69.8 205.8 169.5 239.2 12.8 2.3 17.3-5.6 17.3-12.1 0-6.2-.3-40.4-.3-61.4 0 0-70 15-84.7-29.8 0 0-11.4-29.1-27.8-36.6 0 0-22.9-15.7 1.6-15.4 0 0 24.9 2 38.6 25.8 21.9 38.6 58.6 27.5 72.9 20.9 2.3-16 8.8-27.1 16-33.7-55.9-6.2-112.3-14.3-112.3-110.5 0-27.5 7.6-41.3 23.6-58.9-2.6-6.5-11.1-33.3 2.6-67.9 20.9-6.5 69 27 69 27 20-5.6 41.5-8.5 62.8-8.5s42.8 2.9 62.8 8.5c0 0 48.1-33.6 69-27 13.7 34.7 5.2 61.4 2.6 67.9 16 17.7 25.8 31.5 25.8 58.9 0 96.5-58.9 104.2-114.8 110.5 9.2 7.9 17 22.9 17 46.4 0 33.7-.3 75.4-.3 83.6 0 6.5 4.6 14.4 17.3 12.1C428.2 457.8 496 362.9 496 252 496 113.3 383.5 8 244.8 8M97.2 352.9c-1.3 1-1 3.3.7 5.2 1.6 1.6 3.9 2.3 5.2 1 1.3-1 1-3.3-.7-5.2-1.6-1.6-3.9-2.3-5.2-1m-10.8-8.1c-.7 1.3.3 2.9 2.3 3.9 1.6 1 3.6.7 4.3-.7.7-1.3-.3-2.9-2.3-3.9-2-.6-3.6-.3-4.3.7m32.4 35.6c-1.6 1.3-1 4.3 1.3 6.2 2.3 2.3 5.2 2.6 6.5 1 1.3-1.3.7-4.3-1.3-6.2-2.2-2.3-5.2-2.6-6.5-1m-11.4-14.7c-1.6 1-1.6 3.6 0 5.9s4.3 3.3 5.6 2.3c1.6-1.3 1.6-3.9 0-6.2-1.4-2.3-4-3.3-5.6-2" / > < / svg >
< / div >
< div class = "md-source__repository" >
PaddlePaddle/PaddleOCR
< / div >
< / a >
< / div >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "index.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
Home
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "quick_start.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
快速开始
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "update.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
近期更新
< / span >
< / a >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_4" >
< label class = "md-nav__link" for = "__nav_4" id = "__nav_4_label" tabindex = "0" >
< span class = "md-ellipsis" >
低代码全流程开发
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "1" aria-labelledby = "__nav_4_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_4" >
< span class = "md-nav__icon md-icon" > < / span >
低代码全流程开发
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "paddlex/overview.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
概述
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "paddlex/quick_start.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
快速开始
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_5" >
< div class = "md-nav__link md-nav__container" >
< a href = "model/index.html" class = "md-nav__link " >
< span class = "md-ellipsis" >
模型
< / span >
< / a >
< label class = "md-nav__link " for = "__nav_5" id = "__nav_5_label" tabindex = "0" >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< / div >
< nav class = "md-nav" data-md-level = "1" aria-labelledby = "__nav_5_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_5" >
< span class = "md-nav__icon md-icon" > < / span >
模型
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_5_2" >
< label class = "md-nav__link" for = "__nav_5_2" id = "__nav_5_2_label" tabindex = "0" >
< span class = "md-ellipsis" >
多硬件安装飞桨
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "2" aria-labelledby = "__nav_5_2_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_5_2" >
< span class = "md-nav__icon md-icon" > < / span >
多硬件安装飞桨
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "model/hardware/install_other_devices.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
多硬件安装飞桨
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "model/hardware/supported_models.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
支持硬件列表
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_6" >
< label class = "md-nav__link" for = "__nav_6" id = "__nav_6_label" tabindex = "0" >
< span class = "md-ellipsis" >
PP-OCR 文本检测识别
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "1" aria-labelledby = "__nav_6_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_6" >
< span class = "md-nav__icon md-icon" > < / span >
PP-OCR 文本检测识别
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "ppocr/overview.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
概述
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/quick_start.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
快速开始
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/installation.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
快速安装
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/visualization.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
效果展示
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/environment.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
运行环境
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/model_list.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
模型库
< / span >
< / a >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_6_7" >
< label class = "md-nav__link" for = "__nav_6_7" id = "__nav_6_7_label" tabindex = "0" >
< span class = "md-ellipsis" >
模型训练
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "2" aria-labelledby = "__nav_6_7_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_6_7" >
< span class = "md-nav__icon md-icon" > < / span >
模型训练
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "ppocr/model_train/training.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
基本概念
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/model_train/detection.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
文本检测
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/model_train/recognition.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
文本识别
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/model_train/angle_class.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
文本方向分类器
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/model_train/kie.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
关键信息提取
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/model_train/finetune.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
模型微调
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_6_8" >
< label class = "md-nav__link" for = "__nav_6_8" id = "__nav_6_8_label" tabindex = "0" >
< span class = "md-ellipsis" >
模型压缩
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "2" aria-labelledby = "__nav_6_8_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_6_8" >
< span class = "md-nav__icon md-icon" > < / span >
模型压缩
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "ppocr/model_compress/quantization.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
模型量化
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/model_compress/prune.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
模型裁剪
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/model_compress/knowledge_distillation.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
知识蒸馏
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_6_9" >
< div class = "md-nav__link md-nav__container" >
< a href = "ppocr/infer_deploy/index.html" class = "md-nav__link " >
< span class = "md-ellipsis" >
推理部署
< / span >
< / a >
< label class = "md-nav__link " for = "__nav_6_9" id = "__nav_6_9_label" tabindex = "0" >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< / div >
< nav class = "md-nav" data-md-level = "2" aria-labelledby = "__nav_6_9_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_6_9" >
< span class = "md-nav__icon md-icon" > < / span >
推理部署
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "ppocr/infer_deploy/python_infer.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
基于Python预测引擎推理
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/infer_deploy/cpp_infer.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
基于C++预测引擎推理
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/infer_deploy/windows_vs2019_build.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
Visual Studio 2019 Community CMake 编译指南
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/infer_deploy/paddle_server.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
服务化部署
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/infer_deploy/android_demo.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
Android部署
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/infer_deploy/Jetson_infer.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
Jetson部署
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/infer_deploy/lite.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
端侧部署
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/infer_deploy/paddle_js.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
网页前端部署
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/infer_deploy/paddle2onnx.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
Paddle2ONNX模型转化与预测
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/infer_deploy/paddle_cloud.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
云上飞桨部署工具
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/infer_deploy/benchmark.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
Benchmark
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_6_10" >
< label class = "md-nav__link" for = "__nav_6_10" id = "__nav_6_10_label" tabindex = "0" >
< span class = "md-ellipsis" >
博客
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "2" aria-labelledby = "__nav_6_10_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_6_10" >
< span class = "md-nav__icon md-icon" > < / span >
博客
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "ppocr/blog/PP-OCRv3_introduction.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
PP-OCRv3技术报告
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/blog/PP-OCRv4_introduction.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
PP-OCRv4技术报告
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/blog/whl.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
paddleocr package使用说明
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/blog/multi_languages.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
多语言模型
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/blog/ocr_book.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
动手学OCR
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/blog/enhanced_ctc_loss.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
Enhanced CTC Loss
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/blog/slice.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
切片操作
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/blog/inference_args.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
PaddleOCR模型推理参数解释
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/blog/distributed_training.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
分布式训练
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/blog/clone.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
项目克隆
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/blog/config.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
配置文件内容与生成
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppocr/blog/customize.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
如何生产自定义超轻量模型?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_7" >
< label class = "md-nav__link" for = "__nav_7" id = "__nav_7_label" tabindex = "0" >
< span class = "md-ellipsis" >
PP-Structure文档分析
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "1" aria-labelledby = "__nav_7_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_7" >
< span class = "md-nav__icon md-icon" > < / span >
PP-Structure文档分析
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "ppstructure/overview.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
概述
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppstructure/quick_start.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
快速开始
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppstructure/models_list.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
模型库
< / span >
< / a >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_7_4" >
< label class = "md-nav__link" for = "__nav_7_4" id = "__nav_7_4_label" tabindex = "0" >
< span class = "md-ellipsis" >
模型训练
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "2" aria-labelledby = "__nav_7_4_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_7_4" >
< span class = "md-nav__icon md-icon" > < / span >
模型训练
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "ppstructure/model_train/training.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
基本概念
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppstructure/model_train/train_layout.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
版面分析
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppstructure/model_train/train_table.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
表格识别
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppstructure/model_train/recovery_to_doc.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
版面恢复
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppstructure/model_train/train_kie.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
关键信息提取
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_7_5" >
< div class = "md-nav__link md-nav__container" >
< a href = "ppstructure/infer_deploy/index.html" class = "md-nav__link " >
< span class = "md-ellipsis" >
推理部署
< / span >
< / a >
< label class = "md-nav__link " for = "__nav_7_5" id = "__nav_7_5_label" tabindex = "0" >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< / div >
< nav class = "md-nav" data-md-level = "2" aria-labelledby = "__nav_7_5_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_7_5" >
< span class = "md-nav__icon md-icon" > < / span >
推理部署
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "ppstructure/infer_deploy/python_infer.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
基于Python预测引擎推理
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppstructure/infer_deploy/cpp_infer.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
基于C++预测引擎推理
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppstructure/infer_deploy/paddle_server.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
服务化部署
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_7_6" >
< label class = "md-nav__link" for = "__nav_7_6" id = "__nav_7_6_label" tabindex = "0" >
< span class = "md-ellipsis" >
博客
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "2" aria-labelledby = "__nav_7_6_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_7_6" >
< span class = "md-nav__icon md-icon" > < / span >
博客
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "ppstructure/blog/return_word_pos.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
返回识别位置
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "ppstructure/blog/how_to_do_kie.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
怎样完成基于图像数据的信息抽取任务
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_8" >
< label class = "md-nav__link" for = "__nav_8" id = "__nav_8_label" tabindex = "0" >
< span class = "md-ellipsis" >
前沿算法与模型
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "1" aria-labelledby = "__nav_8_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_8" >
< span class = "md-nav__icon md-icon" > < / span >
前沿算法与模型
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "algorithm/overview.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
概述
< / span >
< / a >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_8_2" >
< label class = "md-nav__link" for = "__nav_8_2" id = "__nav_8_2_label" tabindex = "0" >
< span class = "md-ellipsis" >
文本检测算法
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "2" aria-labelledby = "__nav_8_2_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_8_2" >
< span class = "md-nav__icon md-icon" > < / span >
文本检测算法
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "algorithm/text_detection/algorithm_det_db.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
DB与DB++
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_detection/algorithm_det_east.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
EAST
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_detection/algorithm_det_sast.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
SAST
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_detection/algorithm_det_psenet.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
PSENet
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_detection/algorithm_det_fcenet.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
FCENet
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_detection/algorithm_det_drrg.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
DRRG
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_detection/algorithm_det_ct.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
CT
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_8_3" >
< label class = "md-nav__link" for = "__nav_8_3" id = "__nav_8_3_label" tabindex = "0" >
< span class = "md-ellipsis" >
文本识别算法
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "2" aria-labelledby = "__nav_8_3_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_8_3" >
< span class = "md-nav__icon md-icon" > < / span >
文本识别算法
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "algorithm/text_recognition/algorithm_rec_crnn.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
CRNN
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_recognition/algorithm_rec_rosetta.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
Rosetta
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_recognition/algorithm_rec_starnet.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
STAR-Net
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_recognition/algorithm_rec_rare.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
RARE
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_recognition/algorithm_rec_srn.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
SRN
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_recognition/algorithm_rec_nrtr.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
NRTR
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_recognition/algorithm_rec_sar.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
SAR
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_recognition/algorithm_rec_seed.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
SEED
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_recognition/algorithm_rec_svtr.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
SVTR
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_recognition/algorithm_rec_svtrv2.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
SVTRv2
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_recognition/algorithm_rec_vitstr.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
ViTSTR
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_recognition/algorithm_rec_abinet.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
ABINet
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_recognition/algorithm_rec_visionlan.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
VisionLAN
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_recognition/algorithm_rec_spin.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
SPIN
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_recognition/algorithm_rec_robustscanner.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
RobustScanner
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_recognition/algorithm_rec_rfl.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
RFL
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_recognition/algorithm_rec_parseq.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
ParseQ
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_recognition/algorithm_rec_cppd.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
CPPD
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/text_recognition/algorithm_rec_satrn.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
SATRN
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_8_4" >
< label class = "md-nav__link" for = "__nav_8_4" id = "__nav_8_4_label" tabindex = "0" >
< span class = "md-ellipsis" >
文本超分辨率算法
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "2" aria-labelledby = "__nav_8_4_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_8_4" >
< span class = "md-nav__icon md-icon" > < / span >
文本超分辨率算法
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "algorithm/super_resolution/algorithm_sr_gestalt.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
Text Gestalt
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/super_resolution/algorithm_sr_telescope.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
Text Telescope
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_8_5" >
< label class = "md-nav__link" for = "__nav_8_5" id = "__nav_8_5_label" tabindex = "0" >
< span class = "md-ellipsis" >
公式识别算法
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "2" aria-labelledby = "__nav_8_5_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_8_5" >
< span class = "md-nav__icon md-icon" > < / span >
公式识别算法
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "algorithm/formula_recognition/algorithm_rec_can.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
CAN
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/formula_recognition/algorithm_rec_latex_ocr.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
LaTeX-OCR
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_8_6" >
< label class = "md-nav__link" for = "__nav_8_6" id = "__nav_8_6_label" tabindex = "0" >
< span class = "md-ellipsis" >
端到端OCR算法
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "2" aria-labelledby = "__nav_8_6_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_8_6" >
< span class = "md-nav__icon md-icon" > < / span >
端到端OCR算法
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "algorithm/end_to_end/algorithm_e2e_pgnet.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
PGNet
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_8_7" >
< label class = "md-nav__link" for = "__nav_8_7" id = "__nav_8_7_label" tabindex = "0" >
< span class = "md-ellipsis" >
表格识别算法
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "2" aria-labelledby = "__nav_8_7_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_8_7" >
< span class = "md-nav__icon md-icon" > < / span >
表格识别算法
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "algorithm/table_recognition/algorithm_table_master.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
TableMaster
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/table_recognition/algorithm_table_slanet.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
TableSLANet
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_8_8" >
< label class = "md-nav__link" for = "__nav_8_8" id = "__nav_8_8_label" tabindex = "0" >
< span class = "md-ellipsis" >
关键信息抽取算法
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "2" aria-labelledby = "__nav_8_8_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_8_8" >
< span class = "md-nav__icon md-icon" > < / span >
关键信息抽取算法
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "algorithm/kie/algorithm_kie_vi_layoutxlm.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
VI-LayoutXLM
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/kie/algorithm_kie_layoutxlm.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
LayoutXLM
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/kie/algorithm_kie_sdmgr.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
SDMGR
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "algorithm/add_new_algorithm.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
使用PaddleOCR架构添加新算法
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_9" >
< label class = "md-nav__link" for = "__nav_9" id = "__nav_9_label" tabindex = "0" >
< span class = "md-ellipsis" >
场景应用
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "1" aria-labelledby = "__nav_9_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_9" >
< span class = "md-nav__icon md-icon" > < / span >
场景应用
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "applications/overview.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
概述
< / span >
< / a >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_9_2" >
< label class = "md-nav__link" for = "__nav_9_2" id = "__nav_9_2_label" tabindex = "0" >
< span class = "md-ellipsis" >
通用
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "2" aria-labelledby = "__nav_9_2_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_9_2" >
< span class = "md-nav__icon md-icon" > < / span >
通用
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "applications/%E9%AB%98%E7%B2%BE%E5%BA%A6%E4%B8%AD%E6%96%87%E8%AF%86%E5%88%AB%E6%A8%A1%E5%9E%8B.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
高精度中文场景文本识别模型SVTR
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "applications/%E6%89%8B%E5%86%99%E6%96%87%E5%AD%97%E8%AF%86%E5%88%AB.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
手写体识别
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_9_3" >
< label class = "md-nav__link" for = "__nav_9_3" id = "__nav_9_3_label" tabindex = "0" >
< span class = "md-ellipsis" >
制造
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "2" aria-labelledby = "__nav_9_3_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_9_3" >
< span class = "md-nav__icon md-icon" > < / span >
制造
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "applications/%E5%85%89%E5%8A%9F%E7%8E%87%E8%AE%A1%E6%95%B0%E7%A0%81%E7%AE%A1%E5%AD%97%E7%AC%A6%E8%AF%86%E5%88%AB.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
数码管识别
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "applications/%E6%B6%B2%E6%99%B6%E5%B1%8F%E8%AF%BB%E6%95%B0%E8%AF%86%E5%88%AB.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
液晶屏读数识别
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "applications/%E5%8C%85%E8%A3%85%E7%94%9F%E4%BA%A7%E6%97%A5%E6%9C%9F%E8%AF%86%E5%88%AB.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
包装生产日期
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "applications/PCB%E5%AD%97%E7%AC%A6%E8%AF%86%E5%88%AB.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
PCB文字识别
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_9_4" >
< label class = "md-nav__link" for = "__nav_9_4" id = "__nav_9_4_label" tabindex = "0" >
< span class = "md-ellipsis" >
金融
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "2" aria-labelledby = "__nav_9_4_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_9_4" >
< span class = "md-nav__icon md-icon" > < / span >
金融
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "applications/%E5%A4%9A%E6%A8%A1%E6%80%81%E8%A1%A8%E5%8D%95%E8%AF%86%E5%88%AB.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
表单VQA
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "applications/%E5%8F%91%E7%A5%A8%E5%85%B3%E9%94%AE%E4%BF%A1%E6%81%AF%E6%8A%BD%E5%8F%96.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
增值税发票
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "applications/%E5%8D%B0%E7%AB%A0%E5%BC%AF%E6%9B%B2%E6%96%87%E5%AD%97%E8%AF%86%E5%88%AB.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
印章检测与识别
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "applications/%E5%BF%AB%E9%80%9F%E6%9E%84%E5%BB%BA%E5%8D%A1%E8%AF%81%E7%B1%BBOCR.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
通用卡证识别
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "applications/%E6%89%AB%E6%8F%8F%E5%90%88%E5%90%8C%E5%85%B3%E9%94%AE%E4%BF%A1%E6%81%AF%E6%8F%90%E5%8F%96.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
合同比对
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_9_5" >
< label class = "md-nav__link" for = "__nav_9_5" id = "__nav_9_5_label" tabindex = "0" >
< span class = "md-ellipsis" >
交通
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "2" aria-labelledby = "__nav_9_5_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_9_5" >
< span class = "md-nav__icon md-icon" > < / span >
交通
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "applications/%E8%BD%BB%E9%87%8F%E7%BA%A7%E8%BD%A6%E7%89%8C%E8%AF%86%E5%88%AB.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
车牌识别
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_10" >
< label class = "md-nav__link" for = "__nav_10" id = "__nav_10_label" tabindex = "0" >
< span class = "md-ellipsis" >
数据标注与合成
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "1" aria-labelledby = "__nav_10_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_10" >
< span class = "md-nav__icon md-icon" > < / span >
数据标注与合成
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "data_anno_synth/overview.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
概述
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "data_anno_synth/data_annotation.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
其它数据标注工具
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "data_anno_synth/data_synthesis.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
其它数据合成工具
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_11" >
< label class = "md-nav__link" for = "__nav_11" id = "__nav_11_label" tabindex = "0" >
< span class = "md-ellipsis" >
数据集
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "1" aria-labelledby = "__nav_11_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_11" >
< span class = "md-nav__icon md-icon" > < / span >
数据集
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "datasets/datasets.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
通用中英文OCR数据集
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "datasets/handwritten_datasets.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
手写中文OCR数据集
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "datasets/vertical_and_multilingual_datasets.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
垂类多语言OCR数据集
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "datasets/layout_datasets.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
版面分析数据集
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "datasets/table_datasets.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
表格识别数据集
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "datasets/kie_datasets.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
关键信息提取数据集
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--active" >
< input class = "md-nav__toggle md-toggle" type = "checkbox" id = "__toc" >
< label class = "md-nav__link md-nav__link--active" for = "__toc" >
< span class = "md-ellipsis" >
FAQ
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< a href = "FAQ.html" class = "md-nav__link md-nav__link--active" >
< span class = "md-ellipsis" >
FAQ
< / span >
< / a >
< nav class = "md-nav md-nav--secondary" aria-label = "目录" >
< label class = "md-nav__title" for = "__toc" >
< span class = "md-nav__icon md-icon" > < / span >
目录
< / label >
< ul class = "md-nav__list" data-md-component = "toc" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "#1" class = "md-nav__link" >
< span class = "md-ellipsis" >
1. 通用问题
< / span >
< / a >
< nav class = "md-nav" aria-label = "1. 通用问题" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#11" class = "md-nav__link" >
< span class = "md-ellipsis" >
1.1 检测
< / span >
< / a >
< nav class = "md-nav" aria-label = "1.1 检测" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 基于深度学习的文字检测方法有哪几种?各有什么优缺点?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#12" class = "md-nav__link" >
< span class = "md-ellipsis" >
1.2 识别
< / span >
< / a >
< nav class = "md-nav" aria-label = "1.2 识别" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-paddleocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR提供的文本识别算法包括哪些?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-crnn" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 文本识别方法CRNN关键技术有哪些?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-ctcattention" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 对于中文行文本识别, CTC和Attention哪种更优?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-tps" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 弯曲形变的文字识别需要怎么处理? TPS应用场景是什么, 是否好用?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#13" class = "md-nav__link" >
< span class = "md-ellipsis" >
1.3 端到端
< / span >
< / a >
< nav class = "md-nav" aria-label = "1.3 端到端" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-pgnetdbcrnnpgnet" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 请问端到端的PGNet相比于DB+CRNN在准确率上有优势吗? 或者PGNet最擅长的是什么场景呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-ocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 目前OCR普遍是二阶段, 端到端的方案在业界落地情况如何?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 二阶段的端到端的场景文本识别方法的不足有哪些?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-aaai-2021pgnet" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: AAAI 2021最新的端到端场景文本识别PGNet算法有什么特点?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#14" class = "md-nav__link" >
< span class = "md-ellipsis" >
1.4 评估方法
< / span >
< / a >
< nav class = "md-nav" aria-label = "1.4 评估方法" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-ocr_1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: OCR领域常用的评估指标是什么?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#15" class = "md-nav__link" >
< span class = "md-ellipsis" >
1.5 垂类场景实现思路
< / span >
< / a >
< nav class = "md-nav" aria-label = "1.5 垂类场景实现思路" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q_2" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 背景干扰的文字( 如印章盖到落款上, 需要识别落款或者印章中的文字) , 如何识别?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_3" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 请问对于图片中的密集文字, 有什么好的处理办法吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_4" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 文本行较紧密的情况下如何准确检测?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_5" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 对于一些在识别时稍微模糊的文本, 有没有一些图像增强的方式?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_6" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 低像素文字或者字号比较小的文字有什么超分辨率方法吗
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_7" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 对于一些尺寸较大的文档类图片, 在检测时会有较多的漏检, 怎么避免这种漏检的问题呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qdb" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 文档场景中, 使用DB模型会出现整行漏检的情况应该怎么解决?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_8" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 弯曲文本(如略微形变的文档图像)漏检问题
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_9" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何识别文字比较长的文本?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_10" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何识别带空格的英文行文本图像?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qopencvtps" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 弯曲文本有试过opencv的TPS进行弯曲校正吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_11" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何识别招牌或者广告图中的艺术字?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_12" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 印章如何识别
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_13" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 使用预训练模型进行预测,对于特定字符识别效果较差,怎么解决?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_14" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 在使用训练好的识别模型进行预测的时候,发现有很多重复的字,这个怎么解决呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-ok90" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 图像正常识别出来的文字是OK的, 旋转90度后识别出来的结果就比较差, 有什么方法可以优化?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_15" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何识别竹简上的古文?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_16" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 只想要识别票据中的部分片段,重新训练的话,只需要训练文本检测模型就可以了吗?文本识别、方向分类还是用原来的模型,这样可以吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddleocr_1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何用PaddleOCR识别视频中的文字?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_17" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 相机采集的图像为四通道,应该如何处理?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_18" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 遇到中英文识别模型不支持的字符,该如何对模型做微调?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_19" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 特殊字符( 例如一些标点符号) 识别效果不好怎么办?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_20" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 单张图上多语种并存识别( 如单张图印刷体和手写文字并存) , 应该如何处理?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_21" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 多语言的字典里是混合了不同的语种,这个是有什么讲究吗?统一到一个字典里会对精度造成多大的损失?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_22" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 类似泰语这样的小语种, 部分字会占用两个字符甚至三个字符, 请问如何制作字典
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-ppocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 想把简历上的文字识别出来后, 能够把关系一一对应起来, 比如姓名和它后面的名字组成一对, 籍贯、邮箱、学历等等都和各自的内容关联起来, 这个应该如何处理, PPOCR目前支持吗?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#16" class = "md-nav__link" >
< span class = "md-ellipsis" >
1.6 训练过程与模型调优
< / span >
< / a >
< nav class = "md-nav" aria-label = "1.6 训练过程与模型调优" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-batch_size" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 增大batch_size模型训练速度没有明显提升
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_23" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 预测时提示图像过大,显存、内存溢出了,应该如何处理?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-9070" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 识别训练时, 训练集精度已经达到90%了, 但验证集精度一直在70%, 涨不上去怎么办?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#17" class = "md-nav__link" >
< span class = "md-ellipsis" >
1.7 补充资料
< / span >
< / a >
< nav class = "md-nav" aria-label = "1.7 补充资料" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-ocr_2" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 对于小白如何快速入门中文OCR项目实践?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#2-paddleocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
2. PaddleOCR实战问题
< / span >
< / a >
< nav class = "md-nav" aria-label = "2. PaddleOCR实战问题" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#21-paddleocr-repo" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.1 PaddleOCR repo
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.1 PaddleOCR repo" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-paddleocr-developdygraph" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR develop分支和dygraph分支的区别?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qpaddleocrocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR与百度的其他OCR产品有什么区别?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#22" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.2 安装环境
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.2 安装环境" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#qoserror-winerror-126-mac-pro-python-34-shapely-import" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: OSError: [WinError 126] 找不到指定的模块。mac pro python 3.4 shapely import 问题
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qpaddlepaddlegpu-osenvironcuda_visible_devices" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddlePaddle怎么指定GPU运行 os.environ["CUDA_VISIBLE_DEVICES"]这种不生效
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qpaddleocrwindowsmac" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR是否支持在Windows或Mac系统上运行?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#23" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.3 数据量说明
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.3 数据量说明" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#qocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 简单的对于精度要求不高的OCR任务, 数据集需要准备多少张呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qpaddleocrgpuepoch" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 请问PaddleOCR项目中的中文超轻量和通用模型用了哪些数据集? 训练多少样本, GPU什么配置, 跑了多少个epoch, 大概跑了多久?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q30w500w" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 训练文字识别模型, 真实数据有30w, 合成数据有500w, 需要做样本均衡吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_24" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 当训练数据量少时,如何获取更多的数据?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#24" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.4 数据标注与生成
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.4 数据标注与生成" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-style-text" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: Style-Text 如何不进行文字风格迁移,就像普通文本生成程序一样以默认字体直接输出到分割的背景图?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-styletext" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 能否修改StyleText配置文件中的分辨率?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-styletext_1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: StyleText是否可以更换字体文件?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-styletext_2" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: StyleText批量生成图片为什么没有输出?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qstyletexttextinputstyleinput" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 使用StyleText进行数据合成时, 文本(TextInput)的长度远超StyleInput的长度, 该怎么处理与合成呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-styletext_3" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: StyleText 合成数据效果不好?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#25" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.5 预训练模型与微调
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.5 预训练模型与微调" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#qbackbone" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何更换文本检测/识别的backbone?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_25" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 参照文档做实际项目时,是重新训练还是在官方训练的基础上进行训练?具体如何操作?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-inferencepdiparams-inferencepdmodel" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 下载的识别模型解压后缺失文件, 没有期望的inference.pdiparams, inference.pdmodel等文件
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-checkpointsload" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 为什么在checkpoints中load下载的预训练模型会报错?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-finetune" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何对检测模型finetune, 比如冻结前面的层或某些层使用小的学习率学习?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#26" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.6 模型超参调整
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.6 模型超参调整" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-db640" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: DB检测训练输入尺寸640, 可以改大一些吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-32" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 预处理部分, 图片的长和宽为什么要处理成32的倍数?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-stride2-1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 在识别模型中, 为什么降采样残差结构的stride为(2, 1)?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qshape" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 训练识别时, 如何选择合适的网络输入shape?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_26" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 识别模型框出来的位置太紧凑, 会丢失边缘的文字信息, 导致识别错误
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#27" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.7 模型结构
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.7 模型结构" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#qlstm" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 文本识别训练不加LSTM是否可以收敛?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qlstmgru" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 文本识别中LSTM和GRU如何选择?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qcrnnbackbonedensenetresnet_vd" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 对于CRNN模型, backbone采用DenseNet和ResNet_vd, 哪种网络结构更好?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-backbone" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何根据不同的硬件平台选用不同的backbone?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#28-pp-ocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.8 PP-OCR系统
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.8 PP-OCR系统" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-pp-ocrse" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 在PP-OCR系统中, 文本检测的骨干网络为什么没有使用SE模块?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-pp-ocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PP-OCR系统中, 文本检测的结果有置信度吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-db" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: DB文本检测, 特征提取网络金字塔构建的部分代码在哪儿?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qpaddleocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR如何做到横排和竖排同时支持的?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_27" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 目前知识蒸馏有哪些主要的实践思路?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-preds_idx-predsargmaxaxis2beam-search" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 文字识别模型的输出矩阵需要进行解码才能得到识别的文本。代码中实现为preds_idx = preds.argmax(axis=2), 也就是最佳路径解码法。这是一种贪心算法, 即每一个时间步只将最大概率的字符作为当前时间步的预测输出, 但得到的结果不一定是最好的。为什么不使用beam search这种方式进行解码呢?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#29" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.9 端到端
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.9 端到端" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-pgnet" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 端到端算法PGNet是否支持中文识别, 速度会很慢嘛?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-pgnet_1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 端到端算法PGNet提供了两种后处理方式, 两者之间有什么区别呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-pgneteval" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 使用PGNet进行eval报错?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-pgnet_2" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PGNet有中文预训练模型吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-pgnet_3" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 用于PGNet的训练集, 文本框的标注有要求吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-pgnet_4" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 用PGNet进行端到端训练时, 数据集标注的点的个数必须统一吗? 能不能随意标点数, 只要按顺时针从左上角开始标注就行?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#210" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.10 模型效果与效果不一致
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.10 模型效果与效果不一致" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-pp-ocr_1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PP-OCR检测效果不好, 该如何优化?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q2126" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 同一张图通用检测出21个条目, 轻量级检测出26个, 难道不是轻量级的好吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-db_1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: DB有些框太贴文本了反而去掉了一些文本的边角影响识别, 这个问题有什么办法可以缓解吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-infer" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 使用合成数据精调小模型后, 效果可以, 但是还不如开源的小infer模型效果好, 这是为什么呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_28" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 表格识别中,如何提高单字的识别结果?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-dygraphrelease20" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 动态图分支(dygraph,release/2.0),训练模型和推理模型效果不一致
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-detinferenceeval" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 自己训练的det模型, 在同一张图片上, inference模型与eval模型结果差别很大, 为什么?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_29" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 训练模型和测试模型的检测结果差距较大
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddleocrpythonc" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR模型Python端预测和C++预测结果不一致?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#211" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.11 训练调试与配置文件
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.11 训练调试与配置文件" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-epoch" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 某个类别的样本比较少, 通过增加训练的迭代次数或者是epoch, 变相增加小样本的数目, 这样能缓解这个问题么?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_30" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 文本检测换成自己的数据没法训练, 有一些“###”是什么意思?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_31" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何调试数据读取程序?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_32" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 中文文本检测、文本识别构建训练集的话, 大概需要多少数据量
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-config-ymlratio_list" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: config yml文件中的ratio_list参数的作用是什么?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-iaa" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: iaa里面添加的数据增强方式, 是每张图像训练都会做增强还是随机的? 如何添加一个数据增强方法?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_33" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 怎么加速训练过程呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-finetune_1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 一些特殊场景的数据识别效果差, 但是数据量很少, 不够用来finetune怎么办?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddleocr_2" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR可以识别灰度图吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_34" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何合成手写中文数据集?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qpaddleocr200step" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR默认不是200个step保存一次模型吗? 为啥文件夹下面都没有生成
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddleocrcosine_decay" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR在训练的时候一直使用cosine_decay的学习率下降策略, 这是为什么呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-cosine" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: Cosine学习率的更新策略是怎样的? 训练过程中为什么会在一个值上停很久?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-cosinewarmup" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 之前的CosineWarmup方法为什么不见了?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-warmup" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 训练识别和检测时学习率要加上warmup, 目的是什么?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-dygraph" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: dygraph分支中训练文本识别模型时, 要使用数据增强应该如何设置?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_35" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 训练过程中,训练程序意外退出/挂起,应该如何解决?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-log" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 训练程序启动后直到结束, 看不到训练过程log?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-num-workers" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 配置文件中的参数num_workers是什么意思, 应该如何设置?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#212" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.12 预测
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.12 预测" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-paddleocrtest_batch_size_per_card1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 为什么PaddleOCR检测预测是只支持一张图片测试? 即test_batch_size_per_card=1
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddleocrtensorrt" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR支持tensorrt推理吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-tensorrtpaddleocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何使用TensorRT加速PaddleOCR预测?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_36" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 为什么识别模型做预测的时候,预测图片的数量还会影响预测的精度?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#213" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.13 推理部署
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.13 推理部署" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#qpaddleocr_1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR模型推理方式有几种? 各自的优缺点是什么
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qpaddleocrcputenorrtgpu" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR中, 对于模型预测加速, CPU加速的途径有哪些? 基于TensorRT加速GPU对输入有什么要求?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qhubservingpdserving" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: hubserving、pdserving这两种部署方式区别是什么?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddle-hub-serving-imgpathimgurl" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 目前Paddle Hub Serving只支持imgpath, 如果我想用imgurl, 去哪里改呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-c-ocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: C++ 端侧部署可以只对OCR的检测部署吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_37" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 服务部署可以只发布文本识别, 而不带文本检测模型么?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-litenb" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: lite预测库和nb模型版本不匹配, 该如何解决?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qpaddleocrsdk" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何将PaddleOCR预测模型封装成SDK
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qpaddleocrtest_batch_size_per_card1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 为什么PaddleOCR检测预测是只支持一张图片测试? 即test_batch_size_per_card=1
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_38" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 为什么第一张图预测时间很长, 第二张之后预测时间会降低?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddle-lite" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 采用Paddle-Lite进行端侧部署时出现问题, 但环境本身没有问题
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddleocr_3" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何多进程运行paddleocr?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_39" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何多进程预测?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddleocrt4" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 怎么解决PaddleOCR在T4卡上越预测越慢的情况?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-windowscpp-inferencepaddle_fluiddllopencv_world346dll" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 在windows上进行cpp inference的部署时, 总是提示找不到paddle_fluid.dll和opencv_world346.dll
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-winc" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: Windows下C++部署中文识别乱码的解决方法
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-windows-3060gpu" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: Windows下3060显卡GPU模式启动时加载模型慢
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qmac" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 想在Mac上部署, 从哪里下载预测库呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_40" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 内网环境如何进行服务化部署呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-hub_serving" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 使用hub_serving部署, 延时较高, 可能的原因是什么?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddlelite" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 在使用PaddleLite进行预测部署时, 启动预测后卡死/手机死机?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_41" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 预测时显存爆炸、内存泄漏问题?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< / ul >
< / nav >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item md-nav__item--nested" >
< input class = "md-nav__toggle md-toggle md-toggle--indeterminate" type = "checkbox" id = "__nav_13" >
< label class = "md-nav__link" for = "__nav_13" id = "__nav_13_label" tabindex = "0" >
< span class = "md-ellipsis" >
社区
< / span >
< span class = "md-nav__icon md-icon" > < / span >
< / label >
< nav class = "md-nav" data-md-level = "1" aria-labelledby = "__nav_13_label" aria-expanded = "false" >
< label class = "md-nav__title" for = "__nav_13" >
< span class = "md-nav__icon md-icon" > < / span >
社区
< / label >
< ul class = "md-nav__list" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "community/community_contribution.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
社区贡献
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "community/code_and_doc.html" class = "md-nav__link" >
< span class = "md-ellipsis" >
附录
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< / ul >
< / nav >
< / div >
< / div >
< / div >
< div class = "md-sidebar md-sidebar--secondary" data-md-component = "sidebar" data-md-type = "toc" hidden >
< div class = "md-sidebar__scrollwrap" >
< div class = "md-sidebar__inner" >
< nav class = "md-nav md-nav--secondary" aria-label = "目录" >
< label class = "md-nav__title" for = "__toc" >
< span class = "md-nav__icon md-icon" > < / span >
目录
< / label >
< ul class = "md-nav__list" data-md-component = "toc" data-md-scrollfix >
< li class = "md-nav__item" >
< a href = "#1" class = "md-nav__link" >
< span class = "md-ellipsis" >
1. 通用问题
< / span >
< / a >
< nav class = "md-nav" aria-label = "1. 通用问题" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#11" class = "md-nav__link" >
< span class = "md-ellipsis" >
1.1 检测
< / span >
< / a >
< nav class = "md-nav" aria-label = "1.1 检测" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 基于深度学习的文字检测方法有哪几种?各有什么优缺点?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#12" class = "md-nav__link" >
< span class = "md-ellipsis" >
1.2 识别
< / span >
< / a >
< nav class = "md-nav" aria-label = "1.2 识别" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-paddleocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR提供的文本识别算法包括哪些?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-crnn" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 文本识别方法CRNN关键技术有哪些?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-ctcattention" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 对于中文行文本识别, CTC和Attention哪种更优?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-tps" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 弯曲形变的文字识别需要怎么处理? TPS应用场景是什么, 是否好用?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#13" class = "md-nav__link" >
< span class = "md-ellipsis" >
1.3 端到端
< / span >
< / a >
< nav class = "md-nav" aria-label = "1.3 端到端" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-pgnetdbcrnnpgnet" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 请问端到端的PGNet相比于DB+CRNN在准确率上有优势吗? 或者PGNet最擅长的是什么场景呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-ocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 目前OCR普遍是二阶段, 端到端的方案在业界落地情况如何?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 二阶段的端到端的场景文本识别方法的不足有哪些?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-aaai-2021pgnet" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: AAAI 2021最新的端到端场景文本识别PGNet算法有什么特点?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#14" class = "md-nav__link" >
< span class = "md-ellipsis" >
1.4 评估方法
< / span >
< / a >
< nav class = "md-nav" aria-label = "1.4 评估方法" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-ocr_1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: OCR领域常用的评估指标是什么?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#15" class = "md-nav__link" >
< span class = "md-ellipsis" >
1.5 垂类场景实现思路
< / span >
< / a >
< nav class = "md-nav" aria-label = "1.5 垂类场景实现思路" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q_2" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 背景干扰的文字( 如印章盖到落款上, 需要识别落款或者印章中的文字) , 如何识别?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_3" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 请问对于图片中的密集文字, 有什么好的处理办法吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_4" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 文本行较紧密的情况下如何准确检测?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_5" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 对于一些在识别时稍微模糊的文本, 有没有一些图像增强的方式?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_6" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 低像素文字或者字号比较小的文字有什么超分辨率方法吗
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_7" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 对于一些尺寸较大的文档类图片, 在检测时会有较多的漏检, 怎么避免这种漏检的问题呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qdb" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 文档场景中, 使用DB模型会出现整行漏检的情况应该怎么解决?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_8" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 弯曲文本(如略微形变的文档图像)漏检问题
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_9" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何识别文字比较长的文本?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_10" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何识别带空格的英文行文本图像?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qopencvtps" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 弯曲文本有试过opencv的TPS进行弯曲校正吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_11" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何识别招牌或者广告图中的艺术字?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_12" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 印章如何识别
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_13" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 使用预训练模型进行预测,对于特定字符识别效果较差,怎么解决?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_14" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 在使用训练好的识别模型进行预测的时候,发现有很多重复的字,这个怎么解决呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-ok90" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 图像正常识别出来的文字是OK的, 旋转90度后识别出来的结果就比较差, 有什么方法可以优化?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_15" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何识别竹简上的古文?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_16" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 只想要识别票据中的部分片段,重新训练的话,只需要训练文本检测模型就可以了吗?文本识别、方向分类还是用原来的模型,这样可以吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddleocr_1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何用PaddleOCR识别视频中的文字?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_17" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 相机采集的图像为四通道,应该如何处理?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_18" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 遇到中英文识别模型不支持的字符,该如何对模型做微调?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_19" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 特殊字符( 例如一些标点符号) 识别效果不好怎么办?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_20" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 单张图上多语种并存识别( 如单张图印刷体和手写文字并存) , 应该如何处理?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_21" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 多语言的字典里是混合了不同的语种,这个是有什么讲究吗?统一到一个字典里会对精度造成多大的损失?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_22" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 类似泰语这样的小语种, 部分字会占用两个字符甚至三个字符, 请问如何制作字典
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-ppocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 想把简历上的文字识别出来后, 能够把关系一一对应起来, 比如姓名和它后面的名字组成一对, 籍贯、邮箱、学历等等都和各自的内容关联起来, 这个应该如何处理, PPOCR目前支持吗?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#16" class = "md-nav__link" >
< span class = "md-ellipsis" >
1.6 训练过程与模型调优
< / span >
< / a >
< nav class = "md-nav" aria-label = "1.6 训练过程与模型调优" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-batch_size" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 增大batch_size模型训练速度没有明显提升
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_23" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 预测时提示图像过大,显存、内存溢出了,应该如何处理?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-9070" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 识别训练时, 训练集精度已经达到90%了, 但验证集精度一直在70%, 涨不上去怎么办?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#17" class = "md-nav__link" >
< span class = "md-ellipsis" >
1.7 补充资料
< / span >
< / a >
< nav class = "md-nav" aria-label = "1.7 补充资料" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-ocr_2" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 对于小白如何快速入门中文OCR项目实践?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#2-paddleocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
2. PaddleOCR实战问题
< / span >
< / a >
< nav class = "md-nav" aria-label = "2. PaddleOCR实战问题" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#21-paddleocr-repo" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.1 PaddleOCR repo
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.1 PaddleOCR repo" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-paddleocr-developdygraph" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR develop分支和dygraph分支的区别?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qpaddleocrocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR与百度的其他OCR产品有什么区别?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#22" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.2 安装环境
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.2 安装环境" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#qoserror-winerror-126-mac-pro-python-34-shapely-import" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: OSError: [WinError 126] 找不到指定的模块。mac pro python 3.4 shapely import 问题
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qpaddlepaddlegpu-osenvironcuda_visible_devices" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddlePaddle怎么指定GPU运行 os.environ["CUDA_VISIBLE_DEVICES"]这种不生效
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qpaddleocrwindowsmac" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR是否支持在Windows或Mac系统上运行?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#23" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.3 数据量说明
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.3 数据量说明" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#qocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 简单的对于精度要求不高的OCR任务, 数据集需要准备多少张呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qpaddleocrgpuepoch" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 请问PaddleOCR项目中的中文超轻量和通用模型用了哪些数据集? 训练多少样本, GPU什么配置, 跑了多少个epoch, 大概跑了多久?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q30w500w" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 训练文字识别模型, 真实数据有30w, 合成数据有500w, 需要做样本均衡吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_24" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 当训练数据量少时,如何获取更多的数据?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#24" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.4 数据标注与生成
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.4 数据标注与生成" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-style-text" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: Style-Text 如何不进行文字风格迁移,就像普通文本生成程序一样以默认字体直接输出到分割的背景图?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-styletext" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 能否修改StyleText配置文件中的分辨率?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-styletext_1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: StyleText是否可以更换字体文件?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-styletext_2" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: StyleText批量生成图片为什么没有输出?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qstyletexttextinputstyleinput" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 使用StyleText进行数据合成时, 文本(TextInput)的长度远超StyleInput的长度, 该怎么处理与合成呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-styletext_3" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: StyleText 合成数据效果不好?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#25" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.5 预训练模型与微调
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.5 预训练模型与微调" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#qbackbone" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何更换文本检测/识别的backbone?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_25" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 参照文档做实际项目时,是重新训练还是在官方训练的基础上进行训练?具体如何操作?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-inferencepdiparams-inferencepdmodel" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 下载的识别模型解压后缺失文件, 没有期望的inference.pdiparams, inference.pdmodel等文件
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-checkpointsload" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 为什么在checkpoints中load下载的预训练模型会报错?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-finetune" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何对检测模型finetune, 比如冻结前面的层或某些层使用小的学习率学习?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#26" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.6 模型超参调整
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.6 模型超参调整" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-db640" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: DB检测训练输入尺寸640, 可以改大一些吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-32" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 预处理部分, 图片的长和宽为什么要处理成32的倍数?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-stride2-1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 在识别模型中, 为什么降采样残差结构的stride为(2, 1)?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qshape" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 训练识别时, 如何选择合适的网络输入shape?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_26" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 识别模型框出来的位置太紧凑, 会丢失边缘的文字信息, 导致识别错误
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#27" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.7 模型结构
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.7 模型结构" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#qlstm" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 文本识别训练不加LSTM是否可以收敛?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qlstmgru" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 文本识别中LSTM和GRU如何选择?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qcrnnbackbonedensenetresnet_vd" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 对于CRNN模型, backbone采用DenseNet和ResNet_vd, 哪种网络结构更好?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-backbone" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何根据不同的硬件平台选用不同的backbone?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#28-pp-ocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.8 PP-OCR系统
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.8 PP-OCR系统" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-pp-ocrse" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 在PP-OCR系统中, 文本检测的骨干网络为什么没有使用SE模块?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-pp-ocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PP-OCR系统中, 文本检测的结果有置信度吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-db" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: DB文本检测, 特征提取网络金字塔构建的部分代码在哪儿?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qpaddleocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR如何做到横排和竖排同时支持的?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_27" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 目前知识蒸馏有哪些主要的实践思路?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-preds_idx-predsargmaxaxis2beam-search" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 文字识别模型模型的输出矩阵需要进行解码才能得到识别的文本。代码中实现为preds_idx = preds.argmax(axis=2), 也就是最佳路径解码法。这是一种贪心算法, 是每一个时间步只将最大概率的字符作为当前时间步的预测输出, 但得到的结果不一定是最好的。为什么不使用beam search这种方式进行解码呢?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#29" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.9 端到端
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.9 端到端" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-pgnet" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 端到端算法PGNet是否支持中文识别, 速度会很慢嘛?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-pgnet_1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 端到端算法PGNet提供了两种后处理方式, 两者之间有什么区别呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-pgneteval" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 使用PGNet进行eval报错?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-pgnet_2" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PGNet有中文预训练模型吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-pgnet_3" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 用于PGNet的训练集, 文本框的标注有要求吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-pgnet_4" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 用PGNet做进行端到端训练时, 数据集标注的点的个数必须都是统一一样的吗? 能不能随意标点数,只要能够按顺时针从左上角开始标这样?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#210" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.10 模型效果与效果不一致
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.10 模型效果与效果不一致" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-pp-ocr_1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PP-OCR检测效果不好, 该如何优化?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q2126" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 同一张图通用检测出21个条目, 轻量级检测出26个 ,难道不是轻量级的好吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-db_1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: DB有些框太贴文本了反而去掉了一些文本的边角影响识别, 这个问题有什么办法可以缓解吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-infer" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 使用合成数据精调小模型后, 效果可以, 但是还没开源的小infer模型效果好, 这是为什么呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_28" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 表格识别中,如何提高单字的识别结果?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-dygraphrelease20" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 动态图分支(dygraph,release/2.0),训练模型和推理模型效果不一致
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-detinferenceeval" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 自己训练的det模型, 在同一张图片上, inference模型与eval模型结果差别很大, 为什么?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_29" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 训练模型和测试模型的检测结果差距较大
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddleocrpythonc" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR模型Python端预测和C++预测结果不一致?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#211" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.11 训练调试与配置文件
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.11 训练调试与配置文件" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-epoch" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 某个类别的样本比较少, 通过增加训练的迭代次数或者是epoch, 变相增加小样本的数目, 这样能缓解这个问题么?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_30" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 文本检测换成自己的数据没法训练, 有一些”###”是什么意思?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_31" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何调试数据读取程序?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_32" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 中文文本检测、文本识别构建训练集的话, 大概需要多少数据量
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-config-ymlratio_list" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: config yml文件中的ratio_list参数的作用是什么?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-iaa" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: iaa里面添加的数据增强方式, 是每张图像训练都会做增强还是随机的? 如何添加一个数据增强方法?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_33" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 怎么加速训练过程呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-finetune_1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 一些特殊场景的数据识别效果差, 但是数据量很少, 不够用来finetune怎么办?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddleocr_2" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR可以识别灰度图吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_34" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何合成手写中文数据集?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qpaddleocr200step" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR默认不是200个step保存一次模型吗? 为啥文件夹下面都没有生成
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddleocrcosine_decay" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR在训练的时候一直使用cosine_decay的学习率下降策略, 这是为什么呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-cosine" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: Cosine学习率的更新策略是怎样的? 训练过程中为什么会在一个值上停很久?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-cosinewarmup" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 之前的CosineWarmup方法为什么不见了?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-warmup" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 训练识别和检测时学习率要加上warmup, 目的是什么?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-dygraph" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 关于dygraph分支中, 文本识别模型训练, 要使用数据增强应该如何设置?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_35" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 训练过程中,训练程序意外退出/挂起,应该如何解决?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-log" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 训练程序启动后直到结束, 看不到训练过程log?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-num-workers" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 配置文件中的参数num workers是什么意思, 应该如何设置?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#212" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.12 预测
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.12 预测" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#q-paddleocrtest_batch_size_per_card1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 为什么PaddleOCR检测预测是只支持一张图片测试? 即test_batch_size_per_card=1
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddleocrtensorrt" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR支持tensorrt推理吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-tensorrtpaddleocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何使用TensorRT加速PaddleOCR预测?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_36" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 为什么识别模型做预测的时候,预测图片的数量数量还会影响预测的精度
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< li class = "md-nav__item" >
< a href = "#213" class = "md-nav__link" >
< span class = "md-ellipsis" >
2.13 推理部署
< / span >
< / a >
< nav class = "md-nav" aria-label = "2.13 推理部署" >
< ul class = "md-nav__list" >
< li class = "md-nav__item" >
< a href = "#qpaddleocr_1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR模型推理方式有几种? 各自的优缺点是什么
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qpaddleocrcputenorrtgpu" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: PaddleOCR中, 对于模型预测加速, CPU加速的途径有哪些? 基于TenorRT加速GPU对输入有什么要求?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qhubservingpdserving" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: hubserving、pdserving这两种部署方式区别是什么?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddle-hub-serving-imgpathimgurl" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 目前paddle hub serving 只支持 imgpath, 如果我想用imgurl 去哪里改呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-c-ocr" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: C++ 端侧部署可以只对OCR的检测部署吗?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_37" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 服务部署可以只发布文本识别, 而不带文本检测模型么?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-litenb" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: lite预测库和nb模型版本不匹配, 该如何解决?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qpaddleocrsdk" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何将PaddleOCR预测模型封装成SDK
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qpaddleocrtest_batch_size_per_card1" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 为什么PaddleOCR检测预测是只支持一张图片测试? 即test_batch_size_per_card=1
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_38" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 为什么第一张张图预测时间很长, 第二张之后预测时间会降低?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddle-lite" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 采用Paddle-Lite进行端侧部署, 出现问题, 环境没问题
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddleocr_3" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何多进程运行paddleocr?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_39" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 如何多进程预测?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddleocrt4" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 怎么解决paddleOCR在T4卡上有越预测越慢的情况?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-windowscpp-inferencepaddle_fluiddllopencv_world346dll" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 在windows上进行cpp inference的部署时, 总是提示找不到paddle_fluid.dll和opencv_world346.dll
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-winc" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: win下C++部署中文识别乱码的解决方法
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-windows-3060gpu" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: windows 3060显卡GPU模式启动 加载模型慢
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#qmac" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 想在Mac上部署, 从哪里下载预测库呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_40" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 内网环境如何进行服务化部署呢?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-hub_serving" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 使用hub_serving部署, 延时较高, 可能的原因是什么呀?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q-paddlelite" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 在使用PaddleLite进行预测部署时, 启动预测后卡死/手机死机?
< / span >
< / a >
< / li >
< li class = "md-nav__item" >
< a href = "#q_41" class = "md-nav__link" >
< span class = "md-ellipsis" >
Q: 预测时显存爆炸、内存泄漏问题?
< / span >
< / a >
< / li >
< / ul >
< / nav >
< / li >
< / ul >
< / nav >
< / li >
< / ul >
< / nav >
< / div >
< / div >
< / div >
< div class = "md-content" data-md-component = "content" >
< article class = "md-content__inner md-typeset" >
< a href = "https://github.com/PaddlePaddle/PaddleOCR/edit/main/docs/FAQ.md" title = "编辑此页" class = "md-content__button md-icon" >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 24 24" > < path d = "M20.71 7.04c.39-.39.39-1.04 0-1.41l-2.34-2.34c-.37-.39-1.02-.39-1.41 0l-1.84 1.83 3.75 3.75M3 17.25V21h3.75L17.81 9.93l-3.75-3.75z" / > < / svg >
< / a >
< a href = "https://github.com/PaddlePaddle/PaddleOCR/raw/main/docs/FAQ.md" title = "查看本页的源代码" class = "md-content__button md-icon" >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 24 24" > < path d = "M12 9a3 3 0 0 0-3 3 3 3 0 0 0 3 3 3 3 0 0 0 3-3 3 3 0 0 0-3-3m0 8a5 5 0 0 1-5-5 5 5 0 0 1 5-5 5 5 0 0 1 5 5 5 5 0 0 1-5 5m0-12.5C7 4.5 2.73 7.61 1 12c1.73 4.39 6 7.5 11 7.5s9.27-3.11 11-7.5c-1.73-4.39-6-7.5-11-7.5" / > < / svg >
< / a >
< h1 > FAQ< / h1 >
< blockquote >
< p > Congratulations, you have found a treasure!< / p >
< / blockquote >
< p > PaddleOCR has collected and organized the frequently asked questions raised in issues and user groups since the project was open-sourced, together with brief answers. The goal is to provide OCR developers with a reference and to help everyone avoid common pitfalls.< / p >
< p > Among them, < a href = "#1" > General Questions< / a > are those typically raised by users who are new to OCR algorithms; < a href = "#15" > 1.5 Approaches for Vertical Scenarios< / a > summarizes how to choose a technical route for optimization in specific scenarios. < a href = "#2-paddleocr" > PaddleOCR FAQs< / a > covers problems developers may run into after adopting PaddleOCR, and serves as a practical troubleshooting guide.< / p >
< p > PaddleOCR also adds < code > good issue< / code > and < code > good first issue< / code > labels while reviewing issues; such questions may not be added to this FAQ immediately, so developers can also check them directly. We would greatly appreciate help from the community in adding that content to the FAQ.< / p >
< p > There are many experts in the OCR field, and the answers in this document rely mainly on our limited project experience, so omissions are inevitable. If anything is missing or incorrect, < strong > we sincerely hope knowledgeable readers will help supplement and correct it< / strong > . Many thanks.< / p >
< h2 id = "1" > 1. 通用问题< a class = "headerlink" href = "#1" title = "Permanent link" > ¶ < / a > < / h2 >
< h3 id = "11" > 1.1 检测< a class = "headerlink" href = "#11" title = "Permanent link" > ¶ < / a > < / h3 >
< h4 id = "q" > Q: 基于深度学习的文字检测方法有哪几种?各有什么优缺点?< a class = "headerlink" href = "#q" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :常用的基于深度学习的文字检测方法一般可以分为基于回归的、基于分割的两大类,当然还有一些将两者进行结合的方法。< / p >
< p > ( 1) 基于回归的方法分为box回归和像素值回归。a. 采用box回归的方法主要有CTPN、Textbox系列和EAST, 这类算法对规则形状文本检测效果较好, 但无法准确检测不规则形状文本。 b. 像素值回归的方法主要有CRAFT和SA-Text, 这类算法能够检测弯曲文本且对小文本效果优秀但是实时性能不够。< / p >
< p > ( 2) 基于分割的算法, 如PSENet, 这类算法不受文本形状的限制, 对各种形状的文本都能取得较好的效果, 但是往往后处理比较复杂, 导致耗时严重。目前也有一些算法专门针对这个问题进行改进, 如DB, 将二值化进行近似, 使其可导, 融入训练, 从而获取更准确的边界, 大大降低了后处理的耗时。< / p >
< h3 id = "12" > 1.2 识别< a class = "headerlink" href = "#12" title = "Permanent link" > ¶ < / a > < / h3 >
< h4 id = "q-paddleocr" > Q: PaddleOCR提供的文本识别算法包括哪些? < a class = "headerlink" href = "#q-paddleocr" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : PaddleOCR主要提供五种文本识别算法, 包括CRNN\StarNet\RARE\Rosetta和SRN, 其中CRNN\StarNet和Rosetta是基于ctc的文字识别算法, RARE是基于attention的文字识别算法; SRN为百度自研的文本识别算法, 引入了语义信息, 显著提升了准确率。 详情可参照如下页面: 文本识别算法< / p >
< h4 id = "q-crnn" > Q: 文本识别方法CRNN关键技术有哪些? < a class = "headerlink" href = "#q-crnn" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : CRNN 关键技术包括三部分。( 1) CNN提取图像卷积特征。( 2) 深层双向LSTM网络, 在卷积特征的基础上继续提取文字序列特征。( 3) Connectionist Temporal Classification(CTC),解决训练时字符无法对齐的问题。< / p >
< h4 id = "q-ctcattention" > Q: 对于中文行文本识别, CTC和Attention哪种更优? < a class = "headerlink" href = "#q-ctcattention" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : ( 1) 从效果上来看, 通用OCR场景CTC的识别效果优于Attention, 因为带识别的字典中的字符比较多, 常用中文汉字三千字以上, 如果训练样本不足的情况下, 对于这些字符的序列关系挖掘比较困难。中文场景下Attention模型的优势无法体现。而且Attention适合短语句识别, 对长句子识别比较差。< / p >
< p > ( 2) 从训练和预测速度上, Attention的串行解码结构限制了预测速度, 而CTC网络结构更高效, 预测速度上更有优势。< / p >
< h4 id = "q-tps" > Q: 弯曲形变的文字识别需要怎么处理? TPS应用场景是什么, 是否好用? < a class = "headerlink" href = "#q-tps" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : ( 1) 在大多数情况下, 如果遇到的场景弯曲形变不是太严重, 检测4个顶点, 然后直接通过仿射变换转正识别就足够了。< / p >
< p > ( 2) 如果不能满足需求, 可以尝试使用TPS( Thin Plate Spline) , 即薄板样条插值。TPS是一种插值算法, 经常用于图像变形等, 通过少量的控制点就可以驱动图像进行变化。一般用在有弯曲形变的文本识别中, 当检测到不规则的/弯曲的( 如, 使用基于分割的方法检测算法) 文本区域, 往往先使用TPS算法对文本区域矫正成矩形再进行识别, 如, STAR-Net、RARE等识别算法中引入了TPS模块。< / p >
< blockquote >
< p > < strong > Warning< / strong > : TPS看起来美好, 在实际应用时经常发现并不够鲁棒, 并且会增加耗时, 需要谨慎使用。< / p >
< / blockquote >
< h3 id = "13" > 1.3 端到端< a class = "headerlink" href = "#13" title = "Permanent link" > ¶ < / a > < / h3 >
< h4 id = "q-pgnetdbcrnnpgnet" > Q: 请问端到端的pgnet相比于DB+CRNN在准确率上有优势吗? 或者是pgnet最擅长的场景是什么场景呢? < a class = "headerlink" href = "#q-pgnetdbcrnnpgnet" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : pgnet是端到端算法, 检测识别一步到位, 不用分开训练2个模型, 也支持弯曲文本的识别, 但是在中文上的效果还没有充分验证; db+crnn的验证更充分, 应用相对成熟, 常规非弯曲的文本都能解的不错。< / p >
< h4 id = "q-ocr" > Q: 目前OCR普遍是二阶段, 端到端的方案在业界落地情况如何? < a class = "headerlink" href = "#q-ocr" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 端到端在文字分布密集的业务场景, 效率会比较有保证, 精度的话看自己业务数据积累情况, 如果行级别的识别数据积累比较多的话two-stage会比较好。百度的落地场景, 比如工业仪表识别、车牌识别都用到端到端解决方案。< / p >
< h4 id = "q_1" > Q: 二阶段的端到端的场景文本识别方法的不足有哪些?< a class = "headerlink" href = "#q_1" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 这类方法一般需要设计针对ROI提取特征的方法, 而ROI操作一般比较耗时。< / p >
< h4 id = "q-aaai-2021pgnet" > Q: AAAI 2021最新的端到端场景文本识别PGNet算法有什么特点? < a class = "headerlink" href = "#q-aaai-2021pgnet" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : PGNet不需要字符级别的标注, NMS操作以及ROI操作。同时提出预测文本行内的阅读顺序模块和基于图的修正模块来提升文本识别效果。该算法是百度自研, 近期会在PaddleOCR开源。< / p >
< h3 id = "14" > 1.4 评估方法< a class = "headerlink" href = "#14" title = "Permanent link" > ¶ < / a > < / h3 >
< h4 id = "q-ocr_1" > Q: OCR领域常用的评估指标是什么? < a class = "headerlink" href = "#q-ocr_1" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :对于两阶段的可以分开来看,分别是检测和识别阶段< / p >
< p > ( 1) 检测阶段: 先按照检测框和标注框的IOU评估, IOU大于某个阈值判断为检测准确。这里检测框和标注框不同于一般的通用目标检测框, 是采用多边形进行表示。检测准确率: 正确的检测框个数在全部检测框的占比, 主要是判断检测指标。检测召回率: 正确的检测框个数在全部标注框的占比, 主要是判断漏检的指标。< / p >
< p > ( 2) 识别阶段:
字符识别准确率,即正确识别的文本行占标注的文本行数量的比例,只有整行文本识别对才算正确识别。< / p >
< p > ( 3) 端到端统计:
端对端召回率:准确检测并正确识别文本行在全部标注文本行的占比;
端到端准确率:准确检测并正确识别文本行在 检测到的文本行数量 的占比;
准确检测的标准是检测框与标注框的IOU大于某个阈值, 正确识别的检测框中的文本与标注的文本相同。< / p >
< h3 id = "15" > 1.5 垂类场景实现思路< a class = "headerlink" href = "#15" title = "Permanent link" > ¶ < / a > < / h3 >
< h4 id = "q_2" > Q: 背景干扰的文字( 如印章盖到落款上, 需要识别落款或者印章中的文字) , 如何识别? < a class = "headerlink" href = "#q_2" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : ( 1) 在人眼确认可识别的条件下, 对于背景有干扰的文字, 首先要保证检测框足够准确, 如果检测框不准确, 需要考虑是否可以通过过滤颜色等方式对图像预处理并且增加更多相关的训练数据; 在识别的部分, 注意在训练数据中加入背景干扰类的扩增图像。< / p >
< p > ( 2) 如果MobileNet模型不能满足需求, 可以尝试ResNet系列大模型来获得更好的效果。< / p >
< h4 id = "q_3" > Q: 请问对于图片中的密集文字, 有什么好的处理办法吗? < a class = "headerlink" href = "#q_3" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 可以先试用预训练模型测试一下, 例如DB+CRNN, 判断下密集文字图片中是检测还是识别的问题, 然后针对性的改善。还有一种是如果图象中密集文字较小, 可以尝试增大图像分辨率, 对图像进行一定范围内的拉伸, 将文字稀疏化, 提高识别效果。< / p >
< h4 id = "q_4" > Q: 文本行较紧密的情况下如何准确检测?< a class = "headerlink" href = "#q_4" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 使用基于分割的方法, 如DB, 检测密集文本行时, 最好收集一批数据进行训练, 并且在训练时, 并将生成二值图像的shrink_ratio参数调小一些。< / p >
< h4 id = "q_5" > Q: 对于一些在识别时稍微模糊的文本, 有没有一些图像增强的方式? < a class = "headerlink" href = "#q_5" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 在人类肉眼可以识别的前提下, 可以考虑图像处理中的均值滤波、中值滤波或者高斯滤波等模糊算子尝试。也可以尝试从数据扩增扰动来强化模型鲁棒性, 另外新的思路有对抗性训练和超分SR思路, 可以尝试借鉴。但目前业界尚无普遍认可的最优方案, 建议优先在数据采集阶段增加一些限制提升图片质量。< / p >
< h4 id = "q_6" > Q: 低像素文字或者字号比较小的文字有什么超分辨率方法吗< a class = "headerlink" href = "#q_6" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 超分辨率方法分为传统方法和基于深度学习的方法。基于深度学习的方法中, 比较经典的有SRCNN, 另外CVPR2020也有一篇超分辨率的工作可以参考文章: Unpaired Image Super-Resolution using Pseudo-Supervision, 但是没有充分的实践验证过, 需要看实际场景下的效果。< / p >
< h4 id = "q_7" > Q: 对于一些尺寸较大的文档类图片, 在检测时会有较多的漏检, 怎么避免这种漏检的问题呢? < a class = "headerlink" href = "#q_7" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : PaddleOCR中在图像最长边大于960时, 将图像等比例缩放为长边960的图像再进行预测, 对于这种图像, 可以通过修改det_limit_side_len, 增大检测的最长边: tools/infer/utility.py#L42< / p >
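< p > For example, if you use the pip-installed < code > paddleocr< / code > package, the same limit can be raised through keyword arguments. This is a minimal sketch, assuming the package exposes these parameters like the CLI does; the image path is a placeholder:< / p >
< div class = "language-python highlight" > < pre > < code > from paddleocr import PaddleOCR

# Raise the resize limit so large documents are downscaled less aggressively.
ocr = PaddleOCR(det_limit_side_len=1536, det_limit_type="max")
result = ocr.ocr("large_document.jpg")  # placeholder path
< / code > < / pre > < / div >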
< h4 id = "qdb" > Q: 文档场景中, 使用DB模型会出现整行漏检的情况应该怎么解决? < a class = "headerlink" href = "#qdb" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :可以在预测时调小 det_db_box_thresh 阈值, 默认为0.5, 可调小至0.3观察效果。< / p >
< h4 id = "q_8" > Q: 弯曲文本(如略微形变的文档图像)漏检问题< a class = "headerlink" href = "#q_8" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : db后处理中计算文本框平均得分时, 是求rectangle区域的平均分数, 容易造成弯曲文本漏检, 已新增求polygon区域的平均分数, 会更准确, 但速度有所降低, 可按需选择, 在相关pr中可查看< a href = "https://github.com/PaddlePaddle/PaddleOCR/pull/2604" > 可视化对比效果< / a > 。该功能通过参数 < a href = "https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.1/tools/infer/utility.py#L51" > det_db_score_mode< / a > 进行选择,参数值可选[< code > fast< / code > (默认)、< code > slow< / code > ], < code > fast< / code > 对应原始的rectangle方式, < code > slow< / code > 对应polygon方式。感谢用户< a href = "https://github.com/buptlihang" > buptlihang< / a > 提< a href = "https://github.com/PaddlePaddle/PaddleOCR/pull/2574" > pr< / a > 帮助解决该问题🌹。< / p >
< h4 id = "q_9" > Q: 如何识别文字比较长的文本? < a class = "headerlink" href = "#q_9" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :在中文识别模型训练时,并不是采用直接将训练样本缩放到[3,32,320]进行训练, 而是先等比例缩放图像, 保证图像高度为32, 宽度不足320的部分补0, 宽高比大于10的样本直接丢弃。预测时, 如果是单张图像预测, 则按上述操作直接对图像缩放, 不做宽度320的限制。如果是多张图预测, 则采用batch方式预测, 每个batch的宽度动态变换, 采用这个batch中最长宽度。< / p >
< h4 id = "q_10" > Q: 如何识别带空格的英文行文本图像? < a class = "headerlink" href = "#q_10" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :空格识别可以考虑以下两种方案:< / p >
< p > (1)优化文本检测算法。检测结果在空格处将文本断开。这种方案在检测数据标注时,需要将含有空格的文本行分成好多段。< / p >
< p > (2)优化文本识别算法。在识别字典里面引入空格字符,然后在识别的训练数据中,如果用空行,进行标注。此外,合成数据时,通过拼接训练数据,生成含有空格的文本。< / p >
< h4 id = "qopencvtps" > Q: 弯曲文本有试过opencv的TPS进行弯曲校正吗? < a class = "headerlink" href = "#qopencvtps" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : opencv的tps需要标出上下边界对应的点, 这个点很难通过传统方法或者深度学习方法获取。PaddleOCR里StarNet网络中的tps模块实现了自动学点, 自动校正, 可以直接尝试这个。< / p >
< h4 id = "q_11" > Q: 如何识别招牌或者广告图中的艺术字?< a class = "headerlink" href = "#q_11" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 招牌或者广告图中的艺术字是文本识别一个非常有挑战性的难题, 因为艺术字中的单字和印刷体相比, 变化非常大。如果需要识别的艺术字是在一个词典列表内, 可以将改每个词典认为是一个待识别图像模板, 通过通用图像检索识别系统解决识别问题。可以尝试使用PaddleClas的图像识别系统。< / p >
< h4 id = "q_12" > Q: 印章如何识别< a class = "headerlink" href = "#q_12" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 1. 使用带tps的识别网络或abcnet,2.使用极坐标变换将图片拉平之后使用crnn< / p >
< h4 id = "q_13" > Q: 使用预训练模型进行预测,对于特定字符识别识别效果较差,怎么解决?< a class = "headerlink" href = "#q_13" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 由于我们所提供的识别模型是基于通用大规模数据集进行训练的, 部分字符可能在训练集中包含较少, 因此您可以构建特定场景的数据集, 基于我们提供的预训练模型进行微调。建议用于微调的数据集中, 每个字符出现的样本数量不低于300, 但同时需要注意不同字符的数量均衡。具体可以参考: 微调。< / p >
< h4 id = "q_14" > Q: 在使用训练好的识别模型进行预测的时候,发现有很多重复的字,这个怎么解决呢?< a class = "headerlink" href = "#q_14" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :可以看下训练的尺度和预测的尺度是否相同,如果训练的尺度为[3, 32, 320],预测的尺度为[3, 64, 640],则会有比较多的重复识别现象。< / p >
< h4 id = "q-ok90" > Q: 图像正常识别出来的文字是OK的, 旋转90度后识别出来的结果就比较差, 有什么方法可以优化? < a class = "headerlink" href = "#q-ok90" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 整图旋转90之后效果变差是有可能的, 因为目前PPOCR默认输入的图片是正向的; 可以自己训练一个整图的方向分类器,放在预测的最前端(可以参照现有方向分类器的方式),或者可以基于规则做一些预处理,比如判断长宽等等。< / p >
< h4 id = "q_15" > Q: 如何识别竹简上的古文?< a class = "headerlink" href = "#q_15" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 对于字符都是普通的汉字字符的情况, 只要标注足够的数据, finetune模型就可以了。如果数据量不足, 您可以尝试< a href = "https://github.com/PFCCLab/StyleText" > StyleText< / a > 工具。
而如果使用的字符是特殊的古文字、甲骨文、象形文字等,那么首先需要构建一个古文字的字典,之后再进行训练。< / p >
< h4 id = "q_16" > Q: 只想要识别票据中的部分片段,重新训练它的话,只需要训练文本检测模型就可以了吗?问文本识别,方向分类还是用原来的模型这样可以吗?< a class = "headerlink" href = "#q_16" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 可以的。PaddleOCR的检测、识别、方向分类器三个模型是独立的, 在实际使用中可以优化和替换其中任何一个模型。< / p >
< h4 id = "q-paddleocr_1" > Q: 如何用PaddleOCR识别视频中的文字? < a class = "headerlink" href = "#q-paddleocr_1" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 目前PaddleOCR主要针对图像做处理, 如果需要视频识别, 可以先对视频抽帧, 然后用PPOCR识别。< / p >
< h4 id = "q_17" > Q: 相机采集的图像为四通道,应该如何处理?< a class = "headerlink" href = "#q_17" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 有两种方式处理:< / p >
< ul >
< li > 如果没有其他需要, 可以在解码数据的时候指定模式为三通道, 例如如果使用opencv, 可以使用cv::imread(img_path, cv::IMREAD_COLOR)。< / li >
< li > 如果其他模块需要处理四通道的图像, 那也可以在输入PaddleOCR模块之前进行转换, 例如使用cvCvtColor(& img,img3chan,CV_RGBA2RGB)。< / li >
< / ul >
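< p > In Python, the same two options look like the following minimal sketch; the file name is a placeholder, and note that OpenCV loads color images in BGR channel order:< / p >
< div class = "language-python highlight" > < pre > < code > import cv2

# Option 1: decode directly as 3 channels (alpha is dropped at read time).
img = cv2.imread("input.png", cv2.IMREAD_COLOR)

# Option 2: convert an existing 4-channel (BGRA) array to 3 channels.
img4 = cv2.imread("input.png", cv2.IMREAD_UNCHANGED)
if img4 is not None and img4.ndim == 3 and img4.shape[2] == 4:
    img = cv2.cvtColor(img4, cv2.COLOR_BGRA2BGR)
< / code > < / pre > < / div >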
< h4 id = "q_18" > Q: 遇到中英文识别模型不支持的字符,该如何对模型做微调?< a class = "headerlink" href = "#q_18" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :如果希望识别中英文识别模型中不支持的字符,需要更新识别的字典,并完成微调过程。比如说如果希望模型能够进一步识别罗马数字,可以按照以下步骤完成模型微调过程。< / p >
< ol >
< li > 准备中英文识别数据以及罗马数字的识别数据,用于训练,同时保证罗马数字和中英文识别数字的效果;< / li >
< li > 修改默认的字典文件,在后面添加罗马数字的字符;< / li >
< li > 下载PaddleOCR提供的预训练模型, 配置预训练模型和数据的路径, 开始训练。< / li >
< / ol >
< h4 id = "q_19" > Q: 特殊字符( 例如一些标点符号) 识别效果不好怎么办? < a class = "headerlink" href = "#q_19" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :首先请您确认要识别的特殊字符是否在字典中。
如果字符在已经字典中但效果依然不好, 可能是由于识别数据较少导致的, 您可以增加相应数据finetune模型。< / p >
< hr / >
< h4 id = "q_20" > Q: 单张图上多语种并存识别( 如单张图印刷体和手写文字并存) , 应该如何处理? < a class = "headerlink" href = "#q_20" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 单张图像中存在多种类型文本的情况很常见, 典型的以学生的试卷为代表, 一张图像同时存在手写体和印刷体两种文本, 这类情况下, 可以尝试”1个检测模型+1个N分类模型+N个识别模型”的解决方案。
其中不同类型文本共用同一个检测模型, N分类模型指额外训练一个分类器, 将检测到的文本进行分类, 如手写+印刷的情况就是二分类, N种语言就是N分类, 在识别的部分, 针对每个类型的文本单独训练一个识别模型, 如手写+印刷的场景,就需要训练一个手写体识别模型,一个印刷体识别模型,如果一个文本框的分类结果是手写体,那么就传给手写体识别模型进行识别,其他情况同理。< / p >
< h4 id = "q_21" > Q: 多语言的字典里是混合了不同的语种,这个是有什么讲究吗?统一到一个字典里会对精度造成多大的损失?< a class = "headerlink" href = "#q_21" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 统一到一个字典里, 会造成最后一层FC过大, 增加模型大小。如果有特殊需求的话, 可以把需要的几种语言合并字典训练模型, 合并字典之后如果引入过多的形近字, 可能会造成精度损失, 字符平衡的问题可能也需要考虑一下。在PaddleOCR里暂时将语言字典分开。< / p >
< h4 id = "q_22" > Q: 类似泰语这样的小语种, 部分字会占用两个字符甚至三个字符, 请问如何制作字典< a class = "headerlink" href = "#q_22" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :处理字符的时候,把多字符的当作一个字就行,字典中每行是一个字。< / p >
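< p > A minimal sketch of reading such a dictionary; the file name is a placeholder, and the key point is that one line equals one recognizable unit regardless of how many code points it contains:< / p >
< div class = "language-python highlight" > < pre > < code > # Each line of the dictionary file is one "character" for the recognizer,
# even if it is composed of several Unicode code points.
with open("th_dict.txt", encoding="utf-8") as f:  # placeholder path
    charset = [line.rstrip("\n") for line in f]
print(len(charset), "classes")
< / code > < / pre > < / div >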
< hr / >
< h4 id = "q-ppocr" > Q: 想把简历上的文字识别出来后, 能够把关系一一对应起来, 比如姓名和它后面的名字组成一对, 籍贯、邮箱、学历等等都和各自的内容关联起来, 这个应该如何处理, PPOCR目前支持吗? < a class = "headerlink" href = "#q-ppocr" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 这样的需求在企业应用中确实比较常见,但往往都是个性化的需求,没有非常规整统一的处理方式。常见的处理方式有如下两种:< / p >
< ol >
< li > 对于单一版式、或者版式差异不大的应用场景,可以基于识别场景的一些先验信息,将识别内容进行配对; 比如运用表单结构信息:常见表单"姓名"关键字的后面,往往紧跟的就是名字信息< / li >
< li > 对于版式多样,或者无固定版式的场景, 需要借助于NLP中的NER技术, 给识别内容中的某些字段, 赋予key值< / li >
< / ol >
< p > 由于这部分需求和业务场景强相关, 难以用一个统一的模型去处理, 目前PPOCR暂不支持。 如果需要用到NER技术, 可以参照Paddle团队的另一个开源套件: < a href = "https://github.com/PaddlePaddle/ERNIE" > PaddlePaddle/ERNIE< / a > , 其提供的预训练模型ERNIE, 可以帮助提升NER任务的准确率。< / p >
< h3 id = "16" > 1.6 训练过程与模型调优< a class = "headerlink" href = "#16" title = "Permanent link" > ¶ < / a > < / h3 >
< h4 id = "q-batch_size" > Q: 增大batch_size模型训练速度没有明显提升< a class = "headerlink" href = "#q-batch_size" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 如果batch_size打得太大, 加速效果不明显的话, 可以试一下增大初始化内存的值, 运行代码前设置环境变量:
export FLAGS_initial_cpu_memory_in_mb=2000 # 设置初始化内存约2G左右< / p >
< h4 id = "q_23" > Q: 预测时提示图像过大,显存、内存溢出了,应该如何处理?< a class = "headerlink" href = "#q_23" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 可以按照这个PR的修改来缓解显存、内存占用 #2230< / p >
< h4 id = "q-9070" > Q: 识别训练时, 训练集精度已经到达90了, 但验证集精度一直在70, 涨不上去怎么办? < a class = "headerlink" href = "#q-9070" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 训练集精度90, 测试集70多的话, 应该是过拟合了, 有两个可尝试的方法: ( 1) 加入更多的增广方式或者调大增广prob的概率, 默认为0.4。( 2) 调大系统的l2 decay值< / p >
< h3 id = "17" > 1.7 补充资料< a class = "headerlink" href = "#17" title = "Permanent link" > ¶ < / a > < / h3 >
< h4 id = "q-ocr_2" > Q: 对于小白如何快速入门中文OCR项目实践? < a class = "headerlink" href = "#q-ocr_2" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 建议可以先了解OCR方向的基础知识, 大概了解基础的检测和识别模型算法。然后在Github上可以查看OCR方向相关的repo。目前来看, 从内容的完备性来看, PaddleOCR的中英文双语教程文档是有明显优势的, 在数据集、模型训练、预测部署文档详实, 可以快速入手。而且还有微信用户群答疑, 非常适合学习实践。项目地址: PaddleOCR AI 快车道课程:< a href = "https://aistudio.baidu.com/aistudio/course/introduce/1519" > https://aistudio.baidu.com/aistudio/course/introduce/1519< / a > < / p >
< h2 id = "2-paddleocr" > 2. PaddleOCR实战问题< a class = "headerlink" href = "#2-paddleocr" title = "Permanent link" > ¶ < / a > < / h2 >
< h3 id = "21-paddleocr-repo" > 2.1 PaddleOCR repo< a class = "headerlink" href = "#21-paddleocr-repo" title = "Permanent link" > ¶ < / a > < / h3 >
< h4 id = "q-paddleocr-developdygraph" > Q: PaddleOCR develop分支和dygraph分支的区别? < a class = "headerlink" href = "#q-paddleocr-developdygraph" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 目前PaddleOCR有四个分支, 分别是: < / p >
< ul >
< li > develop: 基于Paddle静态图开发的分支, 推荐使用paddle1.8 或者2.0版本, 该分支具备完善的模型训练、预测、推理部署、量化裁剪等功能, 领先于release/1.1分支。< / li >
< li > release/1.1: PaddleOCR 发布的第一个稳定版本,基于静态图开发,具备完善的训练、预测、推理部署、量化裁剪等功能。< / li >
< li > dygraph: 基于Paddle动态图开发的分支, 目前仍在开发中, 未来将作为主要开发分支, 运行要求使用Paddle2.0.0版本。< / li >
< li > release/2.0-rc1-0: PaddleOCR发布的第二个稳定版本, 基于动态图和paddle2.0版本开发,动态图开发的工程更易于调试,目前支,支持模型训练、预测,暂不支持移动端部署。< / li >
< / ul >
< p > 如果您已经上手过PaddleOCR, 并且希望在各种环境上部署PaddleOCR, 目前建议使用静态图分支, develop或者release/1.1分支。如果您是初学者, 想快速训练, 调试PaddleOCR中的算法, 建议尝鲜PaddleOCR dygraph分支。< / p >
< p > < strong > 注意< / strong > : develop和dygraph分支要求的Paddle版本、本地环境有差别, 请注意不同分支环境安装部分的差异。< / p >
< h4 id = "qpaddleocrocr" > Q: PaddleOCR与百度的其他OCR产品有什么区别? < a class = "headerlink" href = "#qpaddleocrocr" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : PaddleOCR主要聚焦通用ocr, 如果有垂类需求, 您可以用PaddleOCR+垂类数据自己训练;
如果缺少带标注的数据, 或者不想投入研发成本, 建议直接调用开放的API, 开放的API覆盖了目前比较常见的一些垂类。< / p >
< h3 id = "22" > 2.2 安装环境< a class = "headerlink" href = "#22" title = "Permanent link" > ¶ < / a > < / h3 >
< h4 id = "qoserror-winerror-126-mac-pro-python-34-shapely-import" > Q: OSError: [WinError 126] 找不到指定的模块。mac pro python 3.4 shapely import 问题< a class = "headerlink" href = "#qoserror-winerror-126-mac-pro-python-34-shapely-import" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 这个问题是因为shapely库安装有误, 可以参考 #212 这个issue重新安装一下< / p >
< h4 id = "qpaddlepaddlegpu-osenvironcuda_visible_devices" > Q: PaddlePaddle怎么指定GPU运行 os.environ["CUDA_VISIBLE_DEVICES"]这种不生效< a class = "headerlink" href = "#qpaddlepaddlegpu-osenvironcuda_visible_devices" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :通过设置 export CUDA_VISIBLE_DEVICES='0'环境变量< / p >
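< p > If you prefer setting it inside Python, a common cause of the variable "not working" is assigning it after the framework has already initialized CUDA. A minimal sketch; the ordering is the point, not the specific device id:< / p >
< div class = "language-python highlight" > < pre > < code > import os

# Must be set BEFORE paddle (or any CUDA-using library) is imported;
# otherwise the visible device list has already been fixed.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

import paddle  # noqa: E402

print(paddle.device.get_device())
< / code > < / pre > < / div >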
< h4 id = "qpaddleocrwindowsmac" > Q: PaddleOCR是否支持在Windows或Mac系统上运行? < a class = "headerlink" href = "#qpaddleocrwindowsmac" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : PaddleOCR已完成Windows和Mac系统适配, 运行时注意两点:
( 1) 在快速安装时, 如果不想安装docker, 可跳过第一步, 直接从第二步安装paddle开始。
( 2) inference模型下载时, 如果没有安装wget, 可直接点击模型链接或将链接地址复制到浏览器进行下载, 并解压放置到相应目录。< / p >
< h3 id = "23" > 2.3 数据量说明< a class = "headerlink" href = "#23" title = "Permanent link" > ¶ < / a > < / h3 >
< h4 id = "qocr" > Q: 简单的对于精度要求不高的OCR任务, 数据集需要准备多少张呢? < a class = "headerlink" href = "#qocr" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : ( 1) 训练数据的数量和需要解决问题的复杂度有关系。难度越大, 精度要求越高, 则数据集需求越大, 而且一般情况实际中的训练数据越多效果越好。< / p >
< p > ( 2) 对于精度要求不高的场景, 检测任务和识别任务需要的数据量是不一样的。对于检测任务, 500张图像可以保证基本的检测效果。对于识别任务, 需要保证识别字典中每个字符出现在不同场景的行文本图像数目需要大于200张( 举例, 如果有字典中有5个字, 每个字都需要出现在200张图片以上, 那么最少要求的图像数量应该在200-1000张之间) , 这样可以保证基本的识别效果。< / p >
< h4 id = "qpaddleocrgpuepoch" > Q: 请问PaddleOCR项目中的中文超轻量和通用模型用了哪些数据集? 训练多少样本, gpu什么配置, 跑了多少个epoch, 大概跑了多久? < a class = "headerlink" href = "#qpaddleocrgpuepoch" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :
( 1) 检测的话, LSVT街景数据集共3W张图像, 超轻量模型, 150epoch左右, 2卡V100 跑了不到2天; 通用模型: 2卡V100 150epoch 不到4天。
( 2) 识别的话, 520W左右的数据集( 真实数据26W+合成数据500W) 训练, 超轻量模型: 4卡V100, 总共训练了5天左右。通用模型: 4卡V100, 共训练6天。< / p >
< p > 超轻量模型训练分为2个阶段:
(1)全量数据训练50epoch, 耗时3天
(2)合成数据+真实数据按照1:1数据采样, 进行finetune训练200epoch, 耗时2天< / p >
< p > 通用模型训练:
真实数据+合成数据,动态采样(1: 1)训练, 200epoch, 耗时 6天左右。< / p >
< h4 id = "q30w500w" > Q: 训练文字识别模型, 真实数据有30w, 合成数据有500w, 需要做样本均衡吗? < a class = "headerlink" href = "#q30w500w" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 需要, 一般需要保证一个batch中真实数据样本和合成数据样本的比例是5: 1~10: 1左右效果比较理想。如果合成数据过大, 会过拟合到合成数据, 预测效果往往不佳。还有一种启发性的尝试是可以先用大量合成数据训练一个base模型, 然后再用真实数据微调, 在一些简单场景效果也是会有提升的。< / p >
< h4 id = "q_24" > Q: 当训练数据量少时,如何获取更多的数据?< a class = "headerlink" href = "#q_24" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 当训练数据量少时, 可以尝试以下三种方式获取更多的数据: ( 1) 人工采集更多的训练数据, 最直接也是最有效的方式。( 2) 基于PIL和opencv基本图像处理或者变换。例如PIL中ImageFont, Image, ImageDraw三个模块将文字写到背景中, opencv的旋转仿射变换, 高斯滤波等。( 3) 利用数据生成算法合成数据, 例如pix2pix等算法。< / p >
< h3 id = "24" > 2.4 数据标注与生成< a class = "headerlink" href = "#24" title = "Permanent link" > ¶ < / a > < / h3 >
< blockquote >
< p > [!NOTE]
StyleText 已经移动到 < a href = "https://github.com/PFCCLab/StyleText" > PFCCLab/StyleText< / a > < / p >
< / blockquote >
< h4 id = "q-style-text" > Q: Style-Text 如何不文字风格迁移,就像普通文本生成程序一样默认字体直接输出到分割的背景图?< a class = "headerlink" href = "#q-style-text" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 使用image_synth模式会输出fake_bg.jpg, 即为背景图。如果想要批量提取背景, 可以稍微修改一下代码, 将fake_bg保存下来即可。要修改的位置:
< a href = "https://github.com/PaddlePaddle/PaddleOCR/blob/de3e2e7cd3b8b65ee02d7a41e570fa5b511a3c1d/StyleText/engine/synthesisers.py#L68" > https://github.com/PaddlePaddle/PaddleOCR/blob/de3e2e7cd3b8b65ee02d7a41e570fa5b511a3c1d/StyleText/engine/synthesisers.py#L68< / a > < / p >
< h4 id = "q-styletext" > Q: 能否修改StyleText配置文件中的分辨率? < a class = "headerlink" href = "#q-styletext" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : StyleText目前的训练数据主要是高度32的图片, 建议不要改变高度。未来我们会支持更丰富的分辨率。< / p >
< h4 id = "q-styletext_1" > Q: StyleText是否可以更换字体文件? < a class = "headerlink" href = "#q-styletext_1" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : StyleText项目中的字体文件为标准字体, 主要用作模型的输入部分, 不能够修改。
StyleText的用途主要是: 提取style_image中的字体、背景等style信息, 根据语料生成同样style的图片。< / p >
< h4 id = "q-styletext_2" > Q: StyleText批量生成图片为什么没有输出? < a class = "headerlink" href = "#q-styletext_2" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :需要检查以下您配置文件中的路径是否都存在。尤其要注意的是< a href = "https://github.com/PFCCLab/StyleText/blob/main/README_ch.md#%E4%B8%89%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B" > label_file配置< / a > 。
如果您使用的style_image输入没有label信息, 您依然需要提供一个图片文件列表。< / p >
< h4 id = "qstyletexttextinputstyleinput" > Q: 使用StyleText进行数据合成时, 文本(TextInput)的长度远超StyleInput的长度, 该怎么处理与合成呢? < a class = "headerlink" href = "#qstyletexttextinputstyleinput" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 在使用StyleText进行数据合成的时候, 建议StyleInput的长度长于TextInput的长度。有2种方法可以处理上述问题: < / p >
< ol >
< li > 将StyleInput按列的方向进行复制与扩充, 直到其超过TextInput的长度。< / li >
< li > 将TextInput进行裁剪, 保证每段TextInput都稍短于StyleInput, 分别合成 之后,再拼接在一起。< / li >
< / ol >
< p > 实际使用中发现, 使用第2种方法的效果在长文本合成的场景中的合成效果更好, StyleText中提供的也是第2种数据合成的逻辑。< / p >
< h4 id = "q-styletext_3" > Q: StyleText 合成数据效果不好?< a class = "headerlink" href = "#q-styletext_3" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : StyleText模型生成的数据主要用于OCR识别模型的训练。PaddleOCR目前识别模型的输入为32 x N, 因此当前版本模型主要适用高度为32的数据。
建议要合成的数据尺寸设置为32 x N。尺寸相差不多的数据也可以生成, 尺寸很大或很小的数据效果确实不佳。< / p >
< h3 id = "25" > 2.5 预训练模型与微调< a class = "headerlink" href = "#25" title = "Permanent link" > ¶ < / a > < / h3 >
< h4 id = "qbackbone" > Q: 如何更换文本检测/识别的backbone? < a class = "headerlink" href = "#qbackbone" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 无论是文字检测, 还是文字识别, 骨干网络的选择是预测效果和预测效率的权衡。一般, 选择更大规模的骨干网络, 例如ResNet101_vd, 则检测或识别更准确, 但预测耗时相应也会增加。而选择更小规模的骨干网络, 例如MobileNetV3_small_x0_35, 则预测更快, 但检测或识别的准确率会大打折扣。幸运的是不同骨干网络的检测或识别效果与在ImageNet数据集图像1000分类任务效果正相关。飞桨图像分类套件PaddleClas汇总了ResNet_vd、Res2Net、HRNet、MobileNetV3、GhostNet等23种系列的分类网络结构, 在上述图像分类任务的top1识别准确率, GPU(V100和T4)和CPU(骁龙855)的预测耗时以及相应的117个预训练模型下载地址。< / p >
< p > ( 1) 文字检测骨干网络的替换, 主要是确定类似于ResNet的4个stages, 以方便集成后续的类似FPN的检测头。此外, 对于文字检测问题, 使用ImageNet训练的分类预训练模型, 可以加速收敛和效果提升。< / p >
< p > ( 2) 文字识别的骨干网络的替换, 需要注意网络宽高stride的下降位置。由于文本识别一般宽高比例很大, 因此高度下降频率少一些, 宽度下降频率多一些。可以参考PaddleOCR中MobileNetV3骨干网络的改动。< / p >
< h4 id = "q_25" > Q: 参照文档做实际项目时,是重新训练还是在官方训练的基础上进行训练?具体如何操作?< a class = "headerlink" href = "#q_25" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 基于官方提供的模型, 进行finetune的话, 收敛会更快一些。 具体操作上, 以识别模型训练为例: 如果修改了字符文件, 可以设置pretraind_model为官方提供的预训练模型< / p >
< h4 id = "q-inferencepdiparams-inferencepdmodel" > Q: 下载的识别模型解压后缺失文件, 没有期望的inference.pdiparams, inference.pdmodel等文件< a class = "headerlink" href = "#q-inferencepdiparams-inferencepdmodel" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 用解压软件解压可能会出现这个问题, 建议二次解压下或者用命令行解压tar xf< / p >
< h4 id = "q-checkpointsload" > Q: 为什么在checkpoints中load下载的预训练模型会报错? < a class = "headerlink" href = "#q-checkpointsload" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :这里有两个不同的概念:< / p >
< p > pretrained_model: 指预训练模型, 是已经训练完成的模型。这时会load预训练模型的参数, 但并不会load学习率、优化器以及训练状态等。如果需要finetune, 应该使用pretrained。
checkpoints: 指之前训练的中间结果, 例如前一次训练到了100个epoch, 想接着训练。这时会load尝试所有信息, 包括模型的参数, 之前的状态等。< / p >
< h4 id = "q-finetune" > Q: 如何对检测模型finetune, 比如冻结前面的层或某些层使用小的学习率学习? < a class = "headerlink" href = "#q-finetune" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 如果是冻结某些层, 可以将变量的stop_gradient属性设置为True, 这样计算这个变量之前的所有参数都不会更新了, 参考: < a href = "https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/faq/train_cn.html#id4" > https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/faq/train_cn.html#id4< / a > < / p >
< p > 如果对某些层使用更小的学习率学习,静态图里还不是很方便,一个方法是在参数初始化的时候,给权重的属性设置固定的学习率,参考:< a href = "https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/fluid/param_attr/ParamAttr_cn.html#paramattr" > https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/fluid/param_attr/ParamAttr_cn.html#paramattr< / a > < / p >
< p > 实际上我们实验发现, 直接加载模型去fine-tune, 不设置某些层不同学习率, 效果也都不错< / p >
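< p > As an illustration under the dygraph branch, freezing could look like the following sketch; the "backbone" name prefix is an assumption that depends on your model definition:< / p >
< div class = "language-python highlight" > < pre > < code > import paddle

def freeze_by_prefix(model, prefix="backbone"):
    # Parameters whose name starts with the given prefix stop receiving gradients.
    for name, param in model.named_parameters():
        if name.startswith(prefix):
            param.stop_gradient = True

# usage: call freeze_by_prefix(model) before constructing the optimizer
< / code > < / pre > < / div >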
< h3 id = "26" > 2.6 模型超参调整< a class = "headerlink" href = "#26" title = "Permanent link" > ¶ < / a > < / h3 >
< h4 id = "q-db640" > Q: DB检测训练输入尺寸640, 可以改大一些吗? < a class = "headerlink" href = "#q-db640" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 不建议改大。检测模型训练输入尺寸是预处理中random crop后的尺寸, 并非直接将原图进行resize, 多数场景下这个尺寸并不小了, 改大后可能反而并不合适, 而且训练会变慢。另外, 代码里可能有的地方参数按照预设输入尺寸适配的, 改大后可能有隐藏风险。< / p >
< h4 id = "q-32" > Q: 预处理部分, 图片的长和宽为什么要处理成32的倍数? < a class = "headerlink" href = "#q-32" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 以检测中的resnet骨干网络为例, 图像输入网络之后, 需要经过5次2倍降采样, 共32倍, 因此建议输入的图像尺寸为32的倍数。< / p >
< h4 id = "q-stride2-1" > Q: 在识别模型中, 为什么降采样残差结构的stride为(2, 1)? < a class = "headerlink" href = "#q-stride2-1" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : stride为(2, 1), 表示在图像y方向( 高度方向) 上stride为2, x方向( 宽度方向) 上为1。由于待识别的文本图像通常为长方形, 这样只在高度方向做下采样, 尽量保留宽度方向的序列信息, 避免宽度方向下采样后丢失过多的文字信息。< / p >
< h4 id = "qshape" > Q: 训练识别时, 如何选择合适的网络输入shape? < a class = "headerlink" href = "#qshape" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 一般高度采用32, 最长宽度的选择, 有两种方法: < / p >
< p > ( 1) 统计训练样本图像的宽高比分布。最大宽高比的选取考虑满足80%的训练样本。< / p >
< p > ( 2) 统计训练样本文字数目。最长字符数目的选取考虑满足80%的训练样本。然后中文字符长宽比近似认为是1, 英文认为3: 1, 预估一个最长宽度。< / p >
< h4 id = "q_26" > Q: 识别模型框出来的位置太紧凑, 会丢失边缘的文字信息, 导致识别错误< a class = "headerlink" href = "#q_26" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :可以在命令中加入 --det_db_unclip_ratio , 参数定义位置, 这个参数是检测后处理时控制文本框大小的, 默认1.6, 可以尝试改成2.5或者更大,反之,如果觉得文本框不够紧凑,也可以把该参数调小。< / p >
< h3 id = "27" > 2.7 模型结构< a class = "headerlink" href = "#27" title = "Permanent link" > ¶ < / a > < / h3 >
< h4 id = "qlstm" > Q: 文本识别训练不加LSTM是否可以收敛? < a class = "headerlink" href = "#qlstm" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 理论上是可以收敛的, 加上LSTM模块主要是为了挖掘文字之间的序列关系, 提升识别效果。对于有明显上下文语义的场景效果会比较明显。< / p >
< h4 id = "qlstmgru" > Q: 文本识别中LSTM和GRU如何选择? < a class = "headerlink" href = "#qlstmgru" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 从项目实践经验来看, 序列模块采用LSTM的识别效果优于GRU, 但是LSTM的计算量比GRU大一些, 可以根据自己实际情况选择。< / p >
< h4 id = "qcrnnbackbonedensenetresnet_vd" > Q: 对于CRNN模型, backbone采用DenseNet和ResNet_vd, 哪种网络结构更好? < a class = "headerlink" href = "#qcrnnbackbonedensenetresnet_vd" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : Backbone的识别效果在CRNN模型上的效果, 与Imagenet 1000 图像分类任务上识别效果和效率一致。在图像分类任务上ResnNet_vd( 79%+) 的识别精度明显优于DenseNet( 77%+) , 此外对于GPU, Nvidia针对ResNet系列模型做了优化, 预测效率更高, 所以相对而言, resnet_vd是较好选择。如果是移动端, 可以优先考虑MobileNetV3系列。< / p >
< h4 id = "q-backbone" > Q: 如何根据不同的硬件平台选用不同的backbone? < a class = "headerlink" href = "#q-backbone" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 在不同的硬件上, 不同的backbone的速度优势不同, 可以根据不同平台的速度-精度图来确定backbone, 这里可以参考< a href = "https://github.com/PaddlePaddle/PaddleClas/tree/release/2.0/docs/zh_CN/models" > PaddleClas模型速度-精度图< / a > 。< / p >
< h3 id = "28-pp-ocr" > 2.8 PP-OCR系统< a class = "headerlink" href = "#28-pp-ocr" title = "Permanent link" > ¶ < / a > < / h3 >
< h4 id = "q-pp-ocrse" > Q: 在PP-OCR系统中, 文本检测的骨干网络为什么没有使用SE模块? < a class = "headerlink" href = "#q-pp-ocrse" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : SE模块是MobileNetV3网络一个重要模块, 目的是估计特征图每个特征通道重要性, 给特征图每个特征分配权重, 提高网络的表达能力。但是, 对于文本检测, 输入网络的分辨率比较大, 一般是640*640, 利用SE模块估计特征图每个特征通道重要性比较困难, 网络提升能力有限, 但是该模块又比较耗时, 因此在PP-OCR系统中, 文本检测的骨干网络没有使用SE模块。实验也表明, 当去掉SE模块, 超轻量模型大小可以减小40%, 文本检测效果基本不受影响。详细可以参考PP-OCR技术文章, < a href = "https://arxiv.org/abs/2009.09941" > https://arxiv.org/abs/2009.09941< / a > .< / p >
< h4 id = "q-pp-ocr" > Q: PP-OCR系统中, 文本检测的结果有置信度吗? < a class = "headerlink" href = "#q-pp-ocr" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :文本检测的结果有置信度,由于推理过程中没有使用,所以没有显示的返回到最终结果中。如果需要文本检测结果的置信度,可以在< a href = "../../ppocr/postprocess/db_postprocess.py" > 文本检测DB的后处理代码< / a > 的155行, 添加scores信息。这样, 在< a href = "../../tools/infer/predict_det.py" > 检测预测代码< / a > 的197行, 就可以拿到文本检测的scores信息。< / p >
< h4 id = "q-db" > Q: DB文本检测, 特征提取网络金字塔构建的部分代码在哪儿? < a class = "headerlink" href = "#q-db" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :特征提取网络金字塔构建的部分:< a href = "../../ppocr/modeling/necks/db_fpn.py" > 代码位置< / a > 。ppocr/modeling文件夹里面是组网相关的代码, 其中architectures是文本检测或者文本识别整体流程代码; backbones是骨干网络相关代码; necks是类似与FPN的颈函数代码; heads是提取文本检测或者文本识别预测结果相关的头函数; transforms是类似于TPS特征预处理模块。更多的信息可以参考< a href = "./tree.md" > 代码组织结构< / a > 。< / p >
< h4 id = "qpaddleocr" > Q: PaddleOCR如何做到横排和竖排同时支持的? < a class = "headerlink" href = "#qpaddleocr" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 合成了一批竖排文字, 逆时针旋转90度后加入训练集与横排一起训练。预测时根据图片长宽比判断是否为竖排, 若为竖排则将crop出的文本逆时针旋转90度后送入识别网络。< / p >
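< p > A sketch of that routing rule; the aspect-ratio threshold of 1.5 is an assumption for illustration, not the exact value used in the code:< / p >
< div class = "language-python highlight" > < pre > < code > import numpy as np

def maybe_rotate_vertical(crop, ratio_thresh=1.5):
    # Tall, narrow crops are treated as vertical text and rotated
    # 90 degrees counterclockwise (np.rot90's default) before recognition.
    h, w = crop.shape[:2]
    if h / float(max(w, 1)) >= ratio_thresh:
        crop = np.rot90(crop)
    return crop
< / code > < / pre > < / div >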
< h4 id = "q_27" > Q: 目前知识蒸馏有哪些主要的实践思路?< a class = "headerlink" href = "#q_27" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 知识蒸馏即利用教师模型指导学生模型的训练, 目前有3种主要的蒸馏思路: < / p >
< ol >
< li > 基于输出结果的蒸馏, 即让学生模型学习教师模型的软标签( 分类或者OCR识别等任务中) 或者概率热度图( 分割等任务中) 。< / li >
< li > 基于特征图的蒸馏,即让学生模型学习教师模型中间层的特征图,拟合中间层的一些特征。< / li >
< li > 基于关系的蒸馏, 针对不同的样本( 假设个数为N) , 教师模型会有不同的输出, 那么可以基于不同样本的输出, 计算一个NxN的相关性矩阵, 可以让学生模型去学习教师模型关于不同样本的相关性矩阵。< / li >
< / ol >
< p > 当然,知识蒸馏方法日新月异,也欢迎大家提出更多的总结与建议。< / p >
< h4 id = "q-preds_idx-predsargmaxaxis2beam-search" > Q: 文字识别模型模型的输出矩阵需要进行解码才能得到识别的文本。代码中实现为preds_idx = preds.argmax(axis=2), 也就是最佳路径解码法。这是一种贪心算法, 是每一个时间步只将最大概率的字符作为当前时间步的预测输出, 但得到的结果不一定是最好的。为什么不使用beam search这种方式进行解码呢? < a class = "headerlink" href = "#q-preds_idx-predsargmaxaxis2beam-search" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 实验发现, 使用贪心的方法去做解码, 识别精度影响不大, 但是速度方面的优势比较明显, 因此PaddleOCR中使用贪心算法去做识别的解码。< / p >
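< p > A toy illustration of the greedy (best-path) CTC decoding matching the quoted line; the logits are made up for the example, and index 0 is assumed to be the CTC blank:< / p >
< div class = "language-python highlight" > < pre > < code > import numpy as np

# Toy output: batch=1, T=5 time steps, C=4 classes (index 0 = CTC blank).
preds = np.array([[[0.1, 0.8, 0.05, 0.05],
                   [0.1, 0.8, 0.05, 0.05],
                   [0.9, 0.05, 0.03, 0.02],
                   [0.1, 0.1, 0.7, 0.1],
                   [0.1, 0.1, 0.7, 0.1]]])
preds_idx = preds.argmax(axis=2)  # greedy best path: [[1, 1, 0, 2, 2]]

# CTC decode: collapse consecutive repeats, then drop blanks.
decoded, prev = [], -1
for idx in preds_idx[0]:
    if idx != prev and idx != 0:
        decoded.append(int(idx))
    prev = idx
print(decoded)  # [1, 2]
< / code > < / pre > < / div >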
< h3 id = "29" > 2.9 端到端< a class = "headerlink" href = "#29" title = "Permanent link" > ¶ < / a > < / h3 >
< h4 id = "q-pgnet" > Q: 端到端算法PGNet是否支持中文识别, 速度会很慢嘛? < a class = "headerlink" href = "#q-pgnet" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 目前开源的PGNet算法模型主要是用于检测英文数字, 对于中文的识别需要自己训练, 大家可以使用开源的端到端中文数据集, 而对于复杂文本( 弯曲文本) 的识别, 也可以自己构造一批数据集针对进行训练, 对于推理速度, 可以先将模型转换为inference再进行预测, 速度应该会相当可观。< / p >
< h4 id = "q-pgnet_1" > Q: 端到端算法PGNet提供了两种后处理方式, 两者之间有什么区别呢? < a class = "headerlink" href = "#q-pgnet_1" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 两种后处理的区别主要在于速度的推理, config中PostProcess有fast/slow两种模式, slow模式的后处理速度慢, 精度相对较高, fast模式的后处理速度快, 精度也在可接受的范围之内。建议使用速度快的后处理方式。< / p >
< h4 id = "q-pgneteval" > Q: 使用PGNet进行eval报错? < a class = "headerlink" href = "#q-pgneteval" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 需要注意, 我们目前在release/2.1更新了评测代码, 目前支持A, B两种评测模式: < / p >
< ul >
< li > A模式: 该模式主要为了方便用户使用, 与训练集一样的标注文件就可以正常进行eval操作, 代码中默认是A模式。< / li >
< li > B模式: 该模式主要为了保证我们的评测代码可以和Total Text官方的评测方式对齐, 该模式下直接加载官方提供的mat文件进行eval。< / li >
< / ul >
< h4 id = "q-pgnet_2" > Q: PGNet有中文预训练模型吗? < a class = "headerlink" href = "#q-pgnet_2" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 目前我们尚未提供针对中文的预训练模型,如有需要,可以尝试自己训练。具体需要修改的地方有:< / p >
< ol >
< li > < a href = "https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.1/configs/e2e/e2e_r50_vd_pg.yml#L23-L24" > config文件中< / a > ,字典文件路径及语种设置;< / li >
< li > < a href = "https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.1/ppocr/modeling/heads/e2e_pg_head.py#L181" > 网络结构中< / a > , < code > out_channels< / code > 修改为字典中的字符数目+1( 考虑到空格) ; < / li >
< li > < a href = "https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.1/ppocr/losses/e2e_pg_loss.py#L93" > loss中< / a > ,修改< code > 37< / code > 为字典中的字符数目+1( 考虑到空格) ; < / li >
< / ol >
< h4 id = "q-pgnet_3" > Q: 用于PGNet的训练集, 文本框的标注有要求吗? < a class = "headerlink" href = "#q-pgnet_3" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : PGNet支持多点标注, 比如4点、8点、14点等。但需要注意的是, 标注点尽可能分布均匀( 相邻标注点间隔距离均匀一致) , 且label文件中的标注点需要从标注框的左上角开始, 按标注点顺时针顺序依次编写, 以上问题都可能对训练精度造成影响。
我们提供的, 基于Total Text数据集的PGNet预训练模型使用了14点标注方式。< / p >
< h4 id = "q-pgnet_4" > Q: 用PGNet做进行端到端训练时, 数据集标注的点的个数必须都是统一一样的吗? 能不能随意标点数,只要能够按顺时针从左上角开始标这样?< a class = "headerlink" href = "#q-pgnet_4" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 目前代码要求标注为统一的点数。< / p >
< h3 id = "210" > 2.10 模型效果与效果不一致< a class = "headerlink" href = "#210" title = "Permanent link" > ¶ < / a > < / h3 >
< h4 id = "q-pp-ocr_1" > Q: PP-OCR检测效果不好, 该如何优化? < a class = "headerlink" href = "#q-pp-ocr_1" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 具体问题具体分析:
如果在你的场景上检测效果不可用, 首选是在你的数据上做finetune训练;
如果图像过大, 文字过于密集, 建议不要过度压缩图像, 可以尝试修改检测预处理的resize逻辑, 防止图像被过度压缩;
检测框大小过于紧贴文字或检测框过大, 可以调整db_unclip_ratio这个参数, 加大参数可以扩大检测框, 减小参数可以减小检测框大小;
检测框存在很多漏检问题, 可以减小DB检测后处理的阈值参数det_db_box_thresh, 防止一些检测框被过滤掉, 也可以尝试设置det_db_score_mode为'slow';
其他方法可以选择use_dilation为True, 对检测输出的feature map做膨胀处理, 一般情况下, 会有效果改善; < / p >
< h4 id = "q2126" > Q: 同一张图通用检测出21个条目, 轻量级检测出26个 ,难道不是轻量级的好吗?< a class = "headerlink" href = "#q2126" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :可以主要参考可视化效果,通用模型更倾向于检测一整行文字,轻量级可能会有一行文字被分成两段检测的情况,不是数量越多,效果就越好。< / p >
< h4 id = "q-db_1" > Q: DB有些框太贴文本了反而去掉了一些文本的边角影响识别, 这个问题有什么办法可以缓解吗? < a class = "headerlink" href = "#q-db_1" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 可以把后处理的参数unclip_ratio适当调大一点。< / p >
< h4 id = "q-infer" > Q: 使用合成数据精调小模型后, 效果可以, 但是还没开源的小infer模型效果好, 这是为什么呢? < a class = "headerlink" href = "#q-infer" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : ( 1) 要保证使用的配置文件和pretrain weights是对应的; < / p >
< p > ( 2) 在微调时, 一般都需要真实数据, 如果使用合成数据, 效果反而可能会有下降, PaddleOCR中放出的识别inference模型也是基于预训练模型在真实数据上微调得到的, 效果提升比较明显; < / p >
< p > ( 3) 在训练的时候, 文本长度超过25的训练图像都会被丢弃, 因此需要看下真正参与训练的图像有多少, 太少的话也容易过拟合。< / p >
< h4 id = "q_28" > Q: 表格识别中,如何提高单字的识别结果?< a class = "headerlink" href = "#q_28" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 首先需要确认一下检测模型有没有有效的检测出单个字符,如果没有的话,需要在训练集当中添加相应的单字数据集。< / p >
< h4 id = "q-dygraphrelease20" > Q: 动态图分支(dygraph,release/2.0),训练模型和推理模型效果不一致< a class = "headerlink" href = "#q-dygraphrelease20" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 当前问题表现为: 使用训练完的模型直接测试结果较好, 但是转换为inference model后, 预测结果不一致; 出现这个问题一般是两个原因: < / p >
< ol >
< li > 预处理函数设置的不一致< / li >
< li > 后处理参数不一致 repo中config.yml文件的前后处理参数和inference预测默认的超参数有不一致的地方, 建议排查下训练模型预测和inference预测的前后处理, 参考issue。< / li >
< / ol >
< h4 id = "q-detinferenceeval" > Q: 自己训练的det模型, 在同一张图片上, inference模型与eval模型结果差别很大, 为什么? < a class = "headerlink" href = "#q-detinferenceeval" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 这是由于图片预处理不同造成的。如果训练的det模型图片输入并不是默认的shape[600, 600], eval的程序中图片预处理方式与train时一致 ( 由xxx_reader.yml中的test_image_shape参数决定缩放大小, 但predict_eval.py中的图片预处理方式由程序里的preprocess_params决定, 最好不要传入max_side_len, 而是传入和训练时一样大小的test_image_shape。< / p >
< h4 id = "q_29" > Q: 训练模型和测试模型的检测结果差距较大< a class = "headerlink" href = "#q_29" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 1. 检查两个模型使用的后处理参数是否是一样的, 训练的后处理参数在配置文件中的PostProcess部分, 测试模型的后处理参数在tools/infer/utility.py中, 最新代码中两个后处理参数已保持一致。< / p >
< h4 id = "q-paddleocrpythonc" > Q: PaddleOCR模型Python端预测和C++预测结果不一致?< a class = "headerlink" href = "#q-paddleocrpythonc" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 正常来说, python端预测和C++预测文本是一致的,如果预测结果差异较大, 建议首先排查diff出现在检测模型还是识别模型, 或者尝试换其他模型是否有类似的问题。 其次, 检查python端和C++端数据处理部分是否存在差异, 建议保存环境, 更新PaddleOCR代码再试下。 如果更新代码或者更新代码都没能解决, 建议在PaddleOCR微信群里或者issue中抛出您的问题。< / p >
< p > 用户总结的排查步骤:< a href = "https://github.com/PaddlePaddle/PaddleOCR/issues/2470" > https://github.com/PaddlePaddle/PaddleOCR/issues/2470< / a > < / p >
< h3 id = "211" > 2.11 训练调试与配置文件< a class = "headerlink" href = "#211" title = "Permanent link" > ¶ < / a > < / h3 >
< h4 id = "q-epoch" > Q: 某个类别的样本比较少, 通过增加训练的迭代次数或者是epoch, 变相增加小样本的数目, 这样能缓解这个问题么? < a class = "headerlink" href = "#q-epoch" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 尽量保证类别均衡, 某些类别样本少,可以通过补充合成数据的方式处理;实验证明训练集中出现频次较少的字符,识别效果会比较差,增加迭代次数不能改变样本量少的问题。< / p >
< h4 id = "q_30" > Q: 文本检测换成自己的数据没法训练, 有一些”###”是什么意思?< a class = "headerlink" href = "#q_30" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :数据格式有问题,”###” 表示要被忽略的文本区域,所以你的数据都被跳过了,可以换成其他任意字符或者就写个空的。< / p >
< h4 id = "q_31" > Q: 如何调试数据读取程序? < a class = "headerlink" href = "#q_31" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : tools/train.py中有一个test_reader()函数用于调试数据读取。< / p >
< h4 id = "q_32" > Q: 中文文本检测、文本识别构建训练集的话, 大概需要多少数据量< a class = "headerlink" href = "#q_32" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 检测需要的数据相对较少, 在PaddleOCR模型的基础上进行Fine-tune, 一般需要500张可达到不错的效果。 识别分英文和中文,一般英文场景需要几十万数据可达到不错的效果,中文则需要几百万甚至更多。< / p >
< h4 id = "q-config-ymlratio_list" > Q: config yml文件中的ratio_list参数的作用是什么? < a class = "headerlink" href = "#q-config-ymlratio_list" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 在动态图中, ratio_list在有多个数据源的情况下使用, ratio_list中的每个值是每个epoch从对应数据源采样数据的比例。如ratio_list=[0.3,0.2], label_file_list=['data1','data2'],代表每个epoch的训练数据包含data1 30%的数据, 和data2里 20%的数据, ratio_list中数值的和不需要等于1。ratio_list和label_file_list的长度必须一致。< / p >
< p > 静态图检测数据采样的逻辑与动态图不同,但基本不影响训练精度。< / p >
< p > 在静态图中,使用 检测 dataloader读取数据时, 会先设置每个epoch的数据量, 比如这里设置为1000, ratio_list中的值表示在1000中的占比, 比如ratio_list是[0.3, 0.7], 则表示使用两个数据源, 每个epoch从第一个数据源采样1000*0.3=300张图, 从第二个数据源采样700张图。ratio_list的值的和也不需要等于1。< / p >
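< p > A quick sanity check of the static-graph arithmetic described above, using the same example values:< / p >
< div class = "language-python highlight" > < pre > < code > # Per-epoch sample counts for the static-graph example above.
epoch_size = 1000
ratio_list = [0.3, 0.7]
samples = [int(epoch_size * r) for r in ratio_list]
print(samples)  # [300, 700]
< / code > < / pre > < / div >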
< h4 id = "q-iaa" > Q: iaa里面添加的数据增强方式, 是每张图像训练都会做增强还是随机的? 如何添加一个数据增强方法? < a class = "headerlink" href = "#q-iaa" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : iaa增强的训练配置参考: < a href = "https://github.com/PaddlePaddle/PaddleOCR/blob/0ccc1720c252beb277b9e522a1b228eb6abffb8a/configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml#L82" > 这里< / a > 。其中{ 'type': Fliplr, 'args': { 'p': 0.5 } } p是概率。新增数据增强, 可以参考< a href = "https://github.com/PaddlePaddle/PaddleOCR/blob/release%2F2.1/doc/doc_ch/add_new_algorithm.md#%E6%95%B0%E6%8D%AE%E5%8A%A0%E8%BD%BD%E5%92%8C%E5%A4%84%E7%90%86" > 这个方法< / a > < / p >
< h4 id = "q_33" > Q: 怎么加速训练过程呢?< a class = "headerlink" href = "#q_33" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : OCR模型训练过程中一般包含大量的数据增广, 这些数据增广是比较耗时的, 因此可以离线生成大量增广后的图像, 直接送入网络进行训练, 机器资源充足的情况下, 也可以使用分布式训练的方法, 可以参考< a href = "https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/distributed_training.md" > 分布式训练教程文档< / a > 。< / p >
< h4 id = "q-finetune_1" > Q: 一些特殊场景的数据识别效果差, 但是数据量很少, 不够用来finetune怎么办? < a class = "headerlink" href = "#q-finetune_1" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :您可以合成一些接近使用场景的数据用于训练。
我们计划推出基于特定场景的文本数据合成工具, 请您持续关注PaddleOCR的近期更新。< / p >
< h4 id = "q-paddleocr_2" > Q: PaddleOCR可以识别灰度图吗? < a class = "headerlink" href = "#q-paddleocr_2" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : PaddleOCR的模型均为三通道输入。如果您想使用灰度图作为输入, 建议直接用3通道的模式读入灰度图,
或者将单通道图像转换为三通道图像再识别。例如, opencv的cvtColor函数就可以将灰度图转换为RGB三通道模式。< / p >
< h4 id = "q_34" > Q: 如何合成手写中文数据集?< a class = "headerlink" href = "#q_34" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 手写数据集可以通过手写单字数据集合成得到。随机选取一定数量的单字图片和对应的label, 将图片高度resize为随机的统一高度后拼接在一起, 即可得到合成数据集。对于需要添加文字背景的情况, 建议使用阈值化将单字图片的白色背景处理为透明背景, 再与真实背景图进行合成。具体可以参考文档< a href = "https://github.com/PaddlePaddle/PaddleOCR/blob/a72d6f23be9979e0c103d911a9dca3e4613e8ccf/doc/doc_ch/handwritten_datasets.md" > 手写数据集< / a > 。< / p >
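< p > A minimal sketch of the concatenation step with Pillow; the loading of single-character crops and the background compositing are left out, and all names are placeholders:< / p >
< div class = "language-python highlight" > < pre > < code > from PIL import Image

def synth_line(char_imgs, height=32):
    """Paste single-character crops side by side at a uniform height."""
    resized = [im.resize((max(1, im.width * height // im.height), height))
               for im in char_imgs]
    line = Image.new("RGB", (sum(im.width for im in resized), height), "white")
    x = 0
    for im in resized:
        line.paste(im, (x, 0))
        x += im.width
    return line

# usage: line_img = synth_line([Image.open(p) for p in char_paths])
< / code > < / pre > < / div >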
< h4 id = "qpaddleocr200step" > Q: PaddleOCR默认不是200个step保存一次模型吗? 为啥文件夹下面都没有生成< a class = "headerlink" href = "#qpaddleocr200step" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 因为默认保存的起始点不是0, 而是4000, 将eval_batch_step [4000, 5000]改为[0, 2000] 就是从第0次迭代开始, 每2000迭代保存一次模型< / p >
< h4 id = "q-paddleocrcosine_decay" > Q: PaddleOCR在训练的时候一直使用cosine_decay的学习率下降策略, 这是为什么呢? < a class = "headerlink" href = "#q-paddleocrcosine_decay" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : cosine_decay表示在训练的过程中, 学习率按照cosine的变化趋势逐渐下降至0, 在迭代轮数更长的情况下, 比常量的学习率变化策略会有更好的收敛效果, 因此在实际训练的时候, 均采用了cosine_decay, 来获得精度更高的模型。< / p >
< h4 id = "q-cosine" > Q: Cosine学习率的更新策略是怎样的? 训练过程中为什么会在一个值上停很久? < a class = "headerlink" href = "#q-cosine" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : Cosine学习率的说明可以参考< a href = "https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/optimizer/lr/CosineAnnealingDecay_cn.html#cosineannealingdecay" > 这里< / a > < / p >
< p > 在PaddleOCR中, 为了让学习率更加平缓, 我们将其中的epoch调整成了iter。
学习率的更新会和总的iter数量有关。当iter比较大时, 会经过较多iter才能看出学习率的值有变化。< / p >
< h4 id = "q-cosinewarmup" > Q: 之前的CosineWarmup方法为什么不见了? < a class = "headerlink" href = "#q-cosinewarmup" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 我们对代码结构进行了调整, 目前的Cosine可以覆盖原有的CosineWarmup的功能, 只需要在配置文件中增加相应配置即可。
例如下面的代码, 可以设置warmup为2个epoch: < / p >
< div class = "language-text highlight" > < pre > < span > < / span > < code > < span id = "__span-0-1" > < a id = "__codelineno-0-1" name = "__codelineno-0-1" href = "#__codelineno-0-1" > < / a > lr:
< / span > < span id = "__span-0-2" > < a id = "__codelineno-0-2" name = "__codelineno-0-2" href = "#__codelineno-0-2" > < / a > name: Cosine
< / span > < span id = "__span-0-3" > < a id = "__codelineno-0-3" name = "__codelineno-0-3" href = "#__codelineno-0-3" > < / a > learning_rate: 0.001
< / span > < span id = "__span-0-4" > < a id = "__codelineno-0-4" name = "__codelineno-0-4" href = "#__codelineno-0-4" > < / a > warmup_epoch: 2
< / span > < / code > < / pre > < / div >
< h4 id = "q-warmup" > Q: 训练识别和检测时学习率要加上warmup, 目的是什么? < a class = "headerlink" href = "#q-warmup" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : Warmup机制先使学习率从一个较小的值逐步升到一个较大的值, 而不是直接就使用较大的学习率, 这样有助于模型的稳定收敛。在OCR检测和OCR识别中, 一般会带来精度~0.5%的提升。< / p >
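<p>Continuing the sketch above, warmup combined with cosine decay could look like this (iteration counts are illustrative, not PaddleOCR's exact implementation):</p>
<div class="language-python highlight"><pre><code>import math

def warmup_cosine_lr(base_lr, cur_iter, warmup_iters, total_iters):
    """Linear warmup followed by cosine decay to 0."""
    if cur_iter &lt; warmup_iters:
        return base_lr * cur_iter / warmup_iters  # linear ramp-up
    progress = (cur_iter - warmup_iters) / (total_iters - warmup_iters)
    return base_lr * 0.5 * (1.0 + math.cos(math.pi * progress))
</code></pre></div>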
< h4 id = "q-dygraph" > Q: 关于dygraph分支中, 文本识别模型训练, 要使用数据增强应该如何设置? < a class = "headerlink" href = "#q-dygraph" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :可以参考< a href = "../configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml" > 配置文件< / a > 在< code > Train['dataset']['transforms']< / code > 添加RecAug字段, 使数据增强生效。可以通过添加对aug_prob设置, 表示每种数据增强采用的概率。aug_prob默认是0.4。详细设置可以参考< a href = "https://github.com/PaddlePaddle/PaddleOCR/issues/1744" > ISSUE 1744< / a > 。< / p >
< h4 id = "q_35" > Q: 训练过程中,训练程序意外退出/挂起,应该如何解决?< a class = "headerlink" href = "#q_35" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 考虑内存, 显存( 使用GPU训练的话) 是否不足, 可在配置文件中, 将训练和评估的batch size调小一些。需要注意, 训练batch size调小时, 学习率learning rate也要调小, 一般可按等比例调整。< / p >
< h4 id = "q-log" > Q: 训练程序启动后直到结束, 看不到训练过程log? < a class = "headerlink" href = "#q-log" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 可以从以下三方面考虑:
1. 检查训练进程是否正常退出、显存占用是否释放、是否有残留进程, 如果确定是训练程序卡死, 可以检查环境配置, 遇到环境问题建议使用docker, 可以参考说明文档< a href = "https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.1/doc/doc_ch/installation.md" > 安装< / a > 。
2. 检查数据集的数据量是否太小, 可调小batch size从而增加一个epoch中的训练step数量, 或在训练config文件中, 将参数print_batch_step改为1, 即每一个step打印一次log信息。
3. 如果使用私有数据集训练, 可先用PaddleOCR提供/推荐的数据集进行训练,排查私有数据集是否存在问题。< / p >
< h4 id = "q-num-workers" > Q: 配置文件中的参数num workers是什么意思, 应该如何设置? < a class = "headerlink" href = "#q-num-workers" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 训练数据的读取需要硬盘IO, 而硬盘IO速度远小于GPU运算速度, 为了避免数据读取成为训练速度瓶颈, 可以使用多进程读取数据, num workers表示数据读取的进程数量, 0表示不使用多进程读取。在Linux系统下, 多进程读取数据时, 进程间通信需要基于共享内存, 因此使用多进程读取数据时, 建议设置共享内存不低于2GB, 最好可以达到8GB, 此时, num workers可以设置为CPU核心数。如果机器硬件配置较低, 或训练进程卡死、dataloader报错, 可以将num workers设置为0, 即不使用多进程读取数据。< / p >
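<p>A self-contained sketch of how num_workers maps onto the underlying DataLoader (the toy dataset is only for illustration; PaddleOCR builds the real one from the config):</p>
<div class="language-python highlight"><pre><code>import numpy as np
from paddle.io import DataLoader, Dataset

class ToyDataset(Dataset):
    def __len__(self):
        return 1024

    def __getitem__(self, idx):
        return np.random.rand(3, 32, 320).astype("float32")

# num_workers=0 loads in the main process; larger values spawn worker
# processes, which on Linux exchange batches through shared memory.
loader = DataLoader(ToyDataset(), batch_size=128, shuffle=True, num_workers=8)
</code></pre></div>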
< h3 id = "212" > 2.12 预测< a class = "headerlink" href = "#212" title = "Permanent link" > ¶ < / a > < / h3 >
< h4 id = "q-paddleocrtest_batch_size_per_card1" > Q: 为什么PaddleOCR检测预测是只支持一张图片测试? 即test_batch_size_per_card=1< a class = "headerlink" href = "#q-paddleocrtest_batch_size_per_card1" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 测试的时候, 对图像等比例缩放, 最长边960, 不同图像等比例缩放后长宽不一致, 无法组成batch, 所以设置为test_batch_size为1。< / p >
< h4 id = "q-paddleocrtensorrt" > Q: PaddleOCR支持tensorrt推理吗? < a class = "headerlink" href = "#q-paddleocrtensorrt" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 支持的, 需要在编译的时候将CMakeLists.txt文件当中, 将相关代码option(WITH_TENSORRT "Compile demo with TensorRT." OFF)的OFF改成ON。关于服务器端部署的更多设置, 可以参考飞桨官网< / p >
< h4 id = "q-tensorrtpaddleocr" > Q: 如何使用TensorRT加速PaddleOCR预测? < a class = "headerlink" href = "#q-tensorrtpaddleocr" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 目前paddle的dygraph分支已经支持了python和C++ TensorRT预测的代码, python端inference预测时把参数< a href = "https://github.com/PaddlePaddle/PaddleOCR/blob/3ec57e8df9263de6fa897e33d2d91bc5d0849ef3/tools/infer/utility.py#L37" > --use_tensorrt=True< / a > 即可,
C++TensorRT预测需要使用支持TRT的预测库并在编译时打开< a href = "https://github.com/PaddlePaddle/PaddleOCR/blob/3ec57e8df9263de6fa897e33d2d91bc5d0849ef3/deploy/cpp_infer/tools/build.sh#L15" > -DWITH_TENSORRT=ON< / a > 。
如果想修改其他分支代码支持TensorRT预测, 可以参考< a href = "https://github.com/PaddlePaddle/PaddleOCR/pull/2921" > PR< / a > 。< / p >
< p > 注: 建议使用TensorRT大于等于6.1.0.5以上的版本。< / p >
< h4 id = "q_36" > Q: 为什么识别模型做预测的时候,预测图片的数量数量还会影响预测的精度< a class = "headerlink" href = "#q_36" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 推理时识别模型默认的batch_size=6, 如预测图片长度变化大, 可能影响预测效果。如果出现上述问题可在推理的时候设置识别bs=1, 命令如下: < / p >
< div class = "language-bash highlight" > < table class = "highlighttable" > < tr > < td class = "linenos" > < div class = "linenodiv" > < pre > < span > < / span > < span class = "normal" > < a href = "#__codelineno-1-1" > 1< / a > < / span > < / pre > < / div > < / td > < td class = "code" > < div > < pre > < span > < / span > < code > < span id = "__span-1-1" > < a id = "__codelineno-1-1" name = "__codelineno-1-1" > < / a > python3< span class = "w" > < / span > tools/infer/predict_rec.py< span class = "w" > < / span > --image_dir< span class = "o" > =< / span > < span class = "s2" > " ./doc/imgs_words/ch/word_4.jpg" < / span > < span class = "w" > < / span > --rec_model_dir< span class = "o" > =< / span > < span class = "s2" > " ./ch_PP-OCRv3_rec_infer/" < / span > < span class = "w" > < / span > --rec_batch_num< span class = "o" > =< / span > < span class = "m" > 1< / span >
< / span > < / code > < / pre > < / div > < / td > < / tr > < / table > < / div >
< h3 id = "213" > 2.13 推理部署< a class = "headerlink" href = "#213" title = "Permanent link" > ¶ < / a > < / h3 >
< h4 id = "qpaddleocr_1" > Q: PaddleOCR模型推理方式有几种? 各自的优缺点是什么< a class = "headerlink" href = "#qpaddleocr_1" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :目前推理方式支持基于训练引擎推理和基于预测引擎推理。< / p >
< p > ( 1) 基于训练引擎推理不需要转换模型, 但是需要先组网再load参数, 语言只支持python, 不适合系统集成。< / p >
< p > ( 2) 基于预测引擎的推理需要先转换模型为inference格式, 然后可以进行不需要组网的推理, 语言支持c++和python, 适合系统集成。< / p >
< h4 id = "qpaddleocrcputenorrtgpu" > Q: PaddleOCR中, 对于模型预测加速, CPU加速的途径有哪些? 基于TenorRT加速GPU对输入有什么要求? < a class = "headerlink" href = "#qpaddleocrcputenorrtgpu" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : ( 1) CPU可以使用mkldnn进行加速; 对于python inference的话, 可以把enable_mkldnn改为true, < a href = "https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/tools/infer/utility.py#L99" > 参考代码< / a > , 对于cpp inference的话, 可参考< a href = "https://github.com/PaddlePaddle/PaddleOCR/tree/dygraph/deploy/cpp_infer" > 文档< / a > < / p >
< p > ( 2) GPU需要注意变长输入问题等, TRT6 之后才支持变长输入< / p >
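<p>For reference, a sketch of the switch that enable_mkldnn toggles in the Paddle Inference config (the model paths are placeholders):</p>
<div class="language-python highlight"><pre><code>from paddle.inference import Config, create_predictor

config = Config("inference/det/inference.pdmodel",
                "inference/det/inference.pdiparams")
config.disable_gpu()
config.enable_mkldnn()  # oneDNN (mkldnn) acceleration on CPU
config.set_cpu_math_library_num_threads(10)
predictor = create_predictor(config)
</code></pre></div>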
< h4 id = "qhubservingpdserving" > Q: hubserving、pdserving这两种部署方式区别是什么? < a class = "headerlink" href = "#qhubservingpdserving" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : hubserving原本是paddlehub的配套服务部署工具, 可以很方便的将paddlehub内置的模型部署为服务, paddleocr使用了这个功能, 并将模型路径等参数暴露出来方便用户自定义修改。paddle serving是面向所有paddle模型的部署工具, 文档中可以看到我们提供了快速版和标准版, 其中快速版和hubserving的本质是一样的, 而标准版基于rpc, 更稳定, 更适合分布式部署。< / p >
< h4 id = "q-paddle-hub-serving-imgpathimgurl" > Q: 目前paddle hub serving 只支持 imgpath, 如果我想用imgurl 去哪里改呢?< a class = "headerlink" href = "#q-paddle-hub-serving-imgpathimgurl" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :图片是在< a href = "https://github.com/PaddlePaddle/PaddleOCR/blob/67ef25d593c4eabfaaceb22daade4577f53bed81/deploy/hubserving/ocr_system/module.py#L55" > 这里< / a > 读取的, 可以参考下面的写法, 将url path转化为np array< / p >
< div class = "language-text highlight" > < pre > < span > < / span > < code > < span id = "__span-2-1" > < a id = "__codelineno-2-1" name = "__codelineno-2-1" href = "#__codelineno-2-1" > < / a > response = request.urlopen(' http://i1.whymtj.com/uploads/tu/201902/9999/52491ae4ba.jpg' )
< / span > < span id = "__span-2-2" > < a id = "__codelineno-2-2" name = "__codelineno-2-2" href = "#__codelineno-2-2" > < / a > img_array = np.array(bytearray(response.read()), dtype=np.uint8)
< / span > < span id = "__span-2-3" > < a id = "__codelineno-2-3" name = "__codelineno-2-3" href = "#__codelineno-2-3" > < / a > img = cv.imdecode(img_array, -1)
< / span > < / code > < / pre > < / div >
< h4 id = "q-c-ocr" > Q: C++ 端侧部署可以只对OCR的检测部署吗? < a class = "headerlink" href = "#q-c-ocr" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 可以的, 识别和检测模块是解耦的。如果想对检测部署, 需要自己修改一下main函数, 只保留检测相关就可以: < a href = "https://github.com/PaddlePaddle/PaddleOCR/blob/de3e2e7cd3b8b65ee02d7a41e570fa5b511a3c1d/deploy/cpp_infer/src/main.cpp#L72" > 参考< / a > < / p >
< h4 id = "q_37" > Q: 服务部署可以只发布文本识别, 而不带文本检测模型么? < a class = "headerlink" href = "#q_37" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 可以的。默认的服务部署是检测和识别串联预测的。也支持单独发布文本检测或文本识别模型, 比如使用PaddleHUBPaddleOCR 模型时, deploy下有三个文件夹, 分别是
ocr_det: 检测预测
ocr_rec: 识别预测
ocr_system: 检测识别串联预测< / p >
< h4 id = "q-litenb" > Q: lite预测库和nb模型版本不匹配, 该如何解决? < a class = "headerlink" href = "#q-litenb" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 如果可以正常预测就不用管, 如果这个问题导致无法正常预测, 可以尝试使用同一个commit的Paddle Lite代码编译预测库和opt文件, 可以参考< a href = "https://github.com/PaddlePaddle/PaddleOCR/blob/release%2F2.1/deploy/lite/readme.md" > 移动端部署教程< / a > 。< / p >
< h4 id = "qpaddleocrsdk" > Q: 如何将PaddleOCR预测模型封装成SDK< a class = "headerlink" href = "#qpaddleocrsdk" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 如果是Python的话, 可以使用tools/infer/predict_system.py中的TextSystem进行sdk封装, 如果是c++的话, 可以使用deploy/cpp_infer/src下面的DBDetector和CRNNRecognizer完成封装< / p >
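<p>A minimal sketch of the Python route, assuming it is run from the PaddleOCR repo root and that TextSystem returns detected boxes plus (text, confidence) pairs; the model paths and test image are placeholders, and the exact return signature may differ between releases:</p>
<div class="language-python highlight"><pre><code>import cv2

from tools.infer import predict_system, utility

args = utility.parse_args()
args.det_model_dir = "./ch_PP-OCRv3_det_infer/"
args.rec_model_dir = "./ch_PP-OCRv3_rec_infer/"
text_sys = predict_system.TextSystem(args)

img = cv2.imread("./doc/imgs/11.jpg")
dt_boxes, rec_res = text_sys(img)  # boxes and (text, confidence) pairs
</code></pre></div>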
< h4 id = "qpaddleocrtest_batch_size_per_card1" > Q: 为什么PaddleOCR检测预测是只支持一张图片测试? 即test_batch_size_per_card=1< a class = "headerlink" href = "#qpaddleocrtest_batch_size_per_card1" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 测试的时候, 对图像等比例缩放, 最长边960, 不同图像等比例缩放后长宽不一致, 无法组成batch, 所以设置为test_batch_size为1。< / p >
< h4 id = "q_38" > Q: 为什么第一张张图预测时间很长, 第二张之后预测时间会降低? < a class = "headerlink" href = "#q_38" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > :第一张图需要显存资源初始化,耗时较多。完成模型加载后,之后的预测时间会明显缩短。< / p >
< h4 id = "q-paddle-lite" > Q: 采用Paddle-Lite进行端侧部署, 出现问题, 环境没问题< a class = "headerlink" href = "#q-paddle-lite" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 如果你的预测库是自己编译的, 那么你的nb文件也要自己编译, 用同一个lite版本。不能直接用下载的nb文件, 因为版本不同。< / p >
< h4 id = "q-paddleocr_3" > Q: 如何多进程运行paddleocr? < a class = "headerlink" href = "#q-paddleocr_3" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 实例化多个paddleocr服务, 然后将服务注册到注册中心, 之后通过注册中心统一调度即可, 关于注册中心, 可以搜索eureka了解一下具体使用, 其他的注册中心也行。< / p >
< h4 id = "q_39" > Q: 如何多进程预测?< a class = "headerlink" href = "#q_39" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 近期PaddleOCR新增了< a href = "https://github.com/PaddlePaddle/PaddleOCR/blob/a312647be716776c1aac33ff939ae358a39e8188/tools/infer/utility.py#L103" > 多进程预测控制参数< / a > , < code > use_mp< / code > 表示是否使用多进程,< code > total_process_num< / code > 表示在使用多进程时的进程数。具体使用方式请参考< a href = "https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.1/doc/doc_ch/inference.md#1-%E8%B6%85%E8%BD%BB%E9%87%8F%E4%B8%AD%E6%96%87ocr%E6%A8%A1%E5%9E%8B%E6%8E%A8%E7%90%86" > 文档< / a > 。< / p >
< h4 id = "q-paddleocrt4" > Q: 怎么解决paddleOCR在T4卡上有越预测越慢的情况? < a class = "headerlink" href = "#q-paddleocrt4" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : < / p >
< ol >
< li > T4 GPU没有主动散热, 因此在测试的时候需要在每次infer之后需要sleep 30ms, 否则机器容易因为过热而降频(inference速度会变慢),温度过高也有可能会导致宕机。< / li >
< li > T4在不使用的时候, 也有可能会降频, 因此在做benchmark的时候需要锁频, 下面这两条命令可以进行锁频。< / li >
< / ol >
< div class = "language-text highlight" > < pre > < span > < / span > < code > < span id = "__span-3-1" > < a id = "__codelineno-3-1" name = "__codelineno-3-1" href = "#__codelineno-3-1" > < / a > nvidia-smi -i 0 -pm ENABLED
< / span > < span id = "__span-3-2" > < a id = "__codelineno-3-2" name = "__codelineno-3-2" href = "#__codelineno-3-2" > < / a > nvidia-smi --lock-gpu-clocks=1590 -i 0
< / span > < / code > < / pre > < / div >
< h4 id = "q-windowscpp-inferencepaddle_fluiddllopencv_world346dll" > Q: 在windows上进行cpp inference的部署时, 总是提示找不到< code > paddle_fluid.dll< / code > 和< code > opencv_world346.dll< / code > < a class = "headerlink" href = "#q-windowscpp-inferencepaddle_fluiddllopencv_world346dll" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 有2种方法可以解决这个问题: < / p >
< ol >
< li > 将paddle预测库和opencv库的地址添加到系统环境变量中。< / li >
< li > 将提示缺失的dll文件拷贝到编译产出的< code > ocr_system.exe< / code > 文件夹中。< / li >
< / ol >
< h4 id = "q-winc" > Q: win下C++部署中文识别乱码的解决方法< a class = "headerlink" href = "#q-winc" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : win下编码格式不是utf8,而ppocr_keys_v1.txt的编码格式的utf8, 将ppocr_keys_v1.txt 的编码从utf-8修改为 Ansi 编码格式就行了。< / p >
< h4 id = "q-windows-3060gpu" > Q: windows 3060显卡GPU模式启动 加载模型慢< a class = "headerlink" href = "#q-windows-3060gpu" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 30系列的显卡需要使用cuda11。< / p >
< h4 id = "qmac" > Q: 想在Mac上部署, 从哪里下载预测库呢? < a class = "headerlink" href = "#qmac" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : Mac上的Paddle预测库可以从这里下载: < a href = "https://paddle-inference-lib.bj.bcebos.com/mac/2.0.0/cpu_avx_openblas/paddle_inference.tgz" > https://paddle-inference-lib.bj.bcebos.com/mac/2.0.0/cpu_avx_openblas/paddle_inference.tgz< / a > < / p >
< h4 id = "q_40" > Q: 内网环境如何进行服务化部署呢? < a class = "headerlink" href = "#q_40" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 仍然可以使用PaddleServing或者HubServing进行服务化部署, 保证内网地址可以访问即可。< / p >
< h4 id = "q-hub_serving" > Q: 使用hub_serving部署, 延时较高, 可能的原因是什么呀? < a class = "headerlink" href = "#q-hub_serving" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 首先, 测试的时候第一张图延时较高, 可以多测试几张然后观察后几张图的速度; 其次, 如果是在cpu端部署serving端模型( 如backbone为ResNet34) , 耗时较慢, 建议在cpu端部署mobile( 如backbone为MobileNetV3) 模型。< / p >
< h4 id = "q-paddlelite" > Q: 在使用PaddleLite进行预测部署时, 启动预测后卡死/手机死机?< a class = "headerlink" href = "#q-paddlelite" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 请检查模型转换时所用PaddleLite的版本, 和预测库的版本是否对齐。即PaddleLite版本为2.8, 则预测库版本也要为2.8。< / p >
< h4 id = "q_41" > Q: 预测时显存爆炸、内存泄漏问题?< a class = "headerlink" href = "#q_41" title = "Permanent link" > ¶ < / a > < / h4 >
< p > < strong > A< / strong > : 打开显存/内存优化开关< code > enable_memory_optim< / code > 可以解决该问题,相关代码已合入,< a href = "https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.1/tools/infer/utility.py#L153" > 查看详情< / a > 。< / p >
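<p>For reference, a sketch of the corresponding Paddle Inference switch (the model paths are placeholders):</p>
<div class="language-python highlight"><pre><code>from paddle.inference import Config

config = Config("inference/rec/inference.pdmodel",
                "inference/rec/inference.pdiparams")
config.enable_memory_optim()  # reuse intermediate buffers to bound memory use
</code></pre></div>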
< aside class = "md-source-file" >
< span class = "md-source-file__fact" >
< span class = "md-icon" title = "最后更新" >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 24 24" > < path d = "M21 13.1c-.1 0-.3.1-.4.2l-1 1 2.1 2.1 1-1c.2-.2.2-.6 0-.8l-1.3-1.3c-.1-.1-.2-.2-.4-.2m-1.9 1.8-6.1 6V23h2.1l6.1-6.1zM12.5 7v5.2l4 2.4-1 1L11 13V7zM11 21.9c-5.1-.5-9-4.8-9-9.9C2 6.5 6.5 2 12 2c5.3 0 9.6 4.1 10 9.3-.3-.1-.6-.2-1-.2s-.7.1-1 .2C19.6 7.2 16.2 4 12 4c-4.4 0-8 3.6-8 8 0 4.1 3.1 7.5 7.1 7.9l-.1.2z" / > < / svg >
< / span >
2024-12-12 07:39:44 +00:00
< span class = "git-revision-date-localized-plugin git-revision-date-localized-plugin-date" > 2024年12月12日< / span >
2024-10-24 15:00:38 +00:00
< / span >
< span class = "md-source-file__fact" >
< span class = "md-icon" title = "创建日期" >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 24 24" > < path d = "M14.47 15.08 11 13V7h1.5v5.25l3.08 1.83c-.41.28-.79.62-1.11 1m-1.39 4.84c-.36.05-.71.08-1.08.08-4.42 0-8-3.58-8-8s3.58-8 8-8 8 3.58 8 8c0 .37-.03.72-.08 1.08.69.1 1.33.32 1.92.64.1-.56.16-1.13.16-1.72 0-5.5-4.5-10-10-10S2 6.5 2 12s4.47 10 10 10c.59 0 1.16-.06 1.72-.16-.32-.59-.54-1.23-.64-1.92M18 15v3h-3v2h3v3h2v-3h3v-2h-3v-3z" / > < / svg >
< / span >
2024-12-12 07:39:44 +00:00
< span class = "git-revision-date-localized-plugin git-revision-date-localized-plugin-date" > 2024年12月12日< / span >
2024-10-24 15:00:38 +00:00
< / span >
< span class = "md-source-file__fact" >
< span class = "md-icon" title = "贡献者" >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 24 24" > < path d = "M12 2A10 10 0 0 0 2 12c0 4.42 2.87 8.17 6.84 9.5.5.08.66-.23.66-.5v-1.69c-2.77.6-3.36-1.34-3.36-1.34-.46-1.16-1.11-1.47-1.11-1.47-.91-.62.07-.6.07-.6 1 .07 1.53 1.03 1.53 1.03.87 1.52 2.34 1.07 2.91.83.09-.65.35-1.09.63-1.34-2.22-.25-4.55-1.11-4.55-4.92 0-1.11.38-2 1.03-2.71-.1-.25-.45-1.29.1-2.64 0 0 .84-.27 2.75 1.02.79-.22 1.65-.33 2.5-.33s1.71.11 2.5.33c1.91-1.29 2.75-1.02 2.75-1.02.55 1.35.2 2.39.1 2.64.65.71 1.03 1.6 1.03 2.71 0 3.82-2.34 4.66-4.57 4.91.36.31.69.92.69 1.85V21c0 .27.16.59.67.5C19.14 20.16 22 16.42 22 12A10 10 0 0 0 12 2" / > < / svg >
< / span >
< span > GitHub< / span >
< nav >
< a href = "https://github.com/GreatV" class = "md-author" title = "@GreatV" >
< img src = "https://avatars.githubusercontent.com/u/17264618?v=4&size=72" alt = "GreatV" >
< / a >
< a href = "https://github.com/web-flow" class = "md-author" title = "@web-flow" >
< img src = "https://avatars.githubusercontent.com/u/19864447?v=4&size=72" alt = "web-flow" >
< / a >
< a href = "https://github.com/SWHL" class = "md-author" title = "@SWHL" >
< img src = "https://avatars.githubusercontent.com/u/28639377?v=4&size=72" alt = "SWHL" >
< / a >
< / nav >
< / span >
< / aside >
< h2 id = "__comments" > 评论< / h2 >
<!-- Insert generated snippet here -->
< script src = "https://giscus.app/client.js" data-repo = "PaddlePaddle/PaddleOCR"
data-repo-id="MDEwOlJlcG9zaXRvcnkyNjIyOTYxMjI=" data-category="Q& A" data-category-id="DIC_kwDOD6JSOs4COrbO"
data-mapping="pathname" data-strict="0" data-reactions-enabled="1" data-emit-metadata="0" data-input-position="top"
data-theme="preferred_color_scheme" data-lang="en" data-loading="lazy" crossorigin="anonymous" async>
< / script >
<!-- Synchronize Giscus theme with palette -->
< script >
var giscus = document.querySelector("script[src*=giscus]")
// Set palette on initial load
var palette = __md_get("__palette")
if (palette & & typeof palette.color === "object") {
var theme = palette.color.scheme === "slate"
? "transparent_dark"
: "light"
// Instruct Giscus to set theme
giscus.setAttribute("data-theme", theme)
}
// Register event handlers after documented loaded
document.addEventListener("DOMContentLoaded", function () {
var ref = document.querySelector("[data-md-component=palette]")
ref.addEventListener("change", function () {
var palette = __md_get("__palette")
if (palette & & typeof palette.color === "object") {
var theme = palette.color.scheme === "slate"
? "transparent_dark"
: "light"
// Instruct Giscus to change theme
var frame = document.querySelector(".giscus-frame")
frame.contentWindow.postMessage(
{ giscus: { setConfig: { theme } } },
"https://giscus.app"
)
}
})
})
< / script >
< / article >
< / div >
< script > var tabs = _ _md _get ( "__tabs" ) ; if ( Array . isArray ( tabs ) ) e : for ( var set of document . querySelectorAll ( ".tabbed-set" ) ) { var labels = set . querySelector ( ".tabbed-labels" ) ; for ( var tab of tabs ) for ( var label of labels . getElementsByTagName ( "label" ) ) if ( label . innerText . trim ( ) === tab ) { var input = document . getElementById ( label . htmlFor ) ; input . checked = ! 0 ; continue e } } < / script >
< script > var target = document . getElementById ( location . hash . slice ( 1 ) ) ; target && target . name && ( target . checked = target . name . startsWith ( "__tabbed_" ) ) < / script >
< / div >
< button type = "button" class = "md-top md-icon" data-md-component = "top" hidden >
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 24 24" > < path d = "M13 20h-2V8l-5.5 5.5-1.42-1.42L12 4.16l7.92 7.92-1.42 1.42L13 8z" / > < / svg >
回到页面顶部
< / button >
< / main >
< footer class = "md-footer" >
< nav class = "md-footer__inner md-grid" aria-label = "页脚" >
< a href = "datasets/kie_datasets.html" class = "md-footer__link md-footer__link--prev" aria-label = "上一页: 关键信息提取数据集" >
< div class = "md-footer__button md-icon" >
2024-12-12 07:39:44 +00:00
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 320 512" > <!-- ! Font Awesome Free 6.7.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2024 Fonticons, Inc. --> < path d = "M41.4 233.4c-12.5 12.5-12.5 32.8 0 45.3l160 160c12.5 12.5 32.8 12.5 45.3 0s12.5-32.8 0-45.3L109.3 256l137.3-137.4c12.5-12.5 12.5-32.8 0-45.3s-32.8-12.5-45.3 0l-160 160z" / > < / svg >
2024-10-24 15:00:38 +00:00
< / div >
< div class = "md-footer__title" >
< span class = "md-footer__direction" >
上一页
< / span >
< div class = "md-ellipsis" >
关键信息提取数据集
< / div >
< / div >
< / a >
< a href = "community/community_contribution.html" class = "md-footer__link md-footer__link--next" aria-label = "下一页: 社区贡献" >
< div class = "md-footer__title" >
< span class = "md-footer__direction" >
下一页
< / span >
< div class = "md-ellipsis" >
社区贡献
< / div >
< / div >
< div class = "md-footer__button md-icon" >
2024-12-12 07:39:44 +00:00
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 320 512" > <!-- ! Font Awesome Free 6.7.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2024 Fonticons, Inc. --> < path d = "M278.6 233.4c12.5 12.5 12.5 32.8 0 45.3l-160 160c-12.5 12.5-32.8 12.5-45.3 0s-12.5-32.8 0-45.3L210.7 256 73.4 118.6c-12.5-12.5-12.5-32.8 0-45.3s32.8-12.5 45.3 0l160 160z" / > < / svg >
2024-10-24 15:00:38 +00:00
< / div >
< / a >
< / nav >
< div class = "md-footer-meta md-typeset" >
< div class = "md-footer-meta__inner md-grid" >
< div class = "md-copyright" >
< div class = "md-copyright__highlight" >
Copyright © 2024 Maintained by PaddleOCR PMC.
< / div >
Made with
< a href = "https://squidfunk.github.io/mkdocs-material/" target = "_blank" rel = "noopener" >
Material for MkDocs
< / a >
< / div >
< div class = "md-social" >
< a href = "https://github.com/PaddlePaddle/PaddleOCR" target = "_blank" rel = "noopener" title = "github.com" class = "md-social__link" >
2024-12-12 07:39:44 +00:00
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 496 512" > <!-- ! Font Awesome Free 6.7.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2024 Fonticons, Inc. --> < path d = "M165.9 397.4c0 2-2.3 3.6-5.2 3.6-3.3.3-5.6-1.3-5.6-3.6 0-2 2.3-3.6 5.2-3.6 3-.3 5.6 1.3 5.6 3.6m-31.1-4.5c-.7 2 1.3 4.3 4.3 4.9 2.6 1 5.6 0 6.2-2s-1.3-4.3-4.3-5.2c-2.6-.7-5.5.3-6.2 2.3m44.2-1.7c-2.9.7-4.9 2.6-4.6 4.9.3 2 2.9 3.3 5.9 2.6 2.9-.7 4.9-2.6 4.6-4.6-.3-1.9-3-3.2-5.9-2.9M244.8 8C106.1 8 0 113.3 0 252c0 110.9 69.8 205.8 169.5 239.2 12.8 2.3 17.3-5.6 17.3-12.1 0-6.2-.3-40.4-.3-61.4 0 0-70 15-84.7-29.8 0 0-11.4-29.1-27.8-36.6 0 0-22.9-15.7 1.6-15.4 0 0 24.9 2 38.6 25.8 21.9 38.6 58.6 27.5 72.9 20.9 2.3-16 8.8-27.1 16-33.7-55.9-6.2-112.3-14.3-112.3-110.5 0-27.5 7.6-41.3 23.6-58.9-2.6-6.5-11.1-33.3 2.6-67.9 20.9-6.5 69 27 69 27 20-5.6 41.5-8.5 62.8-8.5s42.8 2.9 62.8 8.5c0 0 48.1-33.6 69-27 13.7 34.7 5.2 61.4 2.6 67.9 16 17.7 25.8 31.5 25.8 58.9 0 96.5-58.9 104.2-114.8 110.5 9.2 7.9 17 22.9 17 46.4 0 33.7-.3 75.4-.3 83.6 0 6.5 4.6 14.4 17.3 12.1C428.2 457.8 496 362.9 496 252 496 113.3 383.5 8 244.8 8M97.2 352.9c-1.3 1-1 3.3.7 5.2 1.6 1.6 3.9 2.3 5.2 1 1.3-1 1-3.3-.7-5.2-1.6-1.6-3.9-2.3-5.2-1m-10.8-8.1c-.7 1.3.3 2.9 2.3 3.9 1.6 1 3.6.7 4.3-.7.7-1.3-.3-2.9-2.3-3.9-2-.6-3.6-.3-4.3.7m32.4 35.6c-1.6 1.3-1 4.3 1.3 6.2 2.3 2.3 5.2 2.6 6.5 1 1.3-1.3.7-4.3-1.3-6.2-2.2-2.3-5.2-2.6-6.5-1m-11.4-14.7c-1.6 1-1.6 3.6 0 5.9s4.3 3.3 5.6 2.3c1.6-1.3 1.6-3.9 0-6.2-1.4-2.3-4-3.3-5.6-2" / > < / svg >
2024-10-24 15:00:38 +00:00
< / a >
< a href = "https://pypi.org/project/paddleocr/" target = "_blank" rel = "noopener" title = "pypi.org" class = "md-social__link" >
2024-12-12 07:39:44 +00:00
< svg xmlns = "http://www.w3.org/2000/svg" viewBox = "0 0 448 512" > <!-- ! Font Awesome Free 6.7.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2024 Fonticons, Inc. --> < path d = "M439.8 200.5c-7.7-30.9-22.3-54.2-53.4-54.2h-40.1v47.4c0 36.8-31.2 67.8-66.8 67.8H172.7c-29.2 0-53.4 25-53.4 54.3v101.8c0 29 25.2 46 53.4 54.3 33.8 9.9 66.3 11.7 106.8 0 26.9-7.8 53.4-23.5 53.4-54.3v-40.7H226.2v-13.6h160.2c31.1 0 42.6-21.7 53.4-54.2 11.2-33.5 10.7-65.7 0-108.6M286.2 404c11.1 0 20.1 9.1 20.1 20.3 0 11.3-9 20.4-20.1 20.4-11 0-20.1-9.2-20.1-20.4.1-11.3 9.1-20.3 20.1-20.3M167.8 248.1h106.8c29.7 0 53.4-24.5 53.4-54.3V91.9c0-29-24.4-50.7-53.4-55.6-35.8-5.9-74.7-5.6-106.8.1-45.2 8-53.4 24.7-53.4 55.6v40.7h106.9v13.6h-147c-31.1 0-58.3 18.7-66.8 54.2-9.8 40.7-10.2 66.1 0 108.6 7.6 31.6 25.7 54.2 56.8 54.2H101v-48.8c0-35.3 30.5-66.4 66.8-66.4m-6.7-142.6c-11.1 0-20.1-9.1-20.1-20.3.1-11.3 9-20.4 20.1-20.4 11 0 20.1 9.2 20.1 20.4s-9 20.3-20.1 20.3" / > < / svg >
2024-10-24 15:00:38 +00:00
< / a >
< / div >
< / div >
< / div >
< / footer >
< / div >
< div class = "md-dialog" data-md-component = "dialog" >
< div class = "md-dialog__inner md-typeset" > < / div >
< / div >
< script id = "__config" type = "application/json" > { "base" : "." , "features" : [ "announce.dismiss" , "content.tooltips" , "content.code.copy" , "content.tabs.link" , "content.footnote.tooltips" , "content.action.edit" , "content.action.view" , "navigation.expand" , "navigation.tabs" , "navigation.tabs.sticky" , "navigation.top" , "navigation.tracking" , "navigation.footer" , "navigation.indexes" , "search.highlight" , "search.share" , "search.suggest" , "toc.follow" ] , "search" : "assets/javascripts/workers/search.6ce7567c.min.js" , "translations" : { "clipboard.copied" : "\u5df2\u590d\u5236" , "clipboard.copy" : "\u590d\u5236" , "search.result.more.one" : "\u5728\u8be5\u9875\u4e0a\u8fd8\u6709 1 \u4e2a\u7b26\u5408\u6761\u4ef6\u7684\u7ed3\u679c" , "search.result.more.other" : "\u5728\u8be5\u9875\u4e0a\u8fd8\u6709 # \u4e2a\u7b26\u5408\u6761\u4ef6\u7684\u7ed3\u679c" , "search.result.none" : "\u6ca1\u6709\u627e\u5230\u7b26\u5408\u6761\u4ef6\u7684\u7ed3\u679c" , "search.result.one" : "\u627e\u5230 1 \u4e2a\u7b26\u5408\u6761\u4ef6\u7684\u7ed3\u679c" , "search.result.other" : "# \u4e2a\u7b26\u5408\u6761\u4ef6\u7684\u7ed3\u679c" , "search.result.placeholder" : "\u952e\u5165\u4ee5\u5f00\u59cb\u641c\u7d22" , "search.result.term.missing" : "\u7f3a\u5c11" , "select.version" : "\u9009\u62e9\u5f53\u524d\u7248\u672c" } , "version" : { "provider" : "mike" } } < / script >
< script src = "assets/javascripts/bundle.83f73b43.min.js" > < / script >
< script src = "javascripts/katex.min.js" > < / script >
< script src = "https://unpkg.com/katex@0/dist/katex.min.js" > < / script >
< script src = "https://unpkg.com/katex@0/dist/contrib/auto-render.min.js" > < / script >
</body>
</html>