meimeiking 4 years ago
commit
0bc8c84788
100 changed files with 3664 additions and 0 deletions
  1. +22 -0  .idea/PaperCut.iml
  2. +4 -0  .idea/encodings.xml
  3. +7 -0  .idea/misc.xml
  4. +8 -0  .idea/modules.xml
  5. +693 -0  .idea/workspace.xml
  6. +15 -0  222.py
  7. +6 -0  CCPN/.gitignore
  8. +11 -0  CCPN/3.html
  9. +19 -0  CCPN/LICENSE
  10. +103 -0  CCPN/README.md
  11. +0 -0  CCPN/basenet/__init__.py
  12. +73 -0  CCPN/basenet/vgg16_bn.py
  13. +85 -0  CCPN/craft.py
  14. +256 -0  CCPN/craft_utils.py
  15. BIN  CCPN/data/1.png
  16. BIN  CCPN/figures/craft_example.gif
  17. +76 -0  CCPN/file_utils.py
  18. +70 -0  CCPN/imgproc.py
  19. +65 -0  CCPN/refinenet.py
  20. +5 -0  CCPN/requirements.txt
  21. +171 -0  CCPN/test.py
  22. +190 -0  CCPN/textbbx.py
  23. +31 -0  DeepModel.py
  24. +36 -0  Detection.py
  25. +326 -0  Near.py
  26. +16 -0  TextModel.py
  27. BIN  __pycache__/DeepModel.cpython-36.pyc
  28. BIN  __pycache__/Near.cpython-36.pyc
  29. BIN  __pycache__/TextModel.cpython-36.pyc
  30. BIN  __pycache__/dev_image.cpython-36.pyc
  31. BIN  __pycache__/image_tools.cpython-36.pyc
  32. BIN  __pycache__/neighbor.cpython-36.pyc
  33. BIN  __pycache__/ocrapi.cpython-36.pyc
  34. BIN  __pycache__/tools.cpython-36.pyc
  35. +94 -0  app.py
  36. +118 -0  base64crnn.py
  37. BIN  bbb.png
  38. BIN  binary.png
  39. +0 -0  crnn/__init__.py
  40. BIN  crnn/__pycache__/__init__.cpython-36.pyc
  41. BIN  crnn/__pycache__/crnn.cpython-36.pyc
  42. BIN  crnn/__pycache__/dataset.cpython-36.pyc
  43. BIN  crnn/__pycache__/keys_crnn.cpython-36.pyc
  44. BIN  crnn/__pycache__/util.cpython-36.pyc
  45. +23 -0  crnn/app.py
  46. +110 -0  crnn/crnn.py
  47. +132 -0  crnn/dataset.py
  48. +1 -0  crnn/keys_crnn.py
  49. +0 -0  crnn/models/__init__.py
  50. BIN  crnn/models/__pycache__/__init__.cpython-36.pyc
  51. BIN  crnn/models/__pycache__/crnn.cpython-36.pyc
  52. BIN  crnn/models/__pycache__/utils.cpython-36.pyc
  53. +84 -0  crnn/models/crnn.py
  54. +13 -0  crnn/models/utils.py
  55. BIN  crnn/samples/model_acc97.pth
  56. BIN  crnn/samples/netCRNN_143_16500.pth
  57. BIN  crnn/samples/netCRNN_1952_247000.pth
  58. BIN  crnn/samples/netCRNN_474_58000.pth
  59. BIN  crnn/samples/netCRNN_61_134500.pth
  60. +41 -0  crnn/test.py
  61. +103 -0  crnn/util.py
  62. +121 -0  dev_image.py
  63. BIN  gray.png
  64. BIN  image_dir/01.jpg
  65. BIN  image_dir/06.jpg
  66. BIN  image_dir/1.jpg
  67. BIN  image_dir/1.png
  68. BIN  image_dir/11.jpg
  69. BIN  image_dir/17.png
  70. BIN  image_dir/18.png
  71. BIN  image_dir/19.png
  72. BIN  image_dir/2.png
  73. BIN  image_dir/202004232018_0001.jpg
  74. BIN  image_dir/29.png
  75. BIN  image_dir/3.png
  76. BIN  image_dir/34.png
  77. BIN  image_dir/35.png
  78. BIN  image_dir/4.jpg
  79. BIN  image_dir/5.png
  80. BIN  image_dir/6.png
  81. BIN  image_dir/666.png
  82. BIN  image_dir/7.png
  83. BIN  image_dir/9_LAF40AXGZ37ZN.png
  84. BIN  image_dir/QQ20200427170437.png
  85. BIN  image_dir/c982aa7130d493db76fec10a918e8c6.png
  86. BIN  image_dir/latex.png
  87. BIN  image_dir/png
  88. BIN  image_dir/timmmmmmmmmmmmmmmmmmmmmmm.png
  89. +132 -0  image_generator.py
  90. +84 -0  image_tools.py
  91. +287 -0  neighbor.py
  92. +24 -0  ocrapi.py
  93. +9 -0  readme.md
  94. BIN  result/image/129-150-45-107.png
  95. BIN  result/image/129-208-576-779.png
  96. BIN  result/image/129-323-44-563.png
  97. BIN  result/image/360-470-238-553.png
  98. BIN  result/image/397-471-561-770.png
  99. BIN  result/image/399-471-44-236.png
  100. BIN  result/text_img/10-38-43-989.png

+ 22 - 0
.idea/PaperCut.iml

@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$">
+      <sourceFolder url="file://$MODULE_DIR$/CCPN" isTestSource="false" />
+      <sourceFolder url="file://$MODULE_DIR$/crnn" isTestSource="false" />
+    </content>
+    <orderEntry type="jdk" jdkName="Python 3.6" jdkType="Python SDK" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+  <component name="TemplatesService">
+    <option name="TEMPLATE_CONFIGURATION" value="Jinja2" />
+    <option name="TEMPLATE_FOLDERS">
+      <list>
+        <option value="$MODULE_DIR$/templates" />
+      </list>
+    </option>
+  </component>
+  <component name="TestRunnerService">
+    <option name="PROJECT_TEST_RUNNER" value="Unittests" />
+  </component>
+</module>

+ 4 - 0
.idea/encodings.xml

@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="Encoding" addBOMForNewFiles="with NO BOM" />
+</project>

+ 7 - 0
.idea/misc.xml

@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="JavaScriptSettings">
+    <option name="languageLevel" value="ES6" />
+  </component>
+  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.6" project-jdk-type="Python SDK" />
+</project>

+ 8 - 0
.idea/modules.xml

@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/PaperCut.iml" filepath="$PROJECT_DIR$/.idea/PaperCut.iml" />
+    </modules>
+  </component>
+</project>

+ 693 - 0
.idea/workspace.xml

@@ -0,0 +1,693 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ChangeListManager">
+    <list default="true" id="e8f4bf68-87b2-47fe-aa7b-653542aa7b3c" name="Default Changelist" comment="" />
+    <option name="EXCLUDED_CONVERTED_TO_IGNORED" value="true" />
+    <option name="SHOW_DIALOG" value="false" />
+    <option name="HIGHLIGHT_CONFLICTS" value="true" />
+    <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
+    <option name="LAST_RESOLUTION" value="IGNORE" />
+  </component>
+  <component name="CoverageDataManager">
+    <SUITE FILE_PATH="coverage/PaperCut$x1.coverage" NAME="x1 Coverage Results" MODIFIED="1586249694371" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/tmp" />
+    <SUITE FILE_PATH="coverage/PaperCut$_2.coverage" NAME=".2 Coverage Results" MODIFIED="1584932495692" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
+    <SUITE FILE_PATH="coverage/PaperCut$tools.coverage" NAME="tools Coverage Results" MODIFIED="1585807308214" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
+    <SUITE FILE_PATH="coverage/PaperCut$ocrapi.coverage" NAME="ocrapi Coverage Results" MODIFIED="1585620538595" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
+    <SUITE FILE_PATH="coverage/PaperCut$dev_image.coverage" NAME="dev_image Coverage Results" MODIFIED="1585559336020" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
+    <SUITE FILE_PATH="coverage/PaperCut$keys_crnn.coverage" NAME="keys_crnn Coverage Results" MODIFIED="1585045122005" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/crnn" />
+    <SUITE FILE_PATH="coverage/PaperCut$page.coverage" NAME="page Coverage Results" MODIFIED="1586243779154" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/tmp" />
+    <SUITE FILE_PATH="coverage/PaperCut$app.coverage" NAME="app Coverage Results" MODIFIED="1588230936295" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
+  </component>
+  <component name="DatabaseView">
+    <option name="SHOW_INTERMEDIATE" value="true" />
+    <option name="GROUP_DATA_SOURCES" value="true" />
+    <option name="GROUP_SCHEMA" value="true" />
+    <option name="GROUP_CONTENTS" value="false" />
+    <option name="SORT_POSITIONED" value="false" />
+    <option name="SHOW_EMPTY_GROUPS" value="false" />
+    <option name="AUTO_SCROLL_FROM_SOURCE" value="false" />
+    <option name="HIDDEN_KINDS">
+      <set />
+    </option>
+    <expand />
+    <select />
+  </component>
+  <component name="FileEditorManager">
+    <leaf SIDE_TABS_SIZE_LIMIT_KEY="300">
+      <file pinned="false" current-in-tab="true">
+        <entry file="file://$PROJECT_DIR$/app.py">
+          <provider selected="true" editor-type-id="text-editor">
+            <state relative-caret-position="-734">
+              <caret line="47" column="12" selection-start-line="47" selection-start-column="12" selection-end-line="47" selection-end-column="12" />
+            </state>
+          </provider>
+        </entry>
+      </file>
+      <file pinned="false" current-in-tab="false">
+        <entry file="file://$PROJECT_DIR$/TextModel.py">
+          <provider selected="true" editor-type-id="text-editor">
+            <state>
+              <caret column="10" selection-start-column="10" selection-end-column="10" />
+            </state>
+          </provider>
+        </entry>
+      </file>
+      <file pinned="false" current-in-tab="false">
+        <entry file="file://$PROJECT_DIR$/tools.py">
+          <provider selected="true" editor-type-id="text-editor">
+            <state relative-caret-position="90">
+              <caret line="3" column="27" selection-start-line="3" selection-start-column="27" selection-end-line="3" selection-end-column="27" />
+            </state>
+          </provider>
+        </entry>
+      </file>
+      <file pinned="false" current-in-tab="false">
+        <entry file="file://$PROJECT_DIR$/CCPN/textbbx.py">
+          <provider selected="true" editor-type-id="text-editor">
+            <state relative-caret-position="5010">
+              <caret line="190" selection-start-line="190" selection-end-line="190" />
+            </state>
+          </provider>
+        </entry>
+      </file>
+      <file pinned="false" current-in-tab="false">
+        <entry file="file://$PROJECT_DIR$/222.py">
+          <provider selected="true" editor-type-id="text-editor">
+            <state relative-caret-position="420">
+              <caret line="15" selection-start-line="15" selection-end-line="15" />
+              <folding>
+                <element signature="e#23#42#0" expanded="true" />
+              </folding>
+            </state>
+          </provider>
+        </entry>
+      </file>
+      <file pinned="false" current-in-tab="false">
+        <entry file="file://$PROJECT_DIR$/crnn/app.py">
+          <provider selected="true" editor-type-id="text-editor">
+            <state>
+              <folding>
+                <element signature="e#0#46#0" expanded="true" />
+              </folding>
+            </state>
+          </provider>
+        </entry>
+      </file>
+      <file pinned="false" current-in-tab="false">
+        <entry file="file://$PROJECT_DIR$/crnn/crnn.py">
+          <provider selected="true" editor-type-id="text-editor">
+            <state relative-caret-position="30">
+              <caret line="15" column="10" selection-start-line="15" selection-start-column="6" selection-end-line="15" selection-end-column="10" />
+            </state>
+          </provider>
+        </entry>
+      </file>
+      <file pinned="false" current-in-tab="false">
+        <entry file="file://$PROJECT_DIR$/dev_image.py">
+          <provider selected="true" editor-type-id="text-editor">
+            <state relative-caret-position="150">
+              <caret line="5" column="24" selection-start-line="5" selection-start-column="24" selection-end-line="5" selection-end-column="24" />
+            </state>
+          </provider>
+        </entry>
+      </file>
+      <file pinned="false" current-in-tab="false">
+        <entry file="file://$PROJECT_DIR$/CCPN/3.html">
+          <provider selected="true" editor-type-id="text-editor" />
+        </entry>
+      </file>
+      <file pinned="false" current-in-tab="false">
+        <entry file="file://$PROJECT_DIR$/CCPN/refinenet.py">
+          <provider selected="true" editor-type-id="text-editor" />
+        </entry>
+      </file>
+    </leaf>
+  </component>
+  <component name="FileTemplateManagerImpl">
+    <option name="RECENT_TEMPLATES">
+      <list>
+        <option value="HTML File" />
+        <option value="Python Script" />
+      </list>
+    </option>
+  </component>
+  <component name="FindInProjectRecents">
+    <findStrings>
+      <find>get_range</find>
+      <find>ranges</find>
+      <find>np.sum(i</find>
+      <find>_find_right</find>
+      <find>find_top</find>
+      <find>change_location</find>
+    </findStrings>
+  </component>
+  <component name="IdeDocumentHistory">
+    <option name="CHANGED_PATHS">
+      <list>
+        <option value="$PROJECT_DIR$/templates/2.html" />
+        <option value="$PROJECT_DIR$/templates/3.html" />
+        <option value="$PROJECT_DIR$/templates/word.html" />
+        <option value="$PROJECT_DIR$/templates/jeitu.html" />
+        <option value="$PROJECT_DIR$/CRAFT-pytorch/textbbx.py" />
+        <option value="$PROJECT_DIR$/templates/show.html" />
+        <option value="$PROJECT_DIR$/.2.py" />
+        <option value="$PROJECT_DIR$/Detection.py" />
+        <option value="$PROJECT_DIR$/neighbor.py" />
+        <option value="$PROJECT_DIR$/dev_image.py" />
+        <option value="$PROJECT_DIR$/ocrapi.py" />
+        <option value="$PROJECT_DIR$/tmp/page.py" />
+        <option value="$PROJECT_DIR$/tmp/x1.py" />
+        <option value="$PROJECT_DIR$/tmp/charpoint.py" />
+        <option value="$PROJECT_DIR$/tmp/araw.py" />
+        <option value="$PROJECT_DIR$/Near.py" />
+        <option value="$PROJECT_DIR$/app.py" />
+        <option value="$PROJECT_DIR$/tools.py" />
+        <option value="$PROJECT_DIR$/image_tools.py" />
+        <option value="$PROJECT_DIR$/222.py" />
+        <option value="$PROJECT_DIR$/crnn/crnn.py" />
+      </list>
+    </option>
+  </component>
+  <component name="ProjectFrameBounds">
+    <option name="x" value="10" />
+    <option name="width" value="1696" />
+    <option name="height" value="1026" />
+  </component>
+  <component name="ProjectView">
+    <navigator proportions="" version="1">
+      <foldersAlwaysOnTop value="true" />
+    </navigator>
+    <panes>
+      <pane id="Scope" />
+      <pane id="ProjectPane">
+        <subPane>
+          <expand>
+            <path>
+              <item name="PaperCut" type="b2602c69:ProjectViewProjectNode" />
+              <item name="PaperCut" type="462c0819:PsiDirectoryNode" />
+            </path>
+          </expand>
+          <select />
+        </subPane>
+      </pane>
+    </panes>
+  </component>
+  <component name="PropertiesComponent">
+    <property name="DefaultHtmlFileTemplate" value="HTML File" />
+    <property name="WebServerToolWindowFactoryState" value="false" />
+    <property name="last_opened_file_path" value="$PROJECT_DIR$" />
+    <property name="nodejs_interpreter_path.stuck_in_default_project" value="undefined stuck path" />
+    <property name="nodejs_npm_path_reset_for_default_project" value="true" />
+    <property name="settings.editor.selected.configurable" value="com.jetbrains.python.templateLanguages.PyTemplateLanguageModulesConfigurable" />
+  </component>
+  <component name="RecentsManager">
+    <key name="CopyFile.RECENT_KEYS">
+      <recent name="D:\PaperCut" />
+      <recent name="D:\PaperCut\image_dir" />
+      <recent name="D:\PaperCut\static" />
+    </key>
+  </component>
+  <component name="RunDashboard">
+    <option name="ruleStates">
+      <list>
+        <RuleState>
+          <option name="name" value="ConfigurationTypeDashboardGroupingRule" />
+        </RuleState>
+        <RuleState>
+          <option name="name" value="StatusDashboardGroupingRule" />
+        </RuleState>
+      </list>
+    </option>
+  </component>
+  <component name="RunManager" selected="Python.app">
+    <configuration name="app" type="PythonConfigurationType" factoryName="Python" temporary="true">
+      <module name="PaperCut" />
+      <option name="INTERPRETER_OPTIONS" value="" />
+      <option name="PARENT_ENVS" value="true" />
+      <envs>
+        <env name="PYTHONUNBUFFERED" value="1" />
+      </envs>
+      <option name="SDK_HOME" value="" />
+      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
+      <option name="IS_MODULE_SDK" value="true" />
+      <option name="ADD_CONTENT_ROOTS" value="true" />
+      <option name="ADD_SOURCE_ROOTS" value="true" />
+      <EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
+      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/app.py" />
+      <option name="PARAMETERS" value="" />
+      <option name="SHOW_COMMAND_LINE" value="false" />
+      <option name="EMULATE_TERMINAL" value="false" />
+      <option name="MODULE_MODE" value="false" />
+      <option name="REDIRECT_INPUT" value="false" />
+      <option name="INPUT_FILE" value="" />
+      <method v="2" />
+    </configuration>
+    <configuration name="ocrapi" type="PythonConfigurationType" factoryName="Python" temporary="true">
+      <module name="PaperCut" />
+      <option name="INTERPRETER_OPTIONS" value="" />
+      <option name="PARENT_ENVS" value="true" />
+      <envs>
+        <env name="PYTHONUNBUFFERED" value="1" />
+      </envs>
+      <option name="SDK_HOME" value="" />
+      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
+      <option name="IS_MODULE_SDK" value="true" />
+      <option name="ADD_CONTENT_ROOTS" value="true" />
+      <option name="ADD_SOURCE_ROOTS" value="true" />
+      <EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
+      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/ocrapi.py" />
+      <option name="PARAMETERS" value="" />
+      <option name="SHOW_COMMAND_LINE" value="false" />
+      <option name="EMULATE_TERMINAL" value="false" />
+      <option name="MODULE_MODE" value="false" />
+      <option name="REDIRECT_INPUT" value="false" />
+      <option name="INPUT_FILE" value="" />
+      <method v="2" />
+    </configuration>
+    <configuration name="page" type="PythonConfigurationType" factoryName="Python" temporary="true">
+      <module name="PaperCut" />
+      <option name="INTERPRETER_OPTIONS" value="" />
+      <option name="PARENT_ENVS" value="true" />
+      <envs>
+        <env name="PYTHONUNBUFFERED" value="1" />
+      </envs>
+      <option name="SDK_HOME" value="" />
+      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/tmp" />
+      <option name="IS_MODULE_SDK" value="true" />
+      <option name="ADD_CONTENT_ROOTS" value="true" />
+      <option name="ADD_SOURCE_ROOTS" value="true" />
+      <EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
+      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/tmp/page.py" />
+      <option name="PARAMETERS" value="" />
+      <option name="SHOW_COMMAND_LINE" value="false" />
+      <option name="EMULATE_TERMINAL" value="false" />
+      <option name="MODULE_MODE" value="false" />
+      <option name="REDIRECT_INPUT" value="false" />
+      <option name="INPUT_FILE" value="" />
+      <method v="2" />
+    </configuration>
+    <configuration name="tools" type="PythonConfigurationType" factoryName="Python" temporary="true">
+      <module name="PaperCut" />
+      <option name="INTERPRETER_OPTIONS" value="" />
+      <option name="PARENT_ENVS" value="true" />
+      <envs>
+        <env name="PYTHONUNBUFFERED" value="1" />
+      </envs>
+      <option name="SDK_HOME" value="" />
+      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
+      <option name="IS_MODULE_SDK" value="true" />
+      <option name="ADD_CONTENT_ROOTS" value="true" />
+      <option name="ADD_SOURCE_ROOTS" value="true" />
+      <EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
+      <option name="SCRIPT_NAME" value="D:\PaperCut\base64crnn.py" />
+      <option name="PARAMETERS" value="" />
+      <option name="SHOW_COMMAND_LINE" value="false" />
+      <option name="EMULATE_TERMINAL" value="false" />
+      <option name="MODULE_MODE" value="false" />
+      <option name="REDIRECT_INPUT" value="false" />
+      <option name="INPUT_FILE" value="" />
+      <method v="2" />
+    </configuration>
+    <configuration name="x1" type="PythonConfigurationType" factoryName="Python" temporary="true">
+      <module name="PaperCut" />
+      <option name="INTERPRETER_OPTIONS" value="" />
+      <option name="PARENT_ENVS" value="true" />
+      <envs>
+        <env name="PYTHONUNBUFFERED" value="1" />
+      </envs>
+      <option name="SDK_HOME" value="" />
+      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/tmp" />
+      <option name="IS_MODULE_SDK" value="true" />
+      <option name="ADD_CONTENT_ROOTS" value="true" />
+      <option name="ADD_SOURCE_ROOTS" value="true" />
+      <EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
+      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/tmp/x1.py" />
+      <option name="PARAMETERS" value="" />
+      <option name="SHOW_COMMAND_LINE" value="false" />
+      <option name="EMULATE_TERMINAL" value="false" />
+      <option name="MODULE_MODE" value="false" />
+      <option name="REDIRECT_INPUT" value="false" />
+      <option name="INPUT_FILE" value="" />
+      <method v="2" />
+    </configuration>
+    <recent_temporary>
+      <list>
+        <item itemvalue="Python.app" />
+        <item itemvalue="Python.x1" />
+        <item itemvalue="Python.page" />
+        <item itemvalue="Python.tools" />
+        <item itemvalue="Python.ocrapi" />
+      </list>
+    </recent_temporary>
+  </component>
+  <component name="SvnConfiguration">
+    <configuration />
+  </component>
+  <component name="TaskManager">
+    <task active="true" id="Default" summary="Default task">
+      <changelist id="e8f4bf68-87b2-47fe-aa7b-653542aa7b3c" name="Default Changelist" comment="" />
+      <created>1584929454870</created>
+      <option name="number" value="Default" />
+      <option name="presentableId" value="Default" />
+      <updated>1584929454870</updated>
+      <workItem from="1584929456015" duration="8101000" />
+      <workItem from="1585014584228" duration="4164000" />
+      <workItem from="1585101698129" duration="4059000" />
+      <workItem from="1585303164159" duration="6000" />
+      <workItem from="1585552634245" duration="7974000" />
+      <workItem from="1585618060596" duration="12394000" />
+      <workItem from="1585704538316" duration="4036000" />
+      <workItem from="1585791771961" duration="3943000" />
+      <workItem from="1585877121532" duration="3305000" />
+      <workItem from="1585899151472" duration="593000" />
+      <workItem from="1585902527402" duration="619000" />
+      <workItem from="1586223211002" duration="10272000" />
+      <workItem from="1586309157784" duration="2496000" />
+      <workItem from="1586395583676" duration="592000" />
+      <workItem from="1586482072842" duration="589000" />
+      <workItem from="1586747677645" duration="588000" />
+      <workItem from="1586828175919" duration="578000" />
+      <workItem from="1587550294357" duration="1018000" />
+      <workItem from="1587605226320" duration="1182000" />
+      <workItem from="1587608148359" duration="1988000" />
+      <workItem from="1587692949637" duration="772000" />
+      <workItem from="1587977020112" duration="2481000" />
+      <workItem from="1588038248992" duration="4100000" />
+      <workItem from="1588066458210" duration="591000" />
+      <workItem from="1588124876283" duration="400000" />
+      <workItem from="1588228670456" duration="5170000" />
+      <workItem from="1588728721256" duration="599000" />
+      <workItem from="1588814609756" duration="914000" />
+    </task>
+    <servers />
+  </component>
+  <component name="TimeTrackingManager">
+    <option name="totallyTimeSpent" value="83524000" />
+  </component>
+  <component name="ToolWindowManager">
+    <frame x="10" y="0" width="1696" height="1026" extended-state="0" />
+    <editor active="true" />
+    <layout>
+      <window_info active="true" content_ui="combo" id="Project" order="0" visible="true" weight="0.19559902" />
+      <window_info id="Structure" order="1" side_tool="true" weight="0.25" />
+      <window_info id="Favorites" order="2" side_tool="true" />
+      <window_info anchor="bottom" id="Message" order="0" />
+      <window_info anchor="bottom" id="Find" order="1" />
+      <window_info anchor="bottom" id="Run" order="2" visible="true" weight="0.343785" />
+      <window_info anchor="bottom" id="Debug" order="3" weight="0.39977604" />
+      <window_info anchor="bottom" id="Cvs" order="4" weight="0.25" />
+      <window_info anchor="bottom" id="Inspection" order="5" weight="0.4" />
+      <window_info anchor="bottom" id="TODO" order="6" />
+      <window_info anchor="bottom" id="Docker" order="7" show_stripe_button="false" />
+      <window_info anchor="bottom" id="Version Control" order="8" />
+      <window_info anchor="bottom" id="Database Changes" order="9" />
+      <window_info anchor="bottom" id="Event Log" order="10" side_tool="true" />
+      <window_info anchor="bottom" id="Terminal" order="11" />
+      <window_info anchor="bottom" id="Python Console" order="12" />
+      <window_info anchor="right" id="Commander" internal_type="SLIDING" order="0" type="SLIDING" weight="0.4" />
+      <window_info anchor="right" id="Ant Build" order="1" weight="0.25" />
+      <window_info anchor="right" content_ui="combo" id="Hierarchy" order="2" weight="0.25" />
+      <window_info anchor="right" id="SciView" order="3" />
+      <window_info anchor="right" id="Database" order="4" visible="true" weight="0.10635697" />
+    </layout>
+  </component>
+  <component name="TypeScriptGeneratedFilesManager">
+    <option name="version" value="1" />
+  </component>
+  <component name="editorHistoryManager">
+    <entry file="file://$PROJECT_DIR$/static/html2canvas.js">
+      <provider selected="true" editor-type-id="text-editor" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/gray.png">
+      <provider selected="true" editor-type-id="images" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/.2.py" />
+    <entry file="file://$PROJECT_DIR$/image_dir/7.png">
+      <provider selected="true" editor-type-id="images" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/image_dir/34.png">
+      <provider selected="true" editor-type-id="images" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/image_dir/35.png">
+      <provider selected="true" editor-type-id="images" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/image_dir/666.png">
+      <provider selected="true" editor-type-id="images" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/image_dir/2.png">
+      <provider selected="true" editor-type-id="images" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/image_dir/3.png">
+      <provider selected="true" editor-type-id="images" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/image_dir/1.png">
+      <provider selected="true" editor-type-id="images" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/ret.png">
+      <provider selected="true" editor-type-id="images" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/image_dir/17.png">
+      <provider selected="true" editor-type-id="images" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/image_dir/6.png">
+      <provider selected="true" editor-type-id="images" />
+    </entry>
+    <entry file="file://$USER_HOME$/.PyCharm2018.3/system/python_stubs/1382460654/cv2/cv2/__init__.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="780">
+          <caret line="7258" column="4" selection-start-line="7258" selection-start-column="4" selection-end-line="7258" selection-end-column="4" />
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/templates/upload.html">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="90">
+          <caret line="3" selection-start-line="3" selection-end-line="6" selection-end-column="7" />
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/tmp/word_count.py">
+      <provider selected="true" editor-type-id="text-editor" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/tmp/char64d.txt">
+      <provider selected="true" editor-type-id="LargeFileEditor" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/templates/jeitu.html">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="2610">
+          <caret line="87" column="25" selection-start-line="87" selection-start-column="25" selection-end-line="87" selection-end-column="25" />
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/image_generator.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="2160">
+          <caret line="77" column="4" selection-start-line="77" selection-start-column="4" selection-end-line="77" selection-end-column="4" />
+          <folding>
+            <element signature="e#0#10#0" expanded="true" />
+          </folding>
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/templates/show.html">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="720">
+          <caret line="24" selection-start-line="24" selection-end-line="29" selection-end-column="16" />
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/tmp/araw.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="420">
+          <caret line="14" selection-start-line="14" selection-end-line="14" />
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/tmp/x1.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="610">
+          <caret line="59" column="39" selection-start-line="59" selection-start-column="39" selection-end-line="59" selection-end-column="39" />
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/tmp/charpoint.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="870">
+          <caret line="29" column="34" selection-start-line="29" selection-start-column="34" selection-end-line="29" selection-end-column="34" />
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/tmp/g.py">
+      <provider selected="true" editor-type-id="text-editor" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/tmp/fidn.py">
+      <provider selected="true" editor-type-id="text-editor" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/tmp/DataBase.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="8460">
+          <caret line="288" selection-start-line="288" selection-end-line="288" />
+          <folding>
+            <element signature="e#24#54#0" expanded="true" />
+          </folding>
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/tmp/x.py">
+      <provider selected="true" editor-type-id="text-editor" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/tmp/page.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="240">
+          <caret line="8" column="5401" selection-start-line="8" selection-start-column="5401" selection-end-line="8" selection-end-column="5468" />
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/tmp/mydraw.py">
+      <provider selected="true" editor-type-id="text-editor" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/ocrapi.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="132">
+          <caret line="14" selection-start-line="14" selection-end-line="14" />
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/image_dir/4.jpg">
+      <provider selected="true" editor-type-id="images" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/base64crnn.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="-124">
+          <caret line="92" column="20" selection-start-line="92" selection-start-column="20" selection-end-line="92" selection-end-column="20" />
+          <folding>
+            <element signature="e#0#13#0" expanded="true" />
+          </folding>
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/bbb.png">
+      <provider selected="true" editor-type-id="images" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/binary.png">
+      <provider selected="true" editor-type-id="images" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/Detection.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="120">
+          <caret line="4" selection-start-line="4" selection-end-line="4" />
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/image_dir/timmmmmmmmmmmmmmmmmmmmmmm.png">
+      <provider selected="true" editor-type-id="images" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/neighbor.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="450">
+          <caret line="18" column="25" selection-start-line="18" selection-start-column="25" selection-end-line="18" selection-end-column="25" />
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/DeepModel.py">
+      <provider selected="true" editor-type-id="text-editor" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/image_tools.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="2490">
+          <caret line="83" column="30" selection-start-line="83" selection-start-column="30" selection-end-line="83" selection-end-column="30" />
+          <folding>
+            <element signature="e#23#34#0" expanded="true" />
+          </folding>
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/Near.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="3690">
+          <caret line="123" selection-start-line="123" selection-end-line="123" />
+          <folding>
+            <element signature="e#23#41#0" expanded="true" />
+          </folding>
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/CCPN/craft.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="1727">
+          <caret line="81" selection-start-line="81" selection-end-line="84" selection-end-column="23" />
+          <folding>
+            <element signature="e#86#98#0" expanded="true" />
+          </folding>
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/TextModel.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state>
+          <caret column="10" selection-start-column="10" selection-end-column="10" />
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/tools.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="90">
+          <caret line="3" column="27" selection-start-line="3" selection-start-column="27" selection-end-line="3" selection-end-column="27" />
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/CCPN/textbbx.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="5010">
+          <caret line="190" selection-start-line="190" selection-end-line="190" />
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/222.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="420">
+          <caret line="15" selection-start-line="15" selection-end-line="15" />
+          <folding>
+            <element signature="e#23#42#0" expanded="true" />
+          </folding>
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/crnn/app.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state>
+          <folding>
+            <element signature="e#0#46#0" expanded="true" />
+          </folding>
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/dev_image.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="150">
+          <caret line="5" column="24" selection-start-line="5" selection-start-column="24" selection-end-line="5" selection-end-column="24" />
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/CCPN/3.html">
+      <provider selected="true" editor-type-id="text-editor" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/CCPN/refinenet.py">
+      <provider selected="true" editor-type-id="text-editor" />
+    </entry>
+    <entry file="file://$PROJECT_DIR$/crnn/crnn.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="30">
+          <caret line="15" column="10" selection-start-line="15" selection-start-column="6" selection-end-line="15" selection-end-column="10" />
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/app.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="-734">
+          <caret line="47" column="12" selection-start-line="47" selection-start-column="12" selection-end-line="47" selection-end-column="12" />
+        </state>
+      </provider>
+    </entry>
+  </component>
+</project>

+ 15 - 0
222.py

@@ -0,0 +1,15 @@
+# -*- coding:utf-8 -*-
+from app import app
+from flask import request
+app.test_request_context('/basic').push()  # activate the request context
+from flask import current_app
+app.app_context().push()  # activate the application context
+
+
+# define the LoginForm class
+from wtforms import Form, StringField, PasswordField, BooleanField, SelectField
+from wtforms.validators import DataRequired, length
+class LoginForm(Form):
+    username = StringField('Username', validators=[DataRequired()])
+    password = PasswordField('Password', validators=[DataRequired(), length(8, 128)])
+
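
A quick exercise of the form defined above (an editor's sketch, not part of the commit): `wtforms.Form` accepts a `MultiDict` of submitted fields, so the `length(8, 128)` rule on `password` can be checked outside of Flask. The field values here are illustrative assumptions.

```
# Sketch only: validating the LoginForm from 222.py with sample data.
from werkzeug.datastructures import MultiDict
from wtforms import Form, StringField, PasswordField
from wtforms.validators import DataRequired, length

class LoginForm(Form):
    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired(), length(8, 128)])

too_short = LoginForm(MultiDict({'username': 'alice', 'password': 'short'}))
long_enough = LoginForm(MultiDict({'username': 'alice', 'password': 'longenough1'}))
print(too_short.validate())    # False: 'short' violates length(8, 128)
print(long_enough.validate())  # True
```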

+ 6 - 0
CCPN/.gitignore

@@ -0,0 +1,6 @@
+*.pyc
+*.swp
+*.pkl
+*.pth
+result*
+weights*

+ 11 - 0
CCPN/3.html

@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <title>Title</title>
+</head>
+<body>
+
+<div style="ma"></div>
+</body>
+</html>

+ 19 - 0
CCPN/LICENSE

@@ -0,0 +1,19 @@
+Copyright (c) 2019-present NAVER Corp.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 103 - 0
CCPN/README.md

@@ -0,0 +1,103 @@
+## CRAFT: Character-Region Awareness For Text detection
+Official Pytorch implementation of CRAFT text detector | [Paper](https://arxiv.org/abs/1904.01941) | [Pretrained Model](https://drive.google.com/open?id=1Jk4eGD7crsqCCg9C9VjCLkMN3ze8kutZ) | [Supplementary](https://youtu.be/HI8MzpY8KMI)
+
+**[Youngmin Baek](mailto:youngmin.baek@navercorp.com), Bado Lee, Dongyoon Han, Sangdoo Yun, Hwalsuk Lee.**
+ 
+Clova AI Research, NAVER Corp.
+
+### Sample Results
+
+### Overview
+PyTorch implementation of the CRAFT text detector, which effectively detects text areas by exploring each character region and the affinity between characters. Text bounding boxes are obtained by simply finding minimum bounding rectangles on the binary map after thresholding the character-region and affinity scores.
+
+<img width="1000" alt="teaser" src="./figures/craft_example.gif">
+
+## Updates
+**13 Jun, 2019**: Initial update
+**20 Jul, 2019**: Added post-processing for polygon result
+**28 Sep, 2019**: Added the trained model on IC15 and the link refiner
+
+
+## Getting started
+### Install dependencies
+#### Requirements
+- PyTorch>=0.4.1
+- torchvision>=0.2.1
+- opencv-python>=3.4.2
+- see requirements.txt
+```
+pip install -r requirements.txt
+```
+
+### Training
+The code for training is not included in this repository, and we cannot release the full training code for IP reason.
+
+
+### Test instruction using pretrained model
+- Download the trained models
+ 
+ *Model name* | *Used datasets* | *Languages* | *Purpose* | *Model Link* |
+ | :--- | :--- | :--- | :--- | :--- |
+General | SynthText, IC13, IC17 | Eng + MLT | For general purpose | [Click](https://drive.google.com/open?id=1Jk4eGD7crsqCCg9C9VjCLkMN3ze8kutZ)
+IC15 | SynthText, IC15 | Eng | For IC15 only | [Click](https://drive.google.com/open?id=1i2R7UIUqmkUtF0jv_3MXTqmQ_9wuAnLf)
+LinkRefiner | CTW1500 | - | Used with the General Model | [Click](https://drive.google.com/open?id=1XSaFwBkOaFOdtk4Ane3DFyJGPRw6v5bO)
+
+* Run with pretrained model
+``` (with python 3.7)
+python test.py --trained_model=[weightfile] --test_folder=[folder path to test images]
+```
+
+The result image and score maps will be saved to `./result` by default.
+
+### Arguments
+* `--trained_model`: pretrained model
+* `--text_threshold`: text confidence threshold
+* `--low_text`: text low-bound score
+* `--link_threshold`: link confidence threshold
+* `--cuda`: use cuda for inference (default:True)
+* `--canvas_size`: max image size for inference
+* `--mag_ratio`: image magnification ratio
+* `--poly`: enable polygon type result
+* `--show_time`: show processing time
+* `--test_folder`: folder path to input images
+* `--refine`: use link refiner for sentence-level dataset
+* `--refiner_model`: pretrained refiner model
+
+
+## Links
+- WebDemo : https://demo.ocr.clova.ai/
+- Repo of recognition : https://github.com/clovaai/deep-text-recognition-benchmark
+
+## Citation
+```
+@inproceedings{baek2019character,
+  title={Character Region Awareness for Text Detection},
+  author={Baek, Youngmin and Lee, Bado and Han, Dongyoon and Yun, Sangdoo and Lee, Hwalsuk},
+  booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
+  pages={9365--9374},
+  year={2019}
+}
+```
+
+## License
+```
+Copyright (c) 2019-present NAVER Corp.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+```

+ 0 - 0
CCPN/basenet/__init__.py


+ 73 - 0
CCPN/basenet/vgg16_bn.py

@@ -0,0 +1,73 @@
+from collections import namedtuple
+
+import torch
+import torch.nn as nn
+import torch.nn.init as init
+from torchvision import models
+from torchvision.models.vgg import model_urls
+
+def init_weights(modules):
+    for m in modules:
+        if isinstance(m, nn.Conv2d):
+            init.xavier_uniform_(m.weight.data)
+            if m.bias is not None:
+                m.bias.data.zero_()
+        elif isinstance(m, nn.BatchNorm2d):
+            m.weight.data.fill_(1)
+            m.bias.data.zero_()
+        elif isinstance(m, nn.Linear):
+            m.weight.data.normal_(0, 0.01)
+            m.bias.data.zero_()
+
+class vgg16_bn(torch.nn.Module):
+    def __init__(self, pretrained=True, freeze=True):
+        super(vgg16_bn, self).__init__()
+        model_urls['vgg16_bn'] = model_urls['vgg16_bn'].replace('https://', 'http://')
+        vgg_pretrained_features = models.vgg16_bn(pretrained=pretrained).features
+        self.slice1 = torch.nn.Sequential()
+        self.slice2 = torch.nn.Sequential()
+        self.slice3 = torch.nn.Sequential()
+        self.slice4 = torch.nn.Sequential()
+        self.slice5 = torch.nn.Sequential()
+        for x in range(12):         # conv2_2
+            self.slice1.add_module(str(x), vgg_pretrained_features[x])
+        for x in range(12, 19):         # conv3_3
+            self.slice2.add_module(str(x), vgg_pretrained_features[x])
+        for x in range(19, 29):         # conv4_3
+            self.slice3.add_module(str(x), vgg_pretrained_features[x])
+        for x in range(29, 39):         # conv5_3
+            self.slice4.add_module(str(x), vgg_pretrained_features[x])
+
+        # fc6, fc7 without atrous conv
+        self.slice5 = torch.nn.Sequential(
+                nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
+                nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6),
+                nn.Conv2d(1024, 1024, kernel_size=1)
+        )
+
+        if not pretrained:
+            init_weights(self.slice1.modules())
+            init_weights(self.slice2.modules())
+            init_weights(self.slice3.modules())
+            init_weights(self.slice4.modules())
+
+        init_weights(self.slice5.modules())        # no pretrained model for fc6 and fc7
+
+        if freeze:
+            for param in self.slice1.parameters():      # only first conv
+                param.requires_grad = False
+
+    def forward(self, X):
+        h = self.slice1(X)
+        h_relu2_2 = h
+        h = self.slice2(h)
+        h_relu3_2 = h
+        h = self.slice3(h)
+        h_relu4_3 = h
+        h = self.slice4(h)
+        h_relu5_3 = h
+        h = self.slice5(h)
+        h_fc7 = h
+        vgg_outputs = namedtuple("VggOutputs", ['fc7', 'relu5_3', 'relu4_3', 'relu3_2', 'relu2_2'])
+        out = vgg_outputs(h_fc7, h_relu5_3, h_relu4_3, h_relu3_2, h_relu2_2)
+        return out
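
A smoke test for the backbone (an editor's sketch, not part of the commit, assuming the older torchvision this repo targets, where `torchvision.models.vgg.model_urls` still exists): run a dummy batch through `vgg16_bn` and print the shape of each named feature slice.

```
# Sketch only: inspect the multi-scale features vgg16_bn returns.
import torch
from basenet.vgg16_bn import vgg16_bn

net = vgg16_bn(pretrained=False, freeze=False).eval()
with torch.no_grad():
    feats = net(torch.randn(1, 3, 768, 768))
for name, f in zip(feats._fields, feats):
    print(name, tuple(f.shape))  # fc7, relu5_3, relu4_3, relu3_2, relu2_2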

+ 85 - 0
CCPN/craft.py

@@ -0,0 +1,85 @@
+"""  
+Copyright (c) 2019-present NAVER Corp.
+MIT License
+"""
+
+# -*- coding: utf-8 -*-
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from basenet.vgg16_bn import vgg16_bn, init_weights
+
+class double_conv(nn.Module):
+    def __init__(self, in_ch, mid_ch, out_ch):
+        super(double_conv, self).__init__()
+        self.conv = nn.Sequential(
+            nn.Conv2d(in_ch + mid_ch, mid_ch, kernel_size=1),
+            nn.BatchNorm2d(mid_ch),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(mid_ch, out_ch, kernel_size=3, padding=1),
+            nn.BatchNorm2d(out_ch),
+            nn.ReLU(inplace=True)
+        )
+
+    def forward(self, x):
+        x = self.conv(x)
+        return x
+
+
+class CRAFT(nn.Module):
+    def __init__(self, pretrained=False, freeze=False):
+        super(CRAFT, self).__init__()
+
+        """ Base network """
+        self.basenet = vgg16_bn(pretrained, freeze)
+
+        """ U network """
+        self.upconv1 = double_conv(1024, 512, 256)
+        self.upconv2 = double_conv(512, 256, 128)
+        self.upconv3 = double_conv(256, 128, 64)
+        self.upconv4 = double_conv(128, 64, 32)
+
+        num_class = 2
+        self.conv_cls = nn.Sequential(
+            nn.Conv2d(32, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True),
+            nn.Conv2d(32, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True),
+            nn.Conv2d(32, 16, kernel_size=3, padding=1), nn.ReLU(inplace=True),
+            nn.Conv2d(16, 16, kernel_size=1), nn.ReLU(inplace=True),
+            nn.Conv2d(16, num_class, kernel_size=1),
+        )
+
+        init_weights(self.upconv1.modules())
+        init_weights(self.upconv2.modules())
+        init_weights(self.upconv3.modules())
+        init_weights(self.upconv4.modules())
+        init_weights(self.conv_cls.modules())
+        
+    def forward(self, x):
+        """ Base network """
+        sources = self.basenet(x)
+
+        """ U network """
+        y = torch.cat([sources[0], sources[1]], dim=1)
+        y = self.upconv1(y)
+
+        y = F.interpolate(y, size=sources[2].size()[2:], mode='bilinear', align_corners=False)
+        y = torch.cat([y, sources[2]], dim=1)
+        y = self.upconv2(y)
+
+        y = F.interpolate(y, size=sources[3].size()[2:], mode='bilinear', align_corners=False)
+        y = torch.cat([y, sources[3]], dim=1)
+        y = self.upconv3(y)
+
+        y = F.interpolate(y, size=sources[4].size()[2:], mode='bilinear', align_corners=False)
+        y = torch.cat([y, sources[4]], dim=1)
+        feature = self.upconv4(y)
+
+        y = self.conv_cls(feature)
+
+        return y.permute(0,2,3,1), feature
+
+if __name__ == '__main__':
+    model = CRAFT(pretrained=True).cuda()
+    output, _ = model(torch.randn(1, 3, 768, 768).cuda())
+    print(output.shape)
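
Following the `__main__` check above, the two channels of the permuted output are the score maps the README refers to. An editor's sketch, not part of the commit; the channel order (region first, then affinity) is an assumption taken from the CRAFT paper, and the same torchvision caveat as above applies.

```
# Sketch only: split the CRAFT output into its two score maps.
import torch
from craft import CRAFT

model = CRAFT(pretrained=False).eval()  # CPU, randomly initialized weights
with torch.no_grad():
    y, feature = model(torch.randn(1, 3, 768, 768))
region_score = y[0, :, :, 0]    # character-region heatmap (assumed channel 0)
affinity_score = y[0, :, :, 1]  # between-character affinity heatmap
print(region_score.shape)       # half the input resolution, e.g. (384, 384)
```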

+ 256 - 0
CCPN/craft_utils.py

@@ -0,0 +1,256 @@
+"""  
+Copyright (c) 2019-present NAVER Corp.
+MIT License
+"""
+
+# -*- coding: utf-8 -*-
+import numpy as np
+import cv2
+import math
+
+""" auxilary functions """
+
+
+# unwarp coordinates
+def warpCoord(Minv, pt):
+    out = np.matmul(Minv, (pt[0], pt[1], 1))
+    return np.array([out[0] / out[2], out[1] / out[2]])
+
+
+""" end of auxilary functions """
+
+
+def getDetBoxes_core(textmap, linkmap, text_threshold, link_threshold, low_text):
+    # prepare data
+    linkmap = linkmap.copy()
+    textmap = textmap.copy()
+    img_h, img_w = textmap.shape
+
+    """ labeling method """
+    ret, text_score = cv2.threshold(textmap, low_text, 1, 0)
+    ret, link_score = cv2.threshold(linkmap, link_threshold, 1, 0)
+
+    text_score_comb = np.clip(text_score + link_score, 0, 1)
+    nLabels, labels, stats, centroids = cv2.connectedComponentsWithStats(text_score_comb.astype(np.uint8),
+                                                                         connectivity=4)
+
+    det = []
+    mapper = []
+    for k in range(1, nLabels):
+        # size filtering
+        size = stats[k, cv2.CC_STAT_AREA]
+        if size < 10: continue
+
+        # thresholding
+        if np.max(textmap[labels == k]) < text_threshold: continue
+
+        # make segmentation map
+        segmap = np.zeros(textmap.shape, dtype=np.uint8)
+        segmap[labels == k] = 255
+        segmap[np.logical_and(link_score == 1, text_score == 0)] = 0  # remove link area
+        x, y = stats[k, cv2.CC_STAT_LEFT], stats[k, cv2.CC_STAT_TOP]
+        w, h = stats[k, cv2.CC_STAT_WIDTH], stats[k, cv2.CC_STAT_HEIGHT]
+        niter = int(math.sqrt(size * min(w, h) / (w * h)) * 2)
+        sx, ex, sy, ey = x - niter, x + w + niter + 1, y - niter, y + h + niter + 1
+        # boundary check
+        if sx < 0: sx = 0
+        if sy < 0: sy = 0
+        if ex >= img_w: ex = img_w
+        if ey >= img_h: ey = img_h
+        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1 + niter, 1 + niter))
+        segmap[sy:ey, sx:ex] = cv2.dilate(segmap[sy:ey, sx:ex], kernel)
+
+        # make box
+        np_contours = np.roll(np.array(np.where(segmap != 0)), 1, axis=0).transpose().reshape(-1, 2)
+        rectangle = cv2.minAreaRect(np_contours)
+        box = cv2.boxPoints(rectangle)
+
+        # align diamond-shape
+        w, h = np.linalg.norm(box[0] - box[1]), np.linalg.norm(box[1] - box[2])
+        box_ratio = max(w, h) / (min(w, h) + 1e-5)
+        if abs(1 - box_ratio) <= 0.1:
+            l, r = min(np_contours[:, 0]), max(np_contours[:, 0])
+            t, b = min(np_contours[:, 1]), max(np_contours[:, 1])
+            box = np.array([[l, t], [r, t], [r, b], [l, b]], dtype=np.float32)
+
+        # make clock-wise order
+        startidx = box.sum(axis=1).argmin()
+        box = np.roll(box, 4 - startidx, 0)
+        box = np.array(box)
+
+        det.append(box)
+        mapper.append(k)
+
+    return det, labels, mapper
+
+
+def getPoly_core(boxes, labels, mapper, linkmap):
+    # configs
+    num_cp = 5
+    max_len_ratio = 0.7
+    expand_ratio = 1.45
+    max_r = 2.0
+    step_r = 0.2
+
+    polys = []
+    for k, box in enumerate(boxes):
+        # size filter for small instance
+        w, h = int(np.linalg.norm(box[0] - box[1]) + 1), int(np.linalg.norm(box[1] - box[2]) + 1)
+        if w < 10 or h < 10:
+            polys.append(None);
+            continue
+
+        # warp image
+        tar = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
+        M = cv2.getPerspectiveTransform(box, tar)
+        word_label = cv2.warpPerspective(labels, M, (w, h), flags=cv2.INTER_NEAREST)
+        try:
+            Minv = np.linalg.inv(M)
+        except:
+            polys.append(None);
+            continue
+
+        # binarization for selected label
+        cur_label = mapper[k]
+        word_label[word_label != cur_label] = 0
+        word_label[word_label > 0] = 1
+
+        """ Polygon generation """
+        # find top/bottom contours
+        cp = []
+        max_len = -1
+        for i in range(w):
+            region = np.where(word_label[:, i] != 0)[0]
+            if len(region) < 2: continue
+            cp.append((i, region[0], region[-1]))
+            length = region[-1] - region[0] + 1
+            if length > max_len: max_len = length
+
+        # pass if max_len is similar to h
+        if h * max_len_ratio < max_len:
+            polys.append(None);
+            continue
+
+        # get pivot points with fixed length
+        tot_seg = num_cp * 2 + 1
+        seg_w = w / tot_seg  # segment width
+        pp = [None] * num_cp  # init pivot points
+        cp_section = [[0, 0]] * tot_seg
+        seg_height = [0] * num_cp
+        seg_num = 0
+        num_sec = 0
+        prev_h = -1
+        for i in range(0, len(cp)):
+            (x, sy, ey) = cp[i]
+            if (seg_num + 1) * seg_w <= x and seg_num <= tot_seg:
+                # average previous segment
+                if num_sec == 0: break
+                cp_section[seg_num] = [cp_section[seg_num][0] / num_sec, cp_section[seg_num][1] / num_sec]
+                num_sec = 0
+
+                # reset variables
+                seg_num += 1
+                prev_h = -1
+
+            # accumulate center points
+            cy = (sy + ey) * 0.5
+            cur_h = ey - sy + 1
+            cp_section[seg_num] = [cp_section[seg_num][0] + x, cp_section[seg_num][1] + cy]
+            num_sec += 1
+
+            if seg_num % 2 == 0: continue  # No polygon area
+
+            if prev_h < cur_h:
+                pp[int((seg_num - 1) / 2)] = (x, cy)
+                seg_height[int((seg_num - 1) / 2)] = cur_h
+                prev_h = cur_h
+
+        # processing last segment
+        if num_sec != 0:
+            cp_section[-1] = [cp_section[-1][0] / num_sec, cp_section[-1][1] / num_sec]
+
+        # pass if the number of pivots is insufficient or the segment width is smaller than the character height
+        if None in pp or seg_w < np.max(seg_height) * 0.25:
+            polys.append(None)
+            continue
+
+        # calc median maximum of pivot points
+        half_char_h = np.median(seg_height) * expand_ratio / 2
+
+        # calc gradient and apply to make horizontal pivots
+        new_pp = []
+        for i, (x, cy) in enumerate(pp):
+            dx = cp_section[i * 2 + 2][0] - cp_section[i * 2][0]
+            dy = cp_section[i * 2 + 2][1] - cp_section[i * 2][1]
+            if dx == 0:  # gradient is zero
+                new_pp.append([x, cy - half_char_h, x, cy + half_char_h])
+                continue
+            rad = - math.atan2(dy, dx)
+            c, s = half_char_h * math.cos(rad), half_char_h * math.sin(rad)
+            new_pp.append([x - s, cy - c, x + s, cy + c])
+
+        # get edge points to cover character heatmaps
+        isSppFound, isEppFound = False, False
+        grad_s = (pp[1][1] - pp[0][1]) / (pp[1][0] - pp[0][0]) + (pp[2][1] - pp[1][1]) / (pp[2][0] - pp[1][0])
+        grad_e = (pp[-2][1] - pp[-1][1]) / (pp[-2][0] - pp[-1][0]) + (pp[-3][1] - pp[-2][1]) / (pp[-3][0] - pp[-2][0])
+        for r in np.arange(0.5, max_r, step_r):
+            dx = 2 * half_char_h * r
+            if not isSppFound:
+                line_img = np.zeros(word_label.shape, dtype=np.uint8)
+                dy = grad_s * dx
+                p = np.array(new_pp[0]) - np.array([dx, dy, dx, dy])
+                cv2.line(line_img, (int(p[0]), int(p[1])), (int(p[2]), int(p[3])), 1, thickness=1)
+                if np.sum(np.logical_and(word_label, line_img)) == 0 or r + 2 * step_r >= max_r:
+                    spp = p
+                    isSppFound = True
+            if not isEppFound:
+                line_img = np.zeros(word_label.shape, dtype=np.uint8)
+                dy = grad_e * dx
+                p = np.array(new_pp[-1]) + np.array([dx, dy, dx, dy])
+                cv2.line(line_img, (int(p[0]), int(p[1])), (int(p[2]), int(p[3])), 1, thickness=1)
+                if np.sum(np.logical_and(word_label, line_img)) == 0 or r + 2 * step_r >= max_r:
+                    epp = p
+                    isEppFound = True
+            if isSppFound and isEppFound:
+                break
+
+        # pass if boundary of polygon is not found
+        if not (isSppFound and isEppFound):
+            polys.append(None)
+            continue
+
+        # make final polygon
+        poly = []
+        poly.append(warpCoord(Minv, (spp[0], spp[1])))
+        for p in new_pp:
+            poly.append(warpCoord(Minv, (p[0], p[1])))
+        poly.append(warpCoord(Minv, (epp[0], epp[1])))
+        poly.append(warpCoord(Minv, (epp[2], epp[3])))
+        for p in reversed(new_pp):
+            poly.append(warpCoord(Minv, (p[2], p[3])))
+        poly.append(warpCoord(Minv, (spp[2], spp[3])))
+
+        # add to final result
+        polys.append(np.array(poly))
+
+    return polys
+
+
+def getDetBoxes(textmap, linkmap, text_threshold, link_threshold, low_text, poly=False):
+    boxes, labels, mapper = getDetBoxes_core(textmap, linkmap, text_threshold, link_threshold, low_text)
+
+    if poly:
+        polys = getPoly_core(boxes, labels, mapper, linkmap)
+    else:
+        polys = [None] * len(boxes)
+
+    return boxes, polys
+
+
+def adjustResultCoordinates(polys, ratio_w, ratio_h, ratio_net=2):
+    if len(polys) > 0:
+        polys = np.array(polys)
+        for k in range(len(polys)):
+            if polys[k] is not None:
+                polys[k] *= (ratio_w * ratio_net, ratio_h * ratio_net)
+    return polys
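
The three helpers above form the whole CRAFT post-processing chain: getDetBoxes thresholds the text/link score maps, extracts connected components, and returns one quadrilateral per word; adjustResultCoordinates then maps the half-resolution coordinates back to the input image. A minimal sketch on synthetic score maps (the blob below is invented for illustration; run it from the CCPN directory so craft_utils imports):

    import numpy as np
    import craft_utils  # assumes the CCPN directory is on sys.path

    # synthetic half-resolution score maps: one bright text blob, no link activity
    textmap = np.zeros((60, 100), dtype=np.float32)
    textmap[20:30, 10:40] = 0.9
    linkmap = np.zeros_like(textmap)

    boxes, polys = craft_utils.getDetBoxes(
        textmap, linkmap, text_threshold=0.7, link_threshold=0.4,
        low_text=0.4, poly=False)

    # undo the resize ratio and the network's half-resolution output (ratio_net=2)
    boxes = craft_utils.adjustResultCoordinates(boxes, ratio_w=1.0, ratio_h=1.0)
    print(len(boxes), boxes[0])  # 1 box: a 4x2 array of corner coordinates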

BIN
CCPN/data/1.png


BIN
CCPN/figures/craft_example.gif


+ 76 - 0
CCPN/file_utils.py

@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+import os
+import numpy as np
+import cv2
+import imgproc
+
+# borrowed from https://github.com/lengstrom/fast-style-transfer/blob/master/src/utils.py
+def get_files(img_dir):
+    imgs, masks, xmls = list_files(img_dir)
+    return imgs, masks, xmls
+
+def list_files(in_path):
+    img_files = []
+    mask_files = []
+    gt_files = []
+    for (dirpath, dirnames, filenames) in os.walk(in_path):
+        for file in filenames:
+            filename, ext = os.path.splitext(file)
+            ext = str.lower(ext)
+            if ext in ('.jpg', '.jpeg', '.gif', '.png', '.pgm'):
+                img_files.append(os.path.join(dirpath, file))
+            elif ext == '.bmp':
+                mask_files.append(os.path.join(dirpath, file))
+            elif ext in ('.xml', '.gt', '.txt'):
+                gt_files.append(os.path.join(dirpath, file))
+            elif ext == '.zip':
+                continue
+    # img_files.sort()
+    # mask_files.sort()
+    # gt_files.sort()
+    return img_files, mask_files, gt_files
+
+def saveResult(img_file, img, boxes, dirname='./result/', verticals=None, texts=None):
+        """ save text detection result one by one
+        Args:
+            img_file (str): image file name
+            img (array): raw image context
+            boxes (array): array of result file
+                Shape: [num_detections, 4] for BB output / [num_detections, 4] for QUAD output
+        Return:
+            None
+        """
+        img = np.array(img)
+
+        # make result file list
+        filename, file_ext = os.path.splitext(os.path.basename(img_file))
+
+        # result directory
+        res_file = dirname + "res_" + filename + '.txt'
+        res_img_file = dirname + "res_" + filename + '.jpg'
+
+        if not os.path.isdir(dirname):
+            os.mkdir(dirname)
+
+        with open(res_file, 'w') as f:
+            for i, box in enumerate(boxes):
+                poly = np.array(box).astype(np.int32).reshape((-1))
+                strResult = ','.join([str(p) for p in poly]) + '\r\n'
+                f.write(strResult)
+
+                poly = poly.reshape(-1, 2)
+                cv2.polylines(img, [poly.reshape((-1, 1, 2))], True, color=(0, 0, 255), thickness=2)
+                ptColor = (0, 255, 255)
+                if verticals is not None:
+                    if verticals[i]:
+                        ptColor = (255, 0, 0)
+
+                if texts is not None:
+                    font = cv2.FONT_HERSHEY_SIMPLEX
+                    font_scale = 0.5
+                    cv2.putText(img, "{}".format(texts[i]), (poly[0][0]+1, poly[0][1]+1), font, font_scale, (0, 0, 0), thickness=1)
+                    cv2.putText(img, "{}".format(texts[i]), tuple(poly[0]), font, font_scale, (0, 255, 255), thickness=1)
+
+        # Save result image
+        cv2.imwrite(res_img_file, img)
+

+ 70 - 0
CCPN/imgproc.py

@@ -0,0 +1,70 @@
+"""  
+Copyright (c) 2019-present NAVER Corp.
+MIT License
+"""
+
+# -*- coding: utf-8 -*-
+import numpy as np
+from skimage import io
+import cv2
+
+def loadImage(img_file):
+    img = io.imread(img_file)           # RGB order
+    if img.shape[0] == 2: img = img[0]
+    if len(img.shape) == 2: img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
+    if img.shape[2] == 4: img = img[:, :, :3]
+    img = np.array(img)
+
+    return img
+
+def normalizeMeanVariance(in_img, mean=(0.485, 0.456, 0.406), variance=(0.229, 0.224, 0.225)):
+    # should be RGB order
+    img = in_img.copy().astype(np.float32)
+
+    img -= np.array([mean[0] * 255.0, mean[1] * 255.0, mean[2] * 255.0], dtype=np.float32)
+    img /= np.array([variance[0] * 255.0, variance[1] * 255.0, variance[2] * 255.0], dtype=np.float32)
+    return img
+
+def denormalizeMeanVariance(in_img, mean=(0.485, 0.456, 0.406), variance=(0.229, 0.224, 0.225)):
+    # should be RGB order
+    img = in_img.copy()
+    img *= variance
+    img += mean
+    img *= 255.0
+    img = np.clip(img, 0, 255).astype(np.uint8)
+    return img
+
+def resize_aspect_ratio(img, square_size, interpolation, mag_ratio=1):
+    height, width, channel = img.shape
+
+    # magnify image size
+    target_size = mag_ratio * max(height, width)
+
+    # set original image size
+    if target_size > square_size:
+        target_size = square_size
+    
+    ratio = target_size / max(height, width)    
+
+    target_h, target_w = int(height * ratio), int(width * ratio)
+    proc = cv2.resize(img, (target_w, target_h), interpolation=interpolation)
+
+    # make canvas and paste image
+    target_h32, target_w32 = target_h, target_w
+    if target_h % 32 != 0:
+        target_h32 = target_h + (32 - target_h % 32)
+    if target_w % 32 != 0:
+        target_w32 = target_w + (32 - target_w % 32)
+    resized = np.zeros((target_h32, target_w32, channel), dtype=np.float32)
+    resized[0:target_h, 0:target_w, :] = proc
+    target_h, target_w = target_h32, target_w32
+
+    size_heatmap = (int(target_w/2), int(target_h/2))
+
+    return resized, ratio, size_heatmap
+
+def cvt2HeatmapImg(img):
+    img = (np.clip(img, 0, 1) * 255).astype(np.uint8)
+    img = cv2.applyColorMap(img, cv2.COLORMAP_JET)
+    return img
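
These helpers chain into the preprocessing used by test.py: load RGB, resize to a multiple of 32 while keeping the aspect ratio, normalize with ImageNet statistics, and move to CHW. A minimal sketch, assuming the CCPN directory is the working directory (data/1.png ships with the repo):

    import cv2
    import torch
    import imgproc

    image = imgproc.loadImage('data/1.png')  # RGB, HxWx3
    resized, ratio, heat_size = imgproc.resize_aspect_ratio(
        image, square_size=1280, interpolation=cv2.INTER_LINEAR, mag_ratio=1.5)
    x = imgproc.normalizeMeanVariance(resized)
    x = torch.from_numpy(x).permute(2, 0, 1).unsqueeze(0)  # 1 x 3 x H' x W'
    print(x.shape, 1 / ratio)  # 1/ratio maps detections back to the original scale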

+ 65 - 0
CCPN/refinenet.py

@@ -0,0 +1,65 @@
+"""  
+Copyright (c) 2019-present NAVER Corp.
+MIT License
+"""
+
+# -*- coding: utf-8 -*-
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.autograd import Variable
+from basenet.vgg16_bn import init_weights
+
+
+class RefineNet(nn.Module):
+    def __init__(self):
+        super(RefineNet, self).__init__()
+
+        self.last_conv = nn.Sequential(
+            nn.Conv2d(34, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True),
+            nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True),
+            nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True)
+        )
+
+        self.aspp1 = nn.Sequential(
+            nn.Conv2d(64, 128, kernel_size=3, dilation=6, padding=6), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
+            nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
+            nn.Conv2d(128, 1, kernel_size=1)
+        )
+
+        self.aspp2 = nn.Sequential(
+            nn.Conv2d(64, 128, kernel_size=3, dilation=12, padding=12), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
+            nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
+            nn.Conv2d(128, 1, kernel_size=1)
+        )
+
+        self.aspp3 = nn.Sequential(
+            nn.Conv2d(64, 128, kernel_size=3, dilation=18, padding=18), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
+            nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
+            nn.Conv2d(128, 1, kernel_size=1)
+        )
+
+        self.aspp4 = nn.Sequential(
+            nn.Conv2d(64, 128, kernel_size=3, dilation=24, padding=24), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
+            nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
+            nn.Conv2d(128, 1, kernel_size=1)
+        )
+
+        init_weights(self.last_conv.modules())
+        init_weights(self.aspp1.modules())
+        init_weights(self.aspp2.modules())
+        init_weights(self.aspp3.modules())
+        init_weights(self.aspp4.modules())
+
+    def forward(self, y, upconv4):
+        refine = torch.cat([y.permute(0,3,1,2), upconv4], dim=1)
+        refine = self.last_conv(refine)
+
+        aspp1 = self.aspp1(refine)
+        aspp2 = self.aspp2(refine)
+        aspp3 = self.aspp3(refine)
+        aspp4 = self.aspp4(refine)
+
+        out = aspp1 + aspp2 + aspp3 + aspp4
+        return out.permute(0, 2, 3, 1)  # , refine.permute(0,2,3,1)
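
RefineNet consumes the raw CRAFT output y (B x H x W x 2) plus the 32-channel upconv4 feature, so the concatenation has 2 + 32 = 34 channels, matching the first convolution; the four ASPP branches (dilations 6/12/18/24) are summed into one refined link map. A shape check with random tensors, assuming basenet/vgg16_bn.py is importable:

    import torch
    from refinenet import RefineNet  # assumes the CCPN directory is on sys.path

    net = RefineNet().eval()
    y = torch.randn(1, 64, 64, 2)      # CRAFT output: text + link channels
    feat = torch.randn(1, 32, 64, 64)  # upconv4 feature map
    with torch.no_grad():
        out = net(y, feat)
    print(out.shape)  # torch.Size([1, 64, 64, 1]): the refined link score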

+ 5 - 0
CCPN/requirements.txt

@@ -0,0 +1,5 @@
+torch==0.4.1.post2
+torchvision==0.2.1
+opencv-python==3.4.2.17
+scikit-image==0.14.2
+scipy==1.1.0

+ 171 - 0
CCPN/test.py

@@ -0,0 +1,171 @@
+"""  
+Copyright (c) 2019-present NAVER Corp.
+MIT License
+"""
+
+# -*- coding: utf-8 -*-
+import sys
+import os
+import time
+import argparse
+
+import torch
+import torch.nn as nn
+import torch.backends.cudnn as cudnn
+from torch.autograd import Variable
+
+from PIL import Image
+
+import cv2
+from skimage import io
+import numpy as np
+import craft_utils
+import imgproc
+import file_utils
+import json
+import zipfile
+
+from craft import CRAFT
+
+from collections import OrderedDict
+def copyStateDict(state_dict):
+    if list(state_dict.keys())[0].startswith("module"):
+        start_idx = 1
+    else:
+        start_idx = 0
+    new_state_dict = OrderedDict()
+    for k, v in state_dict.items():
+        name = ".".join(k.split(".")[start_idx:])
+        new_state_dict[name] = v
+    return new_state_dict
+
+def str2bool(v):
+    return v.lower() in ("yes", "y", "true", "t", "1")
+
+parser = argparse.ArgumentParser(description='CRAFT Text Detection')
+parser.add_argument('--trained_model', default='weights/craft_mlt_25k.pth', type=str, help='pretrained model')
+parser.add_argument('--text_threshold', default=0.7, type=float, help='text confidence threshold')
+parser.add_argument('--low_text', default=0.4, type=float, help='text low-bound score')
+parser.add_argument('--link_threshold', default=0.4, type=float, help='link confidence threshold')
+parser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda for inference')
+parser.add_argument('--canvas_size', default=1280, type=int, help='image size for inference')
+parser.add_argument('--mag_ratio', default=1.5, type=float, help='image magnification ratio')
+parser.add_argument('--poly', default=False, action='store_true', help='enable polygon type')
+parser.add_argument('--show_time', default=False, action='store_true', help='show processing time')
+parser.add_argument('--test_folder', default='./data/', type=str, help='folder path to input images')
+parser.add_argument('--refine', default=False, action='store_true', help='enable link refiner')
+parser.add_argument('--refiner_model', default='weights/craft_refiner_CTW1500.pth', type=str, help='pretrained refiner model')
+
+args = parser.parse_args()
+
+
+""" For test images in a folder """
+image_list, _, _ = file_utils.get_files(args.test_folder)
+
+result_folder = './result/'
+if not os.path.isdir(result_folder):
+    os.mkdir(result_folder)
+
+def test_net(net, image, text_threshold, link_threshold, low_text, cuda, poly, refine_net=None):
+    t0 = time.time()
+
+    # resize
+    img_resized, target_ratio, size_heatmap = imgproc.resize_aspect_ratio(image, args.canvas_size, interpolation=cv2.INTER_LINEAR, mag_ratio=args.mag_ratio)
+    ratio_h = ratio_w = 1 / target_ratio
+
+    # preprocessing
+    x = imgproc.normalizeMeanVariance(img_resized)
+    x = torch.from_numpy(x).permute(2, 0, 1)    # [h, w, c] to [c, h, w]
+    x = Variable(x.unsqueeze(0))                # [c, h, w] to [b, c, h, w]
+    if cuda:
+        x = x.cuda()
+
+    # forward pass
+    with torch.no_grad():
+        y, feature = net(x)
+
+    # make score and link map
+    score_text = y[0,:,:,0].cpu().data.numpy()
+    score_link = y[0,:,:,1].cpu().data.numpy()
+
+    # refine link
+    if refine_net is not None:
+        with torch.no_grad():
+            y_refiner = refine_net(y, feature)
+        score_link = y_refiner[0,:,:,0].cpu().data.numpy()
+
+    t0 = time.time() - t0
+    t1 = time.time()
+
+    # Post-processing
+    boxes, polys = craft_utils.getDetBoxes(score_text, score_link, text_threshold, link_threshold, low_text, poly)
+
+    # coordinate adjustment
+    boxes = craft_utils.adjustResultCoordinates(boxes, ratio_w, ratio_h)
+    polys = craft_utils.adjustResultCoordinates(polys, ratio_w, ratio_h)
+    for k in range(len(polys)):
+        if polys[k] is None: polys[k] = boxes[k]
+
+    t1 = time.time() - t1
+
+    # render results (optional)
+    render_img = score_text.copy()
+    render_img = np.hstack((render_img, score_link))
+    ret_score_text = imgproc.cvt2HeatmapImg(render_img)
+
+    if args.show_time: print("\ninfer/postproc time : {:.3f}/{:.3f}".format(t0, t1))
+
+    return boxes, polys, ret_score_text
+
+
+
+if __name__ == '__main__':
+    # load net
+    net = CRAFT()     # initialize
+
+    print('Loading weights from checkpoint (' + args.trained_model + ')')
+    if args.cuda:
+        net.load_state_dict(copyStateDict(torch.load(args.trained_model)))
+    else:
+        net.load_state_dict(copyStateDict(torch.load(args.trained_model, map_location='cpu')))
+
+    if args.cuda:
+        net = net.cuda()
+        net = torch.nn.DataParallel(net)
+        cudnn.benchmark = False
+
+    net.eval()
+
+    # LinkRefiner
+    refine_net = None
+    if args.refine:
+        from refinenet import RefineNet
+        refine_net = RefineNet()
+        print('Loading weights of refiner from checkpoint (' + args.refiner_model + ')')
+        if args.cuda:
+            refine_net.load_state_dict(copyStateDict(torch.load(args.refiner_model)))
+            refine_net = refine_net.cuda()
+            refine_net = torch.nn.DataParallel(refine_net)
+        else:
+            refine_net.load_state_dict(copyStateDict(torch.load(args.refiner_model, map_location='cpu')))
+
+        refine_net.eval()
+        args.poly = True
+
+    t = time.time()
+
+    # load data
+    for k, image_path in enumerate(image_list):
+        print("Test image {:d}/{:d}: {:s}".format(k+1, len(image_list), image_path), end='\r')
+        image = imgproc.loadImage(image_path)
+
+        bboxes, polys, score_text = test_net(net, image, args.text_threshold, args.link_threshold, args.low_text, args.cuda, args.poly, refine_net)
+
+        # save score text
+        filename, file_ext = os.path.splitext(os.path.basename(image_path))
+        mask_file = result_folder + "/res_" + filename + '_mask.jpg'
+        cv2.imwrite(mask_file, score_text)
+
+        file_utils.saveResult(image_path, image[:,:,::-1], polys, dirname=result_folder)
+
+    print("elapsed time : {}s".format(time.time() - t))

+ 190 - 0
CCPN/textbbx.py

@@ -0,0 +1,190 @@
+"""  
+Copyright (c) 2019-present NAVER Corp.
+MIT License
+"""
+
+# -*- coding: utf-8 -*-
+import sys
+import os
+import time
+import argparse
+
+import torch
+import torch.nn as nn
+import torch.backends.cudnn as cudnn
+from torch.autograd import Variable
+
+from PIL import Image
+
+import cv2
+from skimage import io
+import numpy as np
+import craft_utils
+import imgproc
+import file_utils
+import json
+import zipfile
+
+from craft import CRAFT
+
+from collections import OrderedDict
+
+
+def copyStateDict(state_dict):
+    if list(state_dict.keys())[0].startswith("module"):
+        start_idx = 1
+    else:
+        start_idx = 0
+    new_state_dict = OrderedDict()
+    for k, v in state_dict.items():
+        name = ".".join(k.split(".")[start_idx:])
+        new_state_dict[name] = v
+    return new_state_dict
+
+
+def str2bool(v):
+    return v.lower() in ("yes", "y", "true", "t", "1")
+
+
+parser = argparse.ArgumentParser(description='CRAFT Text Detection')
+parser.add_argument('--trained_model', default=r'D:\试卷切割\CRAFT-pytorch\weights\craft_mlt_25k.pth', type=str, help='pretrained model')
+parser.add_argument('--text_threshold', default=0.3, type=float, help='text confidence threshold')
+parser.add_argument('--low_text', default=0.4, type=float, help='text low-bound score')
+parser.add_argument('--link_threshold', default=0.6, type=float, help='link confidence threshold')
+parser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda for inference')
+parser.add_argument('--canvas_size', default=1280, type=int, help='image size for inference')
+parser.add_argument('--mag_ratio', default=1.5, type=float, help='image magnification ratio')
+parser.add_argument('--poly', default=False, action='store_true', help='enable polygon type')
+parser.add_argument('--show_time', default=False, action='store_true', help='show processing time')
+parser.add_argument('--test_folder', default='./data/', type=str, help='folder path to input images')
+parser.add_argument('--refine', default=False, action='store_true', help='enable link refiner')
+parser.add_argument('--refiner_model', default=r'D:\试卷切割\CRAFT-pytorch\weights\craft_refiner_CTW1500.pth', type=str,
+                    help='pretrained refiner model')
+
+args = parser.parse_args()
+
+""" For test images in a folder """
+image_list, _, _ = file_utils.get_files(args.test_folder)
+
+result_folder = './result/'
+if not os.path.isdir(result_folder):
+    os.mkdir(result_folder)
+
+
+def test_net(net, image, text_threshold, link_threshold, low_text, cuda, poly, refine_net=None):
+    t0 = time.time()
+
+    # resize
+    img_resized, target_ratio, size_heatmap = imgproc.resize_aspect_ratio(image, args.canvas_size,
+                                                                          interpolation=cv2.INTER_LINEAR,
+                                                                          mag_ratio=args.mag_ratio)
+    ratio_h = ratio_w = 1 / target_ratio
+
+    # preprocessing
+    x = imgproc.normalizeMeanVariance(img_resized)
+    x = torch.from_numpy(x).permute(2, 0, 1)  # [h, w, c] to [c, h, w]
+    x = Variable(x.unsqueeze(0))  # [c, h, w] to [b, c, h, w]
+    if cuda:
+        x = x.cuda()
+
+    # forward pass
+    with torch.no_grad():
+        y, feature = net(x)
+
+    # make score and link map
+    score_text = y[0, :, :, 0].cpu().data.numpy()
+    score_link = y[0, :, :, 1].cpu().data.numpy()
+
+    # refine link
+    if refine_net is not None:
+        with torch.no_grad():
+            y_refiner = refine_net(y, feature)
+        score_link = y_refiner[0, :, :, 0].cpu().data.numpy()
+
+    t0 = time.time() - t0
+    t1 = time.time()
+    # Post-processing
+    boxes, polys = craft_utils.getDetBoxes(score_text, score_link, text_threshold, link_threshold, low_text, poly)
+
+    # coordinate adjustment
+    boxes = craft_utils.adjustResultCoordinates(boxes, ratio_w, ratio_h)
+    polys = craft_utils.adjustResultCoordinates(polys, ratio_w, ratio_h)
+    for k in range(len(polys)):
+        if polys[k] is None: polys[k] = boxes[k]
+
+    t1 = time.time() - t1
+
+    # render results (optional)
+    render_img = score_text.copy()
+    render_img = np.hstack((render_img, score_link))
+    ret_score_text = imgproc.cvt2HeatmapImg(render_img)
+
+    if args.show_time: print("\ninfer/postproc time : {:.3f}/{:.3f}".format(t0, t1))
+
+    return boxes, polys, ret_score_text
+
+
+def getResult(boxes):
+    Results = []
+    for i, box in enumerate(boxes):
+        poly = np.array(box).astype(np.int32).reshape((-1))
+        Result = [p for p in poly]
+        Results.append(Result)
+    return Results
+
+
+net = CRAFT()
+
+print('Loading weights from checkpoint (' + args.trained_model + ')')
+if args.cuda:
+    net.load_state_dict(copyStateDict(torch.load(args.trained_model)))
+else:
+    net.load_state_dict(copyStateDict(torch.load(args.trained_model, map_location='cpu')))
+
+if args.cuda:
+    net = net.cuda()
+    net = torch.nn.DataParallel(net)
+    cudnn.benchmark = False
+
+net.eval()
+
+# LinkRefiner
+refine_net = None
+if args.refine:
+    from refinenet import RefineNet
+
+    refine_net = RefineNet()
+    print('Loading weights of refiner from checkpoint (' + args.refiner_model + ')')
+    if args.cuda:
+        refine_net.load_state_dict(copyStateDict(torch.load(args.refiner_model)))
+        refine_net = refine_net.cuda()
+        refine_net = torch.nn.DataParallel(refine_net)
+    else:
+        refine_net.load_state_dict(copyStateDict(torch.load(args.refiner_model, map_location='cpu')))
+
+    refine_net.eval()
+    args.poly = True
+
+if __name__ == '__main__':
+    # load net
+    # net = CRAFT()  # initialize
+
+    t = time.time()
+
+    # load data
+    for k, image_path in enumerate(image_list):
+        image = imgproc.loadImage(image_path)
+
+        bboxes, polys, score_text = test_net(net, image, args.text_threshold, args.link_threshold, args.low_text,
+                                             args.cuda, args.poly, refine_net)
+
+        # save score text
+        filename, file_ext = os.path.splitext(os.path.basename(image_path))
+        mask_file = result_folder + "/res_" + filename + '_mask.jpg'
+        cv2.imwrite(mask_file, score_text)
+
+        file_utils.saveResult(image_path, image[:, :, ::-1], polys, dirname=result_folder)
+        print('====>', getResult(polys))
+    print("elapsed time : {}s".format(time.time() - t))

+ 31 - 0
DeepModel.py

@@ -0,0 +1,31 @@
+class DeePredict:
+    def __init__(self, model=None, bridge=None):
+        self._model = model
+        self._bridge = bridge
+
+    def model(self, *args, **kwargs):
+        if self._model is not None:
+            return self._model(*args, **kwargs)
+        else:
+            raise ValueError('missing model')
+
+    def bridge(self, *args, **kwargs):
+        return self._bridge(*args, **kwargs)
+
+    def predict(self, *args, **kwargs):
+        x = self.model(*args, **kwargs)
+        if self._bridge is not None:
+            y = self.bridge(x)
+            return y
+        else:
+            return x
+
+if __name__ == '__main__':
+    def f1(x):
+        return x
+
+
+    dp = DeePredict(f1, f1)
+    print(dp.predict([1]))
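
DeePredict is a thin model-plus-postprocess wrapper; TextModel.py imports it but never wires it up. One plausible composition (hypothetical, not in the repo; importing textbbx loads the detector weights at import time):

    from DeepModel import DeePredict
    from textbbx import net, imgproc, test_net, args, refine_net, getResult

    def detect(image_path):
        image = imgproc.loadImage(image_path)
        _, polys, _ = test_net(net, image, args.text_threshold, args.link_threshold,
                               args.low_text, args.cuda, args.poly, refine_net)
        return polys

    text_predictor = DeePredict(model=detect, bridge=getResult)
    # text_predictor.predict('image_dir/1.png') -> list of 8-int boxes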

+ 36 - 0
Detection.py

@@ -0,0 +1,36 @@
+# -*- coding:utf-8 -*-
+import numpy as np
+
+
+class Detection:
+    def __init__(self):
+        ...
+
+    def predict(self, image: np.ndarray):
+        return
+
+
+Detection = Detection()  # module-level singleton; the class name is deliberately shadowed
+
+
+# Detection.predict()

+ 326 - 0
Near.py

@@ -0,0 +1,326 @@
+# -*- coding:utf-8 -*-
+import numpy as np
+import pandas as pd
+import numba as nb
+
+
+@nb.njit
+def _find_right(boxes, i, j, find_range):
+    for xs in range(int(boxes[i][2]), int(boxes[i][2] + find_range)):
+        for ys in range(boxes[i][1] + 1, boxes[i][3] - 1):
+            if boxes[j][0] < xs < boxes[j][2] and boxes[j][1] < ys < boxes[j][3]:
+                boxes[i][0] = min(boxes[i][0], boxes[j][0])
+                boxes[i][1] = min(boxes[i][1], boxes[j][1])
+                boxes[i][2] = max(boxes[i][2], boxes[j][2])
+                boxes[i][3] = max(boxes[i][3], boxes[j][3])
+
+                boxes[j] = np.array([0, 0, 0, 0, 10])
+
+                return xs - boxes[i][2]
+
+
+@nb.njit
+def _find_down(boxes, y_max, i, j):
+    for xs in range(boxes[i][0], boxes[i][2]):
+        for ys in range(boxes[i][3], y_max):
+            if boxes[j][0] < xs < boxes[j][2] and boxes[j][1] < ys < boxes[j][3]:
+                boxes[i][0] = min(boxes[i][0], boxes[j][0])
+                boxes[i][1] = min(boxes[i][1], boxes[j][1])
+                boxes[i][2] = max(boxes[i][2], boxes[j][2])
+                boxes[i][3] = max(boxes[i][3], boxes[j][3])
+                return 1
+
+
+@nb.njit
+def find_in(in_box, out_box, i, j, find_range=0):
+    for xs in range(in_box[i][0] - find_range, in_box[i][2] + find_range):
+        for ys in range(in_box[i][1] - find_range, in_box[i][3] + find_range):
+            if out_box[j][0] <= xs <= out_box[j][2] and out_box[j][1] <= ys <= out_box[j][3]:
+                out_box[j][0] = min(out_box[j][0], in_box[i][0])
+                out_box[j][1] = min(out_box[j][1], in_box[i][1])
+                out_box[j][2] = max(out_box[j][2], in_box[i][2])
+                out_box[j][3] = max(out_box[j][3], in_box[i][3])
+                in_box[i] = np.array([0, 0, 0, 0, 10])
+                return 1
+
+
+@nb.njit
+def find_right(the_boxes, unknow_box, i, j, find_range):
+    for xs in range(the_boxes[i][2], the_boxes[i][2] + int(find_range)):
+        for ys in range(the_boxes[i][1], the_boxes[i][3]):
+            if unknow_box[j][0] < xs < unknow_box[j][2] and unknow_box[j][1] < ys < unknow_box[j][3]:
+                the_boxes[i][0] = min(the_boxes[i][0], unknow_box[j][0])
+                the_boxes[i][1] = min(the_boxes[i][1], unknow_box[j][1])
+                the_boxes[i][2] = max(the_boxes[i][2], unknow_box[j][2])
+                the_boxes[i][3] = max(the_boxes[i][3], unknow_box[j][3])
+                unknow_box[j] = np.array([0, 0, 0, 0, 10])
+                return xs - the_boxes[i][2]
+
+
+@nb.njit
+def find_left(the_boxes, unknow_box, i, j, find_range):
+    for xs in range(the_boxes[i][0], max(int(the_boxes[i][0] - find_range), 0), -1):
+        for ys in range(the_boxes[i][1], the_boxes[i][3]):
+            if unknow_box[j][0] < xs < unknow_box[j][2] and unknow_box[j][1] < ys < unknow_box[j][3]:
+                the_boxes[i][0] = min(the_boxes[i][0], unknow_box[j][0])
+                the_boxes[i][1] = min(the_boxes[i][1], unknow_box[j][1])
+                the_boxes[i][2] = max(the_boxes[i][2], unknow_box[j][2])
+                the_boxes[i][3] = max(the_boxes[i][3], unknow_box[j][3])
+                unknow_box[j] = np.array([0, 0, 0, 0, 10])
+                return 1
+
+
+@nb.njit
+def find_down(the_boxes, unknow_box, i, j, find_range):
+    for xs in range(int(unknow_box[j][0]), int(unknow_box[j][2])):
+        for ys in range(int(unknow_box[j][3]), int(unknow_box[j][3] + find_range)):
+            if the_boxes[i][0] < xs < the_boxes[i][2] and the_boxes[i][1] < ys < the_boxes[i][3]:
+                the_boxes[i][0] = min(the_boxes[i][0], unknow_box[j][0])
+                the_boxes[i][1] = min(the_boxes[i][1], unknow_box[j][1])
+                the_boxes[i][2] = max(the_boxes[i][2], unknow_box[j][2])
+                the_boxes[i][3] = max(the_boxes[i][3], unknow_box[j][3])
+                unknow_box[j] = np.array([0, 0, 0, 0, 10])
+                return ys - unknow_box[j][3]
+
+
+@nb.njit
+def find_top(the_boxes, unknow_box, i, j, find_range):
+    for xs in range(int(unknow_box[j][0]), int(unknow_box[j][2])):
+        for ys in range(int(unknow_box[j][1]), int(unknow_box[j][1] - find_range), -1):
+            if the_boxes[i][0] < xs < the_boxes[i][2] and the_boxes[i][1] < ys < the_boxes[i][3]:
+                the_boxes[i][0] = min(the_boxes[i][0], unknow_box[j][0])
+                the_boxes[i][1] = min(the_boxes[i][1], unknow_box[j][1])
+                the_boxes[i][2] = max(the_boxes[i][2], unknow_box[j][2])
+                the_boxes[i][3] = max(the_boxes[i][3], unknow_box[j][3])
+                unknow_box[j] = np.array([0, 0, 0, 0, 10])
+                return unknow_box[j][3] - ys
+
+
+def neighbor_change(tex_box, im_box):
+    """
+    后期处理 临近合并
+    :param tex_box: 文字区域
+    :param im_box:  未知区域
+    :return:
+    """
+    # return tex_box, im_box
+    text_box, image_box, unknow_box, small_text = [], [], [], []
+    tex_box_df = pd.DataFrame(tex_box)
+    height = tex_box_df[3] - tex_box_df[1]
+    mix = height.median()
+    min_xs = 1.2 * mix ** 2
+    img_w = int(tex_box_df[2].max())
+    for i in tex_box:
+        if (i[2] - i[0]) * (i[3] - i[1]) < min_xs and False:  # 'and False' deliberately disables this split
+            small_text.append(np.array(i))
+            unknow_box.append(np.array(i))
+        else:
+            text_box.append(i)
+
+    # separate large images from small ones
+    for i in im_box:
+        if (i[2] - i[0]) * (i[3] - i[1]) < min_xs:
+            unknow_box.append(i)
+        else:
+            image_box.append(i)
+    if len(image_box):
+        image_box = pd.DataFrame(image_box)
+        image_box[4] = 1
+        image_box = image_box.sort_values(by=0).astype(np.int).values
+    if len(unknow_box) > 0:
+        unknow_box = pd.DataFrame(unknow_box)
+        unknow_box[4] = 2
+        unknow_box = unknow_box.sort_values(by=0).astype(np.int).values
+
+    tex_box = pd.DataFrame(text_box)
+    tex_box[4] = 1
+    tex_box = tex_box.sort_values(by=0).astype(np.int).values
+
+    # paste small patches upward onto images
+    for i in range(len(image_box)):
+        for j in range(len(unknow_box)):
+            find_top(image_box, unknow_box, i, j, mix // 2)
+
+    unknow_box = np.array([i for i in unknow_box if i[0]])
+
+    # paste small patches downward onto images
+    for i in range(len(image_box)):
+        for j in range(len(unknow_box)):
+            find_down(image_box, unknow_box, i, j, mix // 2)
+
+    unknow_box = np.array([i for i in unknow_box if i[0]])
+
+    # paste small patches leftward onto images
+    for i in range(len(image_box)):
+        for j in range(len(unknow_box)):
+            find_left(image_box, unknow_box, i, j, mix // 2)
+
+    unknow_box = np.array([i for i in unknow_box if i[0]])
+
+    # paste small patches rightward onto images
+    for i in range(len(image_box)):
+        for j in range(len(unknow_box)):
+            find_right(image_box, unknow_box, i, j, mix // 2)
+
+    unknow_box = np.array([i for i in unknow_box if i[0]])
+
+    # remove text boxes that fall inside image boxes
+    for i in range(len(small_text)):
+        for j in range(len(image_box)):
+            if not all(small_text[i] == image_box[j]):
+                find_in(small_text, image_box, i, j)
+
+    # extend text boxes to the right
+    for i in range(len(tex_box)):
+        for j in range(i + 1, len(tex_box)):
+            _find_right(tex_box, i, j, mix)
+
+    tex_box = np.array([i for i in tex_box if i[0]])
+
+    text_box = []
+    for i in tex_box:
+        if 1 < (i[2] - i[0]) * (i[3] - i[1]) < min_xs:
+            small_text.append(np.array(i))
+        else:
+            text_box.append(i)
+    small_text = np.array([i for i in small_text if i[0]])
+    tex_box = pd.DataFrame(text_box)
+    tex_box[4] = 1
+    tex_box = tex_box.sort_values(by=0).astype(np.int).values
+    if len(small_text) > 0:
+        small_text = pd.DataFrame(small_text)
+        small_text[4] = 3
+        small_text = small_text.sort_values(by=0).astype(np.int).values
+
+    # merge small patches into text boxes on their left
+    for i in range(len(tex_box)):
+        if (tex_box[i][2] - tex_box[i][0]) * (tex_box[i][3] - tex_box[i][1]) > 5 * min_xs:
+            for j in range(len(unknow_box)):
+                find_left(tex_box, unknow_box, i, j, mix)
+
+    for i in range(len(tex_box)):
+        if (tex_box[i][2] - tex_box[i][0]) * (tex_box[i][3] - tex_box[i][1]) > 5 * min_xs:
+            for j in range(len(small_text)):
+                find_left(tex_box, small_text, i, j, 5 * mix)
+
+    # search downward from text boxes
+    for i in range(len(tex_box)):
+        for j in range(len(unknow_box)):
+            find_down(tex_box, unknow_box, i, j, mix // 3)
+    unknow_box = np.array([i for i in unknow_box if i[0]])
+    for i in range(len(tex_box)):
+        for j in range(len(small_text)):
+            find_down(tex_box, small_text, i, j, mix // 3)
+    small_text = np.array([i for i in small_text if i[0]])
+    # search upward from text boxes
+    for i in range(len(tex_box)):
+        for j in range(len(unknow_box)):
+            find_top(tex_box, unknow_box, i, j, mix // 3)
+
+    for i in range(len(tex_box)):
+        for j in range(len(small_text)):
+            find_top(tex_box, small_text, i, j, mix // 3)
+
+    image_box = np.array([i for i in image_box if i[0]])
+    tex_box = np.array([i for i in tex_box if i[0]])
+
+    # remove image boxes nested inside other image boxes
+    for i in range(len(image_box)):
+        for j in range(i + 1, len(image_box)):
+            if not all(image_box[i] == image_box[j]):
+                find_in(image_box, image_box, i, j, 0)
+    # remove text boxes nested inside other text boxes
+    for i in range(len(tex_box)):
+        for j in range(i + 1, len(tex_box)):
+            if i != j:
+                find_in(tex_box, tex_box, i, j, -int(mix // 5))
+
+
+    for i in range(len(small_text)):
+        for j in range(len(tex_box)):
+            if not all(small_text[i] == tex_box[j]):
+                find_in(small_text, tex_box, i, j, 0)
+
+    for i in range(len(tex_box)):
+        for j in range(len(image_box)):
+            if not all(tex_box[i] == image_box[j]):
+                find_in(tex_box, image_box, i, j, 0)
+    for i in range(len(small_text)):
+        for j in range(len(image_box)):
+            if not all(small_text[i] == image_box[j]):
+                find_in(small_text, image_box, i, j, 0)
+    text_box_p = [i[:4] for i in tex_box if np.sum(i[:4])] + [i[:4] for i in small_text if np.sum(i[:4])]
+    image_box_p = [i[:4] for i in image_box if np.sum(i[:4])]
+
+    return text_box_p, image_box_p
+
+
+class Neighbor:
+    def __init__(self, tex_box, im_box):
+        self.text_box, self.image_box, self.unknow_box = [], [], []
+        tex_box_df = pd.DataFrame(tex_box)
+        height = tex_box_df[3] - tex_box_df[1]
+        mix = height.median()
+        min_xs = mix ** 2
+
+        for i in tex_box:
+            if (i[2] - i[0]) * (i[3] - i[1]) < min_xs / 2000:
+                self.unknow_box.append(np.array(i))
+            else:
+                self.text_box.append(i)
+
+        for i in im_box:
+            if (i[2] - i[0]) * (i[3] - i[1]) < min_xs:
+                self.unknow_box.append(i)
+            else:
+                self.image_box.append(i)
+        if len(self.image_box):
+            image_box = pd.DataFrame(self.image_box)
+            image_box[4] = 1
+            self.image_box = image_box.sort_values(by=0).astype(np.int).values
+        if len(self.unknow_box) > 0:
+            unknow_box = pd.DataFrame(self.unknow_box)
+            unknow_box[4] = 2
+            self.unknow_box = unknow_box.sort_values(by=0).astype(np.int).values
+        if len(self.text_box) > 0:
+            text_box = pd.DataFrame(self.text_box)
+            text_box[4] = 1
+            self.text_box = text_box.sort_values(by=0).astype(np.int).values
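
neighbor_change takes boxes as [x_min, y_min, x_max, y_max] and repeatedly glues nearby fragments together. A toy run with invented coordinates (use the numpy/pandas era pinned by requirements.txt, since .astype(np.int) is removed from recent numpy):

    import numpy as np
    from Near import neighbor_change  # assumes the repo root is on sys.path

    # two text lines that nearly touch horizontally, plus one large figure
    tex_box = [np.array([10, 10, 60, 30]), np.array([62, 12, 120, 30]),
               np.array([10, 40, 80, 60])]
    im_box = [np.array([150, 10, 400, 300])]

    text_boxes, image_boxes = neighbor_change(tex_box, im_box)
    print(text_boxes)   # the near-touching lines are merged by _find_right
    print(image_boxes)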

+ 16 - 0
TextModel.py

@@ -0,0 +1,16 @@
+from textbbx import net, imgproc, test_net, args, refine_net, getResult
+
+from DeepModel import DeePredict
+
+def text_model(image_path):
+    image = imgproc.loadImage(image_path)
+
+    bboxes, polys, score_text = test_net(net, image, args.text_threshold, args.link_threshold, args.low_text,
+                                         args.cuda, args.poly, refine_net)
+
+
+    return getResult(polys)
+
+
+# print(text_model(r"D:\试卷切割\img\5.png"))

BIN
__pycache__/DeepModel.cpython-36.pyc


BIN
__pycache__/Near.cpython-36.pyc


BIN
__pycache__/TextModel.cpython-36.pyc


BIN
__pycache__/dev_image.cpython-36.pyc


BIN
__pycache__/image_tools.cpython-36.pyc


BIN
__pycache__/neighbor.cpython-36.pyc


BIN
__pycache__/ocrapi.cpython-36.pyc


BIN
__pycache__/tools.cpython-36.pyc


+ 94 - 0
app.py

@@ -0,0 +1,94 @@
+# -*- coding:utf-8 -*-
+
+from flask import Flask, render_template, request, redirect, url_for, make_response, jsonify,Markup
+from werkzeug.utils import secure_filename
+from tools import get_text,get_image
+from datetime import timedelta
+import os
+from dev_image import run_cut
+import json
+
+ALLOWED_EXTENSIONS = {'png', 'jpg', 'JPG', 'PNG', 'bmp'}
+
+
+def allowed_file(filename):
+    return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
+
+
+app = Flask(__name__)
+# set the static-file cache expiry
+app.send_file_max_age_default = timedelta(seconds=1)
+
+
+# @app.route('/upload', methods=['POST', 'GET'])
+@app.route('/', methods=['POST', 'GET'])  # register the route
+def upload():
+    if request.method == 'POST':
+        print(request.files)
+
+        f = request.files['file']
+
+        if not (f and allowed_file(f.filename)):
+            return jsonify({"error": 1001, "msg": "请检查上传的图片类型,仅限于png、PNG、jpg、JPG、bmp"})
+
+        # user_input = request.form.get("name")
+
+        basepath = os.path.dirname(__file__)  # directory of this file
+
+        upload_path = os.path.join('./image_dir', secure_filename(f.filename))  # note: the target folder must already exist, otherwise saving fails
+        # upload_path = os.path.join(basepath, 'static/images', 'test.jpg')
+        print(upload_path)
+        f.save(upload_path)
+
+        # convert the image format and name with OpenCV
+        # img = cv2.imread(upload_path)
+        # cv2.imwrite(os.path.join(basepath, 'static/images', 'test.jpg'), img)
+        # maple = requests.post('http://127.0.0.1:6666/static_pic',json={'path':upload_path}).text
+        # print(maple)
+        run_cut(upload_path,tabel=False)
+        text = get_text()
+        images = get_image()
+        print(text)
+        return render_template('show.html', w=500, contents=text,
+                               images=images
+                               )
+
+    return render_template('upload.html')
+
+
+@app.route('/online', methods=['POST', 'GET'])  # register the route
+def upload_online():
+    if request.method == 'POST':
+        f = request.files['file']
+
+        if not (f and allowed_file(f.filename)):
+            return jsonify({"error": 1001, "msg": "请检查上传的图片类型,仅限于png、PNG、jpg、JPG、bmp"})
+
+        # user_input = request.form.get("name")
+
+        basepath = os.path.dirname(__file__)  # directory of this file
+
+        upload_path = os.path.join('./image_dir', secure_filename(f.filename))  # note: the target folder must already exist, otherwise saving fails
+        # upload_path = os.path.join(basepath, 'static/images', 'test.jpg')
+        print(upload_path)
+        f.save(upload_path)
+
+        # convert the image format and name with OpenCV
+        # img = cv2.imread(upload_path)
+        # cv2.imwrite(os.path.join(basepath, 'static/images', 'test.jpg'), img)
+        # maple = requests.post('http://127.0.0.1:6666/static_pic',json={'path':upload_path}).text
+        # print(maple)
+        run_cut(upload_path,tabel=False)
+        text = get_text(online=False)
+        images = get_image()
+
+        return render_template('show.html', w=500, contents=text,
+                               images=images
+                               )
+
+    return render_template('upload.html')
+
+
+
+if __name__ == '__main__':
+    app.run('0.0.0.0',port=12535)
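
A hypothetical client for the upload endpoint (requires the requests package and the service running on port 12535); the form field must be named 'file':

    import requests

    with open('image_dir/1.png', 'rb') as f:
        resp = requests.post('http://127.0.0.1:12535/',
                             files={'file': ('1.png', f, 'image/png')})
    print(resp.status_code)  # 200 with the rendered show.html on success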

+ 118 - 0
base64crnn.py

@@ -0,0 +1,118 @@
+import base64
+import os
+import re
+from crnn.crnn import crnnOcr
+from ocrapi import ocr
+import numpy as np
+import cv2
+
+def get_image():
+    image_list = []
+    for i in os.listdir('./result/image'):
+        if re.match(r'(\d+)-(\d+)-(\d+)-(\d+)', i):
+            with open(os.path.join('./result/image', i), 'rb') as f:
+                base64_data = base64.b64encode(f.read())
+                s = base64_data.decode()
+                y_min, y_max, x_min, x_max = re.match(r'(\d+)-(\d+)-(\d+)-(\d+)', i).groups()
+                image_list.append({'left':x_min,'top':y_min,'b64':s})
+
+    return image_list
+
+def get_text(online=False):
+    image_list = []
+    for i in os.listdir('./result/text_img'):
+        if re.match(r'(\d+)-(\d+)-(\d+)-(\d+)', i):
+            with open(os.path.join('./result/text_img', i), 'rb') as f:
+                if online:
+                    s = ocr(os.path.join('./result/text_img', i))
+                else:
+                    s = crnnOcr(os.path.join('./result/text_img', i))
+
+                y_min, y_max, x_min, x_max = re.match(r'(\d+)-(\d+)-(\d+)-(\d+)', i).groups()
+                image_list.append({'left':x_min,'top':y_min,
+                       'w':int(x_max)-int(x_min),
+                       'h':int(y_max)-int(y_min),
+                       'size':int((int(y_max)-int(y_min))*0.6),
+                       's':s})
+
+
+    return image_list
+
+
+
+
+def get_image_html():
+    ...
+
+
+def get_text_html():
+    image_list = []
+    for i in os.listdir('./result/text_img'):
+        if re.match(r'(\d+)-(\d+)-(\d+)-(\d+)', i):
+            with open(os.path.join('./result/text_img', i), 'rb') as f:
+                s = ocr(os.path.join('./result/text_img', i))
+                y_min, y_max, x_min, x_max = re.match(r'(\d+)-(\d+)-(\d+)-(\d+)', i).groups()
+                image_list.append({'left':x_min,'top':y_min,
+                       'w':int(x_max)-int(x_min),
+                       'h':int(y_max)-int(y_min),
+                       'size':int((int(y_max)-int(y_min))*0.6),
+                       's':s})
+    return image_list
+
+import json
+def base64ocr(base64_str):
+    imgString = base64.b64decode(base64_str)
+    array = np.frombuffer(imgString, np.uint8)  # np.fromstring is deprecated
+    image = cv2.imdecode(array, cv2.IMREAD_COLOR)
+    # cv2.imshow('1', image); cv2.waitKey()  # debug preview, disabled
+    string = crnnOcr(image)
+
+    return string
+
+if __name__ == '__main__':
+
+
+    import pandas as pd
+    data = pd.read_csv(r'F:\exam_segment_django_0330\x11.csv',encoding='utf8')
+    for i in data['base64']:
+        try:
+            print(base64ocr(i))
+        except Exception as e:
+            print(e)
+
+    # with open(r'F:\exam_segment_django_0330\2.txt') as f:
+    #     for i in f:
+    #         print(base64ocr(i))
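
A minimal round-trip for base64ocr with the functions above in scope (the image path is illustrative; any small text crop works):

    import base64

    with open('image_dir/1.png', 'rb') as f:
        b64 = base64.b64encode(f.read()).decode()
    print(base64ocr(b64))  # recognized string from the CRNN model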

BIN
bbb.png


BIN
binary.png


+ 0 - 0
crnn/__init__.py


BIN
crnn/__pycache__/__init__.cpython-36.pyc


BIN
crnn/__pycache__/crnn.cpython-36.pyc


BIN
crnn/__pycache__/dataset.cpython-36.pyc


BIN
crnn/__pycache__/keys_crnn.cpython-36.pyc


BIN
crnn/__pycache__/util.cpython-36.pyc


+ 23 - 0
crnn/app.py

@@ -0,0 +1,23 @@
+from flask import request, Response,Flask,json
+from gevent.pywsgi import WSGIServer
+from gevent import monkey
+from crnn import crnnOcr
+from PIL import Image
+import numpy as np
+monkey.patch_all()
+app = Flask(__name__)
+
+
+@app.route('/ocr', methods=['POST', 'GET'])
+def repeatss():
+    if request.method == 'POST':
+
+        res = crnnOcr(Image.open(request.json['path']).convert("L"))
+        return json.dumps(res)
+
+
+
+if __name__ == '__main__':
+    # app.run(port=5002)
+    http_server = WSGIServer(('0.0.0.0', 10666), app)
+    http_server.serve_forever()
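
A hypothetical client for this service; the JSON body carries a path that must exist on the server machine:

    import requests

    resp = requests.post('http://127.0.0.1:10666/ocr',
                         json={'path': 'result/text_img/132-145-46-182.png'})
    print(resp.json())  # the recognized text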

+ 110 - 0
crnn/crnn.py

@@ -0,0 +1,110 @@
+# coding:utf-8
+import sys
+from PIL import Image
+sys.path.insert(1, "./crnn")
+import torch
+import torch.utils.data
+from torch.autograd import Variable
+import numpy as np
+import util
+import dataset
+import models.crnn as crnn
+import keys_crnn
+from math import *
+import cv2
+
+GPU = True
+
+
+def dumpRotateImage_(img, degree, pt1, pt2, pt3, pt4):
+    height, width = img.shape[:2]
+    heightNew = int(width * fabs(sin(radians(degree))) + height * fabs(cos(radians(degree))))
+    widthNew = int(height * fabs(sin(radians(degree))) + width * fabs(cos(radians(degree))))
+    matRotation = cv2.getRotationMatrix2D((width / 2, height / 2), degree, 1)
+    matRotation[0, 2] += (widthNew - width) / 2
+    matRotation[1, 2] += (heightNew - height) / 2
+    imgRotation = cv2.warpAffine(img, matRotation, (widthNew, heightNew), borderValue=(255, 255, 255))
+    pt1 = list(pt1)
+    pt3 = list(pt3)
+
+    [[pt1[0]], [pt1[1]]] = np.dot(matRotation, np.array([[pt1[0]], [pt1[1]], [1]]))
+    [[pt3[0]], [pt3[1]]] = np.dot(matRotation, np.array([[pt3[0]], [pt3[1]], [1]]))
+    imgOut = imgRotation[int(pt1[1]):int(pt3[1]), int(pt1[0]):int(pt3[0])]
+    height, width = imgOut.shape[:2]
+    return imgOut
+
+
+def crnnSource():
+    alphabet = keys_crnn.alphabet
+    converter = util.strLabelConverter(alphabet)
+    if torch.cuda.is_available() and GPU:
+        model = crnn.CRNN(32, 1, len(alphabet) + 1, 256, 1).cuda()
+    else:
+        model = crnn.CRNN(32, 1, len(alphabet) + 1, 256, 1).cpu()
+    # path = '../crnn/samples/netCRNN_61_134500.pth'
+    path = './crnn/samples/model_acc97.pth'
+    model.eval()
+    # w = torch.load(path)
+    # ww = {}
+    # for i in w:
+    #     ww[i.replace('module.', '')] = w[i]
+    #
+    # model.load_state_dict(ww)
+    model.load_state_dict(torch.load(path))
+    return model, converter
+
+
+## load the model once at import time
+model, converter = crnnSource()
+
+
+def crnnOcr(image):
+    """
+    crnn模型,ocr识别
+    @@model,
+    @@converter,
+    @@im
+    @@text_recs:text box
+
+    """
+    if isinstance(image,str):
+        image = Image.open(image).convert("L")
+    else:
+        image = Image.fromarray(image).convert("L")
+    scale = image.size[1] * 1.0 / 32
+    w = image.size[0] / scale
+    w = int(w)
+    # print "im size:{},{}".format(image.size,w)
+    transformer = dataset.resizeNormalize((w, 32))
+    if torch.cuda.is_available() and GPU:
+        image = transformer(image).cuda()
+    else:
+        image = transformer(image).cpu()
+
+    image = image.view(1, *image.size())
+    image = Variable(image)
+    model.eval()
+    preds = model(image)
+    _, preds = preds.max(2)
+    preds = preds.transpose(1, 0).contiguous().view(-1)
+    preds_size = Variable(torch.IntTensor([preds.size(0)]))
+    sim_pred = converter.decode(preds.data, preds_size.data, raw=False)
+    if len(sim_pred) > 0:
+        if sim_pred[0] == u'-':
+            sim_pred = sim_pred[1:]
+
+    return quchong(sim_pred)
+
+
+def quchong(s):
+    """Collapse runs of identical adjacent characters ('aab' -> 'ab')."""
+    out = []
+    for ch in s:
+        if not out or out[-1] != ch:
+            out.append(ch)
+    return ''.join(out)
+
+if __name__ == '__main__':
+
+    #
+    print(crnnOcr(Image.open(r'D:\试卷切割\result\text_img\132-145-46-182.png').convert("L")))
+    # print(quchong('abcdefghiijjklmn'))
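
crnnOcr accepts either a file path or a numpy array. A short sketch, run from the repo root (the crop path is illustrative; the weights load when the module is imported):

    import cv2
    from crnn.crnn import crnnOcr

    crop = cv2.imread('result/text_img/132-145-46-182.png')  # a text-line crop
    print(crnnOcr(crop))  # arrays go through Image.fromarray(...).convert('L')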

+ 132 - 0
crnn/dataset.py

@@ -0,0 +1,132 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+import random
+import sys
+
+import lmdb
+import numpy as np
+import six
+import torch
+import torchvision.transforms as transforms
+from PIL import Image
+from torch.utils.data import Dataset
+from torch.utils.data import sampler
+
+
+class lmdbDataset(Dataset):
+    def __init__(self, root=None, transform=None, target_transform=None):
+        self.env = lmdb.open(
+            root,
+            max_readers=1,
+            readonly=True,
+            lock=False,
+            readahead=False,
+            meminit=False)
+
+        if not self.env:
+            print('cannot create lmdb from %s' % (root))
+            sys.exit(0)
+
+        with self.env.begin(write=False) as txn:
+            nSamples = int(txn.get(b'num-samples'))  # lmdb keys are bytes under Python 3
+            self.nSamples = nSamples
+
+        self.transform = transform
+        self.target_transform = target_transform
+
+    def __len__(self):
+        return self.nSamples
+
+    def __getitem__(self, index):
+        assert index <= len(self), 'index range error'
+        index += 1
+        with self.env.begin(write=False) as txn:
+            img_key = ('image-%09d' % index).encode()
+            imgbuf = txn.get(img_key)
+
+            buf = six.BytesIO()
+            buf.write(imgbuf)
+            buf.seek(0)
+            try:
+                img = Image.open(buf).convert('L')
+            except IOError:
+                print('Corrupted image for %d' % index)
+                return self[index + 1]
+
+            if self.transform is not None:
+                img = self.transform(img)
+
+            label_key = ('label-%09d' % index).encode()
+            label = txn.get(label_key).decode('utf-8')
+            if self.target_transform is not None:
+                label = self.target_transform(label)
+
+        return (img, label)
+
+
+class resizeNormalize(object):
+    def __init__(self, size, interpolation=Image.BILINEAR):
+        self.size = size
+        self.interpolation = interpolation
+        self.toTensor = transforms.ToTensor()
+
+    def __call__(self, img):
+        img = img.resize(self.size, self.interpolation)
+        img = self.toTensor(img)
+        img.sub_(0.5).div_(0.5)
+        return img
+
+
+class randomSequentialSampler(sampler.Sampler):
+    def __init__(self, data_source, batch_size):
+        self.num_samples = len(data_source)
+        self.batch_size = batch_size
+
+    def __iter__(self):
+        n_batch = len(self) // self.batch_size
+        tail = len(self) % self.batch_size
+        index = torch.LongTensor(len(self)).fill_(0)
+        for i in range(n_batch):
+            random_start = random.randint(0, len(self) - self.batch_size)
+            batch_index = random_start + torch.arange(0, self.batch_size)  # torch.range is deprecated
+            index[i * self.batch_size:(i + 1) * self.batch_size] = batch_index
+        # deal with tail
+        if tail:
+            random_start = random.randint(0, len(self) - self.batch_size)
+            tail_index = random_start + torch.arange(0, tail)
+            index[(i + 1) * self.batch_size:] = tail_index
+
+        return iter(index)
+
+    def __len__(self):
+        return self.num_samples
+
+
+class alignCollate(object):
+    def __init__(self, imgH=32, imgW=128, keep_ratio=False, min_ratio=1):
+        self.imgH = imgH
+        self.imgW = imgW
+        self.keep_ratio = keep_ratio
+        self.min_ratio = min_ratio
+
+    def __call__(self, batch):
+        images, labels = zip(*batch)
+
+        imgH = self.imgH
+        imgW = self.imgW
+        if self.keep_ratio:
+            ratios = []
+            for image in images:
+                w, h = image.size
+                ratios.append(w / float(h))
+            ratios.sort()
+            max_ratio = ratios[-1]
+            imgW = int(np.floor(max_ratio * imgH))
+            imgW = max(imgH * self.min_ratio, imgW)  # ensure imgW >= imgH * min_ratio
+
+        transform = resizeNormalize((imgW, imgH))
+        images = [transform(image) for image in images]
+        images = torch.cat([t.unsqueeze(0) for t in images], 0)
+
+        return images, labels

File diff suppressed because it is too large
+ 1 - 0
crnn/keys_crnn.py


+ 0 - 0
crnn/models/__init__.py


BIN
crnn/models/__pycache__/__init__.cpython-36.pyc


BIN
crnn/models/__pycache__/crnn.cpython-36.pyc


BIN
crnn/models/__pycache__/utils.cpython-36.pyc


+ 84 - 0
crnn/models/crnn.py

@@ -0,0 +1,84 @@
+import sys
+sys.path.insert(1, "./crnn")
+import torch.nn as nn
+import models.utils as utils
+
+
+class BidirectionalLSTM(nn.Module):
+    def __init__(self, nIn, nHidden, nOut, ngpu):
+        super(BidirectionalLSTM, self).__init__()
+        self.ngpu = ngpu
+
+        self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
+        self.embedding = nn.Linear(nHidden * 2, nOut)
+
+    def forward(self, input):
+        recurrent, _ = utils.data_parallel(self.rnn, input,
+                                           self.ngpu)  # [T, b, h * 2]
+
+        T, b, h = recurrent.size()
+        t_rec = recurrent.view(T * b, h)
+        output = utils.data_parallel(self.embedding, t_rec,
+                                     self.ngpu)  # [T * b, nOut]
+        output = output.view(T, b, -1)
+
+        return output
+
+
+class CRNN(nn.Module):
+    def __init__(self, imgH, nc, nclass, nh, ngpu, n_rnn=2, leakyRelu=False):
+        super(CRNN, self).__init__()
+        self.ngpu = ngpu
+        assert imgH % 16 == 0, 'imgH has to be a multiple of 16'
+
+        ks = [3, 3, 3, 3, 3, 3, 2]
+        ps = [1, 1, 1, 1, 1, 1, 0]
+        ss = [1, 1, 1, 1, 1, 1, 1]
+        nm = [64, 128, 256, 256, 512, 512, 512]
+
+        cnn = nn.Sequential()
+
+        def convRelu(i, batchNormalization=False):
+            nIn = nc if i == 0 else nm[i - 1]
+            nOut = nm[i]
+            cnn.add_module('conv{0}'.format(i),
+                           nn.Conv2d(nIn, nOut, ks[i], ss[i], ps[i]))
+            if batchNormalization:
+                cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(nOut))
+            if leakyRelu:
+                cnn.add_module('relu{0}'.format(i),
+                               nn.LeakyReLU(0.2, inplace=True))
+            else:
+                cnn.add_module('relu{0}'.format(i), nn.ReLU(True))
+
+        convRelu(0)
+        cnn.add_module('pooling{0}'.format(0), nn.MaxPool2d(2, 2))  # 64x16x64
+        convRelu(1)
+        cnn.add_module('pooling{0}'.format(1), nn.MaxPool2d(2, 2))  # 128x8x32
+        convRelu(2, True)
+        convRelu(3)
+        cnn.add_module('pooling{0}'.format(2),
+                       nn.MaxPool2d((2, 2), (2, 1), (0, 1)))  # 256x4x16
+        convRelu(4, True)
+        convRelu(5)
+        cnn.add_module('pooling{0}'.format(3),
+                       nn.MaxPool2d((2, 2), (2, 1), (0, 1)))  # 512x2x16
+        convRelu(6, True)  # 512x1x16
+
+        self.cnn = cnn
+        self.rnn = nn.Sequential(
+            BidirectionalLSTM(512, nh, nh, ngpu),
+            BidirectionalLSTM(nh, nh, nclass, ngpu))
+
+    def forward(self, input):
+        # conv features
+        conv = utils.data_parallel(self.cnn, input, self.ngpu)
+        b, c, h, w = conv.size()
+        assert h == 1, "the height of conv must be 1"
+        conv = conv.squeeze(2)
+        conv = conv.permute(2, 0, 1)  # [w, b, c]
+
+        # rnn features
+        output = utils.data_parallel(self.rnn, conv, self.ngpu)
+
+        return output

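As a quick sanity check on shapes (a sketch, not part of the commit): with a 1-channel input of height 32, the CNN collapses height to 1 and the permute produces a [sequence, batch, class] tensor ready for CTC. nclass=37 below is an assumed toy value (36 symbols plus the CTC blank):

    import torch
    from torch.autograd import Variable

    model = CRNN(imgH=32, nc=1, nclass=37, nh=256, ngpu=1)
    x = Variable(torch.randn(4, 1, 32, 128))  # four grayscale 32x128 crops
    y = model(x)
    print(y.size())  # expected: torch.Size([33, 4, 37]) -> [seq_len, batch, nclass]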
+ 13 - 0
crnn/models/utils.py

@@ -0,0 +1,13 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+import torch.nn as nn
+import torch.nn.parallel
+
+
+def data_parallel(model, input, ngpu):
+    if isinstance(input.data, torch.cuda.FloatTensor) and ngpu > 1:
+        output = nn.parallel.data_parallel(model, input, range(ngpu))
+    else:
+        output = model(input)
+    return output

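data_parallel only fans work out across GPUs when the input is already a CUDA float tensor and ngpu > 1; anything else falls through to a plain forward call. A small sketch:

    import torch
    import torch.nn as nn
    from torch.autograd import Variable

    lin = nn.Linear(8, 4)
    x = Variable(torch.randn(2, 8))    # CPU tensor
    y = data_parallel(lin, x, ngpu=2)  # CPU input, so this is simply lin(x)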
BIN
crnn/samples/model_acc97.pth


BIN
crnn/samples/netCRNN_143_16500.pth


BIN
crnn/samples/netCRNN_1952_247000.pth


BIN
crnn/samples/netCRNN_474_58000.pth


BIN
crnn/samples/netCRNN_61_134500.pth


+ 41 - 0
crnn/test.py

@@ -0,0 +1,41 @@
+# coding:utf-8
+
+import dataset
+import keys_crnn as keys  # keys.py does not exist in this repo; keys_crnn.py is the likely alphabet module
+import models.crnn as crnn
+import torch.utils.data
+import util
+from PIL import Image
+from torch.autograd import Variable
+
+alphabet = keys.alphabet
+print(len(alphabet))
+input('\npress Enter to continue:')  # raw_input is Python 2; this repo targets Python 3
+converter = util.strLabelConverter(alphabet)
+model = crnn.CRNN(32, 1, len(alphabet) + 1, 256, 1).cuda()
+path = './samples/netCRNN63.pth'
+model.load_state_dict(torch.load(path))
+print(model)
+
+while 1:
+    im_name = input("\nplease input file name:")
+    im_path = "./img/" + im_name
+    image = Image.open(im_path).convert('L')
+    scale = image.size[1] * 1.0 / 32
+    w = image.size[0] / scale
+    w = int(w)
+    print(w)
+
+    transformer = dataset.resizeNormalize((w, 32))
+    image = transformer(image).cuda()
+    image = image.view(1, *image.size())
+    image = Variable(image)
+    model.eval()
+    preds = model(image)
+    _, preds = preds.max(2)  # on PyTorch >= 0.4 max() already drops the reduced dim, so no squeeze is needed
+    preds = preds.transpose(1, 0).contiguous().view(-1)
+    preds_size = Variable(torch.IntTensor([preds.size(0)]))
+    raw_pred = converter.decode(preds.data, preds_size.data, raw=True)
+    sim_pred = converter.decode(preds.data, preds_size.data, raw=False)
+    print('%-20s => %-20s' % (raw_pred, sim_pred))

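The loop body above is a complete greedy-decoding inference path; wrapped as a function it can be reused from other scripts. A sketch (the name recognize is illustrative, and it mirrors the script's imports):

    import dataset
    import torch
    from PIL import Image
    from torch.autograd import Variable

    def recognize(model, converter, im_path):
        image = Image.open(im_path).convert('L')
        w = int(image.size[0] / (image.size[1] / 32.0))  # scale width so height becomes 32
        image = dataset.resizeNormalize((w, 32))(image).cuda()
        image = Variable(image.view(1, *image.size()))
        model.eval()
        preds = model(image)
        _, preds = preds.max(2)                          # per-frame argmax over classes
        preds = preds.transpose(1, 0).contiguous().view(-1)
        preds_size = Variable(torch.IntTensor([preds.size(0)]))
        return converter.decode(preds.data, preds_size.data, raw=False)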
+ 103 - 0
crnn/util.py

@@ -0,0 +1,103 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+import torch
+import torch.nn as nn
+
+# NOTE: Python 3 strings are already unicode; byte inputs are decoded in encode()
+
+class strLabelConverter(object):
+    def __init__(self, alphabet):
+        self.alphabet = alphabet + u'-'  # for `-1` index
+        self.dict = {}
+        for i, char in enumerate(alphabet):
+            # NOTE: 0 is reserved for 'blank' required by wrap_ctc
+            self.dict[char] = i + 1
+
+    def encode(self, text, depth=0):
+        """Support batch or single str."""
+        length = []
+        result = []
+        for item in text:
+            if isinstance(item, bytes):
+                item = item.decode('utf-8')  # Python 2 leftover: inputs may arrive as bytes
+            length.append(len(item))
+            for char in item:
+                # map each char to its 1-based index; 0 is reserved for the CTC blank
+                index = self.dict[char]
+                result.append(index)
+        return (torch.IntTensor(result), torch.IntTensor(length))
+
+    def decode(self, t, length, raw=False):
+        if length.numel() == 1:
+            length = length[0]
+            t = t[:length]
+            if raw:
+                return ''.join([self.alphabet[i - 1] for i in t])
+            else:
+                char_list = []
+                for i in range(length):
+                    if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):
+                        char_list.append(self.alphabet[t[i] - 1])
+                return ''.join(char_list)
+        else:
+            texts = []
+            index = 0
+            for i in range(length.numel()):
+                l = length[i]
+                texts.append(self.decode(
+                    t[index:index + l], torch.IntTensor([l]), raw=raw))
+                index += l
+            return texts
+
+
+class averager(object):
+    def __init__(self):
+        self.reset()
+
+    def add(self, v):
+        self.n_count += v.data.numel()
+        # NOTE: not `+= v.sum()`, which will add a node in the compute graph,
+        # which lead to memory leak
+        self.sum += v.data.sum()
+
+    def reset(self):
+        self.n_count = 0
+        self.sum = 0
+
+    def val(self):
+        res = 0
+        if self.n_count != 0:
+            res = self.sum / float(self.n_count)
+        return res
+
+
+def oneHot(v, v_length, nc):
+    batchSize = v_length.size(0)
+    maxLength = v_length.max()
+    v_onehot = torch.FloatTensor(batchSize, maxLength, nc).fill_(0)
+    acc = 0
+    for i in range(batchSize):
+        length = v_length[i]
+        label = v[acc:acc + length].view(-1, 1).long()
+        v_onehot[i, :length].scatter_(1, label, 1.0)
+        acc += length
+    return v_onehot
+
+
+def loadData(v, data):
+    v.data.resize_(data.size()).copy_(data)
+
+
+def prettyPrint(v):
+    print('Size {0}, Type: {1}'.format(str(v.size()), v.data.type()))
+    print('| Max: %f | Min: %f | Mean: %f' % (v.max().item(), v.min().item(), v.mean().item()))
+
+
+def assureRatio(img):
+    """Ensure imgH <= imgW."""
+    b, c, h, w = img.size()
+    if h > w:
+        main = nn.UpsamplingBilinear2d(size=(h, h), scale_factor=None)
+        img = main(img)
+    return img

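A round-trip sketch for strLabelConverter with a toy alphabet (an assumption; the real alphabet lives in keys_crnn.py). encode maps each character to a 1-based index, reserving 0 for the CTC blank; decode(raw=False) collapses repeats and drops blanks:

    converter = strLabelConverter(u'abc')      # 'a'->1, 'b'->2, 'c'->3; 0 is the blank
    codes, lengths = converter.encode(['cab', 'bc'])
    print(codes.tolist(), lengths.tolist())    # [3, 1, 2, 2, 3] [3, 2]
    print(converter.decode(codes, lengths))    # ['cab', 'bc']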
+ 121 - 0
dev_image.py

@@ -0,0 +1,121 @@
+import cv2
+import math
+from copy import deepcopy
+import numpy as np
+import os
+from TextModel import text_model
+import pandas as pd
+from image_tools import *
+import requests
+import json
+import time
+from tools import get_text
+
+
+
+def run_cut(path, draw=False, tabel=True):
+    """
+    Cut the page into crops and save them into the result folders.
+    :param path: path of the page image
+    :param draw: draw debug polygons on the page
+    :param tabel: also send each crop to the table-recognition service
+    :return:
+    """
+    if os.path.exists(r'D:\PaperCut\result\image'):
+        os.system(r'rd /s/q D:\PaperCut\result\image')
+    if os.path.exists(r'D:\PaperCut\result\text_img'):
+        os.system(r'rd /s/q D:\PaperCut\result\text_img')
+
+    if not os.path.exists(r'D:\PaperCut\result\image'):
+        os.makedirs(r'D:\PaperCut\result\image')
+    if not os.path.exists(r'D:\PaperCut\result\text_img'):
+        os.makedirs(r'D:\PaperCut\result\text_img')
+
+    t0 = time.time()
+    text_bbx = text_model(path)
+    img = cv2.imread(path)
+
+    t1 = time.time()
+    print('text detection time', t1 - t0)
+    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+    cv2.imwrite('gray.png', gray)
+    ret, binary = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)
+    if draw:
+        # `ret` is the threshold value, not an image, so only the binary map is saved
+        cv2.imwrite('binary.png', binary)
+    contours, hierarchy = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_L1)
+
+    t2= time.time()
+    print('contour time', t2 - t1)
+
+    text_bbx = get_range(text_bbx)  # convert polygon coords to [x_min, y_min, x_max, y_max]
+    i_box = is_text(text_bbx, contours)  # split contours into illustration and unknown boxes
+    i_box, text_bbx = processed(np.array(text_bbx), i_box=np.array(i_box))
+
+    t3 = time.time()
+
+    print('preprocessing', t3 - t2)
+    for pixel in i_box:
+        x_min, y_min, x_max, y_max = pixel
+        if (x_max - x_min) * (y_max - y_min) > 30:
+            pts = np.array([[x_min, y_min], [x_max, y_min], [x_max, y_max], [x_min, y_max]], np.int32)
+            # OpenCV expects polygon vertices as an (N, 1, 2) array;
+            # reshape(-1, 1, 2) lets numpy infer the first dimension
+            if draw:
+                pts = pts.reshape((-1, 1, 2))
+                cv2.polylines(img, [pts], True, (0, 0, 255))  # draw illustration/unknown boxes (red)
+
+            cv2.imwrite('./result/image/%d-%d-%d-%d.png' % (y_min, y_max, x_min, x_max), img[y_min:y_max, x_min:x_max])
+
+
+            if tabel:  # and (x_max - x_min) * (y_max - y_min) * 8 > img.shape[0] * img.shape[1]:
+                # upload the crop: open the file in binary mode and pass it via the `files` dict
+                files = {'file': open(r'./result/image/%d-%d-%d-%d.png' % (y_min, y_max, x_min, x_max),
+                                      'rb')}
+                upload_data = {"parentId": "", "fileCategory": "personal",
+                               "fileName": '%d-%d-%d-%d.png' % (y_min, y_max, x_min, x_max),
+                               "uoType": 1}
+
+                tabel_img = np.array(
+                    json.loads(requests.post('http://192.168.1.192:25255/img2maple', upload_data, files=files).text))
+                if np.sum(tabel_img):
+                    cv2.imwrite('./result/image/%d-%d-%d-%d.png' % (y_min, y_max, x_min, x_max), tabel_img)
+
+    for pixel in text_bbx:
+        x_min, y_min, x_max, y_max = pixel
+
+        pts = np.array([[x_min, y_min], [x_max, y_min], [x_max, y_max], [x_min, y_max]], np.int32)
+        # OpenCV expects polygon vertices as an (N, 1, 2) array;
+        # reshape(-1, 1, 2) lets numpy infer the first dimension
+        if draw:
+            pts = pts.reshape((-1, 1, 2))
+            cv2.polylines(img, [pts], True, (0, 255, 0))  # draw text boxes (green)
+
+
+
+        cv2.imwrite('./result/text_img/%d-%d-%d-%d.png' % (y_min, y_max, x_min, x_max), img[y_min:y_max, x_min:x_max])
+
+    get_text()
+    t4=time.time()
+    print('drawing + ocr', t4 - t3)
+    print('total time', t4 - t0)
+
+    # contours holds every contour point
+    # cv2.drawContours(img, contours, -1, (0, 0, 255), 3)
+    if __name__ == '__main__':  # show/save the annotated page only when run as a script
+        cv2.imshow("img", img)
+        cv2.waitKey(0)
+
+        cv2.imwrite('bbb.png', img)
+
+
+if __name__ == '__main__':
+
+    start = time.time()
+    # sample pages noted during testing: 19, 23, 29, 30; 36?, 33
+    for i in range(10):
+        run_cut('./image_dir/1.jpg', draw=True, tabel=False)
+

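run_cut boils down to: detect text boxes with text_model, binarize, extract contours, split contours into text vs. illustration, merge neighbors, then crop and OCR. The binarize-plus-contour stage in isolation, as a sketch (the input path is illustrative; the two-value unpacking assumes an OpenCV version whose findContours matches the code above):

    import cv2

    img = cv2.imread('./image_dir/1.jpg')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)  # 150 as in run_cut
    contours, _ = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_L1)
    print(len(contours), 'contours found')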
BIN
gray.png


BIN
image_dir/01.jpg


BIN
image_dir/06.jpg


BIN
image_dir/1.jpg


BIN
image_dir/1.png


BIN
image_dir/11.jpg


BIN
image_dir/17.png


BIN
image_dir/18.png


BIN
image_dir/19.png


BIN
image_dir/2.png


BIN
image_dir/202004232018_0001.jpg


BIN
image_dir/29.png


BIN
image_dir/3.png


BIN
image_dir/34.png


BIN
image_dir/35.png


BIN
image_dir/4.jpg


BIN
image_dir/5.png


BIN
image_dir/6.png


BIN
image_dir/666.png


BIN
image_dir/7.png


BIN
image_dir/9_LAF40AXGZ37ZN.png


BIN
image_dir/QQ20200427170437.png


BIN
image_dir/c982aa7130d493db76fec10a918e8c6.png


BIN
image_dir/latex.png


BIN
image_dir/png


BIN
image_dir/timmmmmmmmmmmmmmmmmmmmmmm.png


+ 132 - 0
image_generator.py

@@ -0,0 +1,132 @@
+import cv2
+import math
+import pandas as pd
+import numpy as np
+import os
+from TextModel import text_model
+
+
+def get_range(bboxs):
+    ranges = []
+    for i in bboxs:
+        xs = i[::2]
+        ys = i[1::2]
+        box = [min(xs), min(ys), max(xs), max(ys)]
+        ranges.append(box)
+    return ranges
+
+
+def check_range(pixel_points, ranges):
+    pixel_point = pixel_points[0]
+    for i in ranges:
+        if i[2] >= pixel_point[0] >= i[0] and i[3] >= pixel_point[1] >= i[1]:
+            return 1
+    return 0
+
+
+def is_text(bboxs, contours):
+    ranges = get_range(bboxs)
+    illustration_box = []
+    for region in contours:
+        contain = [check_range(pixel_points, ranges) for pixel_points in region]
+        contain = sum(contain) / len(contain)
+        if contain < 0.2:
+            pixel = [math.inf, math.inf, -1, -1]
+            for i in region:
+                if i[0][0] < pixel[0]:
+                    pixel[0] = i[0][0]
+                if i[0][0] > pixel[2]:
+                    pixel[2] = i[0][0]
+                if i[0][1] < pixel[1]:
+                    pixel[1] = i[0][1]
+                if i[0][1] > pixel[3]:
+                    pixel[3] = i[0][1]
+
+            x_min, y_min, x_max, y_max = pixel
+            if (x_max - x_min) * (y_max - y_min) > 30:
+                illustration_box.append(pixel)
+                # pts = np.array([[x_min,y_min], [x_max,y_min], [x_max, y_max], [x_min, y_max]], np.int32)
+                # # OpenCV expects polygon vertices as an (N, 1, 2) array;
+                # # reshape(-1, 1, 2) lets numpy infer the first dimension
+                # pts = pts.reshape((-1, 1, 2))
+                # cv2.polylines(img, [pts], True, (0, 0, 255))  # draw contour boxes
+    return illustration_box
+
+
+def processed(text_box, i_box):
+    i_box = np.array(i_box)
+    # np.sort returns a copy, so the original in-place "sorts" were no-ops; the apparent
+    # intent is to order boxes by size and drop the largest (usually the page border)
+    if len(i_box):
+        areas = (i_box[:, 2] - i_box[:, 0]) * (i_box[:, 3] - i_box[:, 1])
+        i_box = i_box[np.argsort(areas)[::-1]][1:]
+
+    x_min, y_min, x_max, y_max = 0, 1, 2, 3
+    tmp_bbx = []
+    for i in range(len(i_box)):
+        for j in range(len(i_box)):
+            if i_box[i][x_min] > i_box[j][x_min] and i_box[i][y_min] > i_box[j][y_min] \
+                    and i_box[i][x_max] < i_box[j][x_max] and i_box[i][y_max] < i_box[j][y_max]:
+                tmp_bbx.append(i)
+
+    inx = [i for i in range(len(i_box)) if i not in tmp_bbx]
+
+    i_box_c = [i_box[i] for i in inx]
+
+    return i_box_c
+
+
+def run_cut(path):
+    if os.path.exists(r'D:\试卷切割\result\image'):
+        os.system(r'rd /s/q D:\试卷切割\result\image')
+    if os.path.exists(r'D:\试卷切割\result\text_img'):
+        os.system(r'rd /s/q D:\试卷切割\result\text_img')
+
+    if not os.path.exists(r'D:\试卷切割\result\image'):
+        os.makedirs(r'D:\试卷切割\result\image')
+    if not os.path.exists(r'D:\试卷切割\result\text_img'):
+        os.makedirs(r'D:\试卷切割\result\text_img')
+    text_bbx = text_model(path)
+    img = cv2.imread(path)
+
+    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+    ret, binary = cv2.threshold(gray, 220, 255, cv2.THRESH_BINARY)
+
+    contours, hierarchy = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_L1)
+
+    i_box = is_text(text_bbx, contours)
+    i_box = processed(1, i_box)  # first argument is unused by this local processed()
+    for pixel in i_box:
+        x_min, y_min, x_max, y_max = pixel
+        if (x_max - x_min) * (y_max - y_min) > 30:
+            pts = np.array([[x_min, y_min], [x_max, y_min], [x_max, y_max], [x_min, y_max]], np.int32)
+            # OpenCV expects polygon vertices as an (N, 1, 2) array;
+            # reshape(-1, 1, 2) lets numpy infer the first dimension
+            # pts = pts.reshape((-1, 1, 2))
+            # cv2.polylines(img, [pts], True, (0, 0, 255))  # draw illustration boxes
+
+            cv2.imwrite('./result/image/%d-%d-%d-%d.png' % (y_min, y_max, x_min, x_max), img[y_min:y_max, x_min:x_max])
+
+    for pixel in get_range(text_bbx):
+        x_min, y_min, x_max, y_max = pixel
+
+        pts = np.array([[x_min, y_min], [x_max, y_min], [x_max, y_max], [x_min, y_max]], np.int32)
+        # OpenCV expects polygon vertices as an (N, 1, 2) array;
+        # reshape(-1, 1, 2) lets numpy infer the first dimension
+        # pts = pts.reshape((-1, 1, 2))
+        # cv2.polylines(img, [pts], True, (0, 255, 0))  # draw text boxes
+
+        cv2.imwrite('./result/text_img/%d-%d-%d-%d.png' % (y_min, y_max, x_min, x_max), img[y_min:y_max, x_min:x_max])
+
+    # contours holds every contour point
+    # cv2.drawContours(img, contours, -1, (0, 0, 255), 3)
+
+    # cv2.imshow("img", img)
+    # cv2.waitKey(0)
+
+    # cv2.imwrite('bbb.png', img)
+
+
+if __name__ == '__main__':
+    run_cut('./img/5.png')

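get_range flattens an 8-value quadrilateral (x1, y1, ..., x4, y4) into an axis-aligned [x_min, y_min, x_max, y_max] box. A worked example:

    box = get_range([[10, 5, 40, 6, 41, 20, 9, 21]])
    print(box)  # [[9, 5, 41, 21]]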
+ 84 - 0
image_tools.py

@@ -0,0 +1,84 @@
+# -*- coding:utf-8 -*-
+import math
+import numpy as np
+# from neighbor import neighbor_change
+from Near import neighbor_change
+import numba as nb
+
+def get_range(bboxs):
+    # convert polygon coords to [x_min, y_min, x_max, y_max]
+    ranges = []
+    for i in bboxs:
+        xs = i[::2]
+        ys = i[1::2]
+        box = [min(xs), min(ys), max(xs), max(ys)]
+        ranges.append(box)
+    return ranges
+
+# @nb.jit
+def check_range(pixel_points, ranges):
+    """
+
+    :param pixel_points: 一团联通坐标
+    :param ranges:  所有textbox坐标
+    :return:
+    """
+    pixel_point = pixel_points[0]
+    for i in ranges:
+        if i[2] >= pixel_point[0] >= i[0] and i[3] >= pixel_point[1] >= i[1]:
+            return 1
+    return 0
+
+
+def is_text(bboxs, contours):
+    '''
+    Decide which contours are illustrations rather than text.
+    :param bboxs: text regions
+    :param contours: all regions, [array of [x, y] points per contour]
+    :return: illustration regions
+    '''
+    ranges = bboxs
+    illustration_box = []
+    for region in contours:
+        contain = [check_range(pixel_points, ranges) for pixel_points in region]
+        contain = sum(contain) / len(contain)
+        if contain < 0.2:  # fraction of contour points covered by text boxes (not a true IoU)
+
+            # collapse the contour to a bounding box
+            pixel = [math.inf, math.inf, -1, -1]
+            for i in region:
+                if i[0][0] < pixel[0]:
+                    pixel[0] = i[0][0]
+                if i[0][0] > pixel[2]:
+                    pixel[2] = i[0][0]
+                if i[0][1] < pixel[1]:
+                    pixel[1] = i[0][1]
+                if i[0][1] > pixel[3]:
+                    pixel[3] = i[0][1]
+
+            x_min, y_min, x_max, y_max = pixel
+            if (x_max - x_min) * (y_max - y_min) > 30:  # 30 px² is a tunable minimum-area threshold
+                illustration_box.append(pixel)
+    return illustration_box
+
+
+def processed(text_boxes, i_box):
+    # drop boxes nested inside larger boxes
+    x_min, y_min, x_max, y_max = 0, 1, 2, 3  # index
+    if len(i_box):
+        W = (i_box[-1][2] - i_box[-1][0]) * 0.9  # 90% of the last (widest) box's width; original lacked parentheses and scaled only x_min
+        i_box = [i for i in i_box if i[2] - i[0] < W]
+
+    tmp_bbx = []
+    for i in range(len(i_box)):
+        for j in range(len(i_box)):
+            if i_box[i][x_min] > i_box[j][x_min] and i_box[i][y_min] > i_box[j][y_min] \
+                    and i_box[i][x_max] < i_box[j][x_max] and i_box[i][y_max] < i_box[j][y_max]:
+                tmp_bbx.append(i)
+
+    inx = [i for i in range(len(i_box)) if i not in tmp_bbx]
+
+    i_box_c = [i_box[i] for i in inx]
+    text_boxes, i_box_c = neighbor_change(text_boxes, i_box_c)
+    return i_box_c, text_boxes

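The nested-box filter inside processed() keeps only outermost boxes: any box strictly contained in another is dropped before neighbor_change runs. Restated standalone (illustrative, not the repo's exact loop):

    boxes = [[0, 0, 100, 100], [10, 10, 20, 20], [50, 5, 120, 40]]
    inner = {i for i, a in enumerate(boxes) for b in boxes
             if a != b and a[0] > b[0] and a[1] > b[1] and a[2] < b[2] and a[3] < b[3]}
    print([b for i, b in enumerate(boxes) if i not in inner])
    # [[0, 0, 100, 100], [50, 5, 120, 40]]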
+ 287 - 0
neighbor.py

@@ -0,0 +1,287 @@
+# -*- coding:utf-8 -*-
+import numpy as np
+import pandas as pd
+from copy import deepcopy
+from numba import njit
+
+def neighbor_change(tex_box, im_box):
+    """
+    后期处理 临近合并
+    :param tex_box: 文字区域
+    :param im_box:  未知区域
+    :return:
+    """
+    # return tex_box, im_box
+    text_box, image_box, unknow_box, small_text = [], [], [], []
+    tex_box_df = pd.DataFrame(tex_box)
+    height = tex_box_df[3] - tex_box_df[1]
+    mix = height.median()
+    min_xs = 1.2 * mix ** 2  # minimum area for a "large" box, derived from the median text height
+    img_w = int(tex_box_df[2].max())
+    for i in tex_box:
+        if (i[2] - i[0]) * (i[3] - i[1]) < min_xs and False:  # `and False` disables this branch: small text boxes stay text
+            small_text.append(np.array(i))
+            unknow_box.append(np.array(i))
+        else:
+            text_box.append(i)
+
+    # split large boxes from small ones
+    for i in im_box:
+        if (i[2] - i[0]) * (i[3] - i[1]) < min_xs:
+            unknow_box.append(i)
+        else:
+            image_box.append(i)
+    if len(image_box):
+        image_box = pd.DataFrame(image_box)
+        image_box[4] = 1
+        image_box = image_box.sort_values(by=0).astype(int).values  # np.int was removed from NumPy; use builtin int
+        # print(unknow_box)
+    if len(unknow_box) > 0:
+        unknow_box = pd.DataFrame(unknow_box)
+        unknow_box[4] = 2
+        unknow_box = unknow_box.sort_values(by=0).astype(int).values
+
+    tex_box = pd.DataFrame(text_box)
+    tex_box[4] = 1
+    tex_box = tex_box.sort_values(by=0).astype(int).values
+    # @njit
+    def _find_right(boxes, i, j, find_range):
+
+        for xs in range(int(boxes[i][2]), int(boxes[i][2] + find_range)):
+            for ys in range(boxes[i][1]+1, boxes[i][3]-1):
+                if boxes[j][0] < xs < boxes[j][2] and boxes[j][1] < ys < boxes[j][3]:
+                    boxes[i][0] = min(boxes[i][0], boxes[j][0])
+                    boxes[i][1] = min(boxes[i][1], boxes[j][1])
+                    boxes[i][2] = max(boxes[i][2], boxes[j][2])
+                    boxes[i][3] = max(boxes[i][3], boxes[j][3])
+
+                    boxes[j] = np.array([0, 0, 0, 0, 10])
+
+                    return xs - boxes[i][2]
+
+    # @njit
+    def __find_in(in_box, out_box, i, j, find_range=0):
+
+        # if all(in_box[i] == out_box[j] ):
+        #
+        #     return
+        # x_min, y_min, x_max, y_max = 0, 1, 2, 3
+        # if in_box[i][x_max] <= out_box[j][x_max] and in_box[i][x_min] >= out_box[j][x_min]:
+        #     overlap_w = in_box[i]
+        # elif in_box[i][x_max] >= out_box[j][x_max] and in_box[i][x_min] >= out_box[j][x_min]:
+        #     overlap_w = in_box[i]
+        #
+        #
+        # if  in_box[i][x_max] <= out_box[j][x_max] and \
+        #     in_box[i][y_max] <= out_box[j][y_max] and \
+        #     in_box[i][x_min] > out_box[j][x_min] and \
+        #     in_box[i][y_min] < out_box[j][y_min]+11:
+        #     if find_range==888:
+        #         print('********************')
+        #         print(in_box[i])
+        #     out_box[j][0] = min(out_box[j][0], in_box[i][0])
+        #     out_box[j][1] = min(out_box[j][1], in_box[i][1])
+        #     out_box[j][2] = max(out_box[j][2], in_box[i][2])
+        #     out_box[j][3] = max(out_box[j][3], in_box[i][3])
+        #     in_box[i] = np.array([0, 0, 0, 0, 10])
+        #     return 1
+
+        for xs in range(in_box[i][0]-find_range, in_box[i][2]+find_range):
+            for ys in range(in_box[i][1]-find_range, in_box[i][3]+find_range):
+                if out_box[j][0] <= xs <= out_box[j][2] and out_box[j][1] <= ys <= out_box[j][3]:
+                    out_box[j][0] = min(out_box[j][0], in_box[i][0])
+                    out_box[j][1] = min(out_box[j][1], in_box[i][1])
+                    out_box[j][2] = max(out_box[j][2], in_box[i][2])
+                    out_box[j][3] = max(out_box[j][3], in_box[i][3])
+                    in_box[i] = np.array([0, 0, 0, 0, 10])
+                    return 1
+
+    # @njit
+    def __find_right(the_boxes, unknow_box, i, j, find_range):
+
+        for xs in range(the_boxes[i][2], the_boxes[i][2] + int(find_range)):
+            for ys in range(the_boxes[i][1], the_boxes[i][3]):
+                if unknow_box[j][0] < xs < unknow_box[j][2] and unknow_box[j][1] < ys < unknow_box[j][3]:
+                    the_boxes[i][0] = min(the_boxes[i][0], unknow_box[j][0])
+                    the_boxes[i][1] = min(the_boxes[i][1], unknow_box[j][1])
+                    the_boxes[i][2] = max(the_boxes[i][2], unknow_box[j][2])
+                    the_boxes[i][3] = max(the_boxes[i][3], unknow_box[j][3])
+                    unknow_box[j] = np.array([0, 0, 0, 0, 10])
+                    return xs - the_boxes[i][2]
+
+    # @njit
+    def __find_left(the_boxes, unknow_box, i, j, find_range):
+
+        for xs in range(the_boxes[i][0], max(int(the_boxes[i][0] - find_range), 0), -1):
+            for ys in range(the_boxes[i][1], the_boxes[i][3]):
+                if unknow_box[j][0] < xs < unknow_box[j][2] and unknow_box[j][1] < ys < unknow_box[j][3]:
+                    the_boxes[i][0] = min(the_boxes[i][0], unknow_box[j][0])
+                    the_boxes[i][1] = min(the_boxes[i][1], unknow_box[j][1])
+                    the_boxes[i][2] = max(the_boxes[i][2], unknow_box[j][2])
+                    the_boxes[i][3] = max(the_boxes[i][3], unknow_box[j][3])
+                    unknow_box[j] = np.array([0, 0, 0, 0, 10])
+                    return 1
+
+    # @njit
+    def __find_down(the_boxes, unknow_box, i, j, find_range):
+
+        for xs in range(int(unknow_box[j][0]), int(unknow_box[j][2])):
+            for ys in range(int(unknow_box[j][3]), int(unknow_box[j][3] + find_range)):
+                if the_boxes[i][0] < xs < the_boxes[i][2] and the_boxes[i][1] < ys < the_boxes[i][3]:
+                    the_boxes[i][0] = min(the_boxes[i][0], unknow_box[j][0])
+                    the_boxes[i][1] = min(the_boxes[i][1], unknow_box[j][1])
+                    the_boxes[i][2] = max(the_boxes[i][2], unknow_box[j][2])
+                    the_boxes[i][3] = max(the_boxes[i][3], unknow_box[j][3])
+                    unknow_box[j] = np.array([0, 0, 0, 0, 10])
+                    return ys - unknow_box[j][3]
+
+    # @njit
+    def __find_top(the_boxes, unknow_box, i, j, find_range):
+
+        for xs in range(int(unknow_box[j][0]), int(unknow_box[j][2])):
+            for ys in range(int(unknow_box[j][1]), int(unknow_box[j][1] - find_range), -1):
+                if the_boxes[i][0] < xs < the_boxes[i][2] and the_boxes[i][1] < ys < the_boxes[i][3]:
+                    the_boxes[i][0] = min(the_boxes[i][0], unknow_box[j][0])
+                    the_boxes[i][1] = min(the_boxes[i][1], unknow_box[j][1])
+                    the_boxes[i][2] = max(the_boxes[i][2], unknow_box[j][2])
+                    the_boxes[i][3] = max(the_boxes[i][3], unknow_box[j][3])
+                    unknow_box[j] = np.array([0, 0, 0, 0, 10])
+                    return unknow_box[j][3] - ys
+
+    # @njit
+    def _find_down(boxes, y_max, i, j):
+        for xs in range(boxes[i][0], boxes[i][2]):
+            for ys in range(boxes[i][3], y_max):
+                if boxes[j][0] < xs < boxes[j][2] and boxes[j][1] < ys < boxes[j][3]:  # was boxes[j][2], a typo (helper currently unused)
+                    boxes[i][0] = min(boxes[i][0], boxes[j][0])
+                    boxes[i][1] = min(boxes[i][1], boxes[j][1])
+                    boxes[i][2] = max(boxes[i][2], boxes[j][2])
+                    boxes[i][3] = max(boxes[i][3], boxes[j][3])
+                    return 1
+
+    # paste small unknown fragments upward onto image boxes
+    for i in range(len(image_box)):
+        for j in range(len(unknow_box)):
+            __find_top(image_box, unknow_box, i, j, mix//2)
+
+    # paste small unknown fragments downward onto image boxes
+    for i in range(len(image_box)):
+        for j in range(len(unknow_box)):
+            __find_down(image_box, unknow_box, i, j, mix//2)
+
+    # paste small unknown fragments leftward onto image boxes
+    for i in range(len(image_box)):
+        for j in range(len(unknow_box)):
+            __find_left(image_box, unknow_box, i, j, mix//2 )
+
+    # paste small unknown fragments rightward onto image boxes
+    for i in range(len(image_box)):
+        for j in range(len(unknow_box)):
+            __find_right(image_box, unknow_box, i, j, mix//2)
+
+
+    # remove text boxes contained inside image boxes
+    # for i in range(len(tex_box)):
+    #     for j in range(len(image_box)):
+    #         __find_in(tex_box, image_box, i, j)
+    for i in range(len(small_text)):
+        for j in range(len(image_box)):
+            if not all(small_text[i] == image_box[j]):
+                __find_in(small_text, image_box, i, j)
+
+    # extend text boxes rightward
+    for i in range(len(tex_box)):
+        for j in range(i + 1, len(tex_box)):
+            _find_right(tex_box, i, j, mix)
+
+    text_box = []
+    for i in tex_box:
+        if 1 < (i[2] - i[0]) * (i[3] - i[1]) < min_xs:
+            small_text.append(np.array(i))
+        else:
+            text_box.append(i)
+
+    tex_box = pd.DataFrame(text_box)
+    tex_box[4] = 1
+    tex_box = tex_box.sort_values(by=0).astype(int).values
+    if len(small_text) > 0:
+        small_text = pd.DataFrame(small_text)
+        small_text[4] = 3
+        small_text = small_text.sort_values(by=0).astype(int).values
+
+
+    # merge small fragments into text boxes from the left
+    for i in range(len(tex_box)):
+        if (tex_box[i][2] - tex_box[i][0]) * (tex_box[i][3] - tex_box[i][1]) > 5 * min_xs:
+            for j in range(len(unknow_box)):
+                __find_left(tex_box, unknow_box, i, j, mix)
+
+
+
+    for i in range(len(tex_box)):
+        if (tex_box[i][2] - tex_box[i][0]) * (tex_box[i][3] - tex_box[i][1]) > 5 * min_xs:
+            for j in range(len(small_text)):
+                __find_left(tex_box, small_text, i, j, 5 * mix)
+
+
+
+    # extend text boxes downward
+    for i in range(len(tex_box)):
+        for j in range(len(unknow_box)):
+            __find_down(tex_box, unknow_box, i, j, mix // 3)
+
+    for i in range(len(tex_box)):
+        for j in range(len(small_text)):
+            __find_down(tex_box, small_text, i, j, mix // 3)
+
+
+
+    # extend text boxes upward
+    for i in range(len(tex_box)):
+        for j in range(len(unknow_box)):
+            __find_top(tex_box, unknow_box, i, j, mix // 3)
+
+    for i in range(len(tex_box)):
+        for j in range(len(small_text)):
+            __find_top(tex_box, small_text, i, j, mix // 3)
+
+
+
+    # image_box_bk = deepcopy(image_box)
+    # remove image boxes nested inside other image boxes
+    for i in range(len(image_box)):
+        for j in range(i+1,len(image_box)):
+            if not all(image_box[i] == image_box[j]):
+                __find_in(image_box, image_box, i, j, 0)
+    # remove text boxes nested inside other text boxes
+    for i in range(len(tex_box)):
+        for j in range(i+1,len(tex_box)):
+            if not all(tex_box[i] == tex_box[j]):
+                __find_in(tex_box, tex_box, i, j, -int(mix//5))
+
+    # text_box_p = [i[:4] for i in tex_box]# + [i[:4] for i in small_text]
+    # image_box_p = [i[:4] for i in image_box]
+    #
+    # return text_box_p, image_box_p
+    #
+    # for i in range(len(tex_box)):
+    #     for j in range(i+1,len(tex_box)):
+    #         __find_in(tex_box, tex_box, i, j, -int(mix//10))
+
+    for i in range(len(small_text)):
+        for j in range(len(tex_box)):
+            if not all(small_text[i] == tex_box[j]):
+                __find_in(small_text, tex_box, i, j, 0)
+
+    for i in range(len(tex_box)):
+        for j in range(len(image_box)):
+            if not all(tex_box[i] == image_box[j]):
+                __find_in(tex_box, image_box, i, j, 0)
+    for i in range(len(small_text)):
+        for j in range(len(image_box)):
+            if not all(small_text[i] == image_box[j]):
+                __find_in(small_text, image_box, i, j, 0)
+    text_box_p = [i[:4] for i in tex_box if np.sum(i[:4])] + [i[:4] for i in small_text if np.sum(i[:4])]
+    image_box_p = [i[:4] for i in image_box if np.sum(i[:4])]
+
+    return text_box_p, image_box_p

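Every _find_* helper above shares one primitive: probe points from one box, and if a probe lands inside another box, replace the first box with the union of the two and zero out the absorbed one (category 10 marks it dead). The union step in isolation:

    import numpy as np

    a = np.array([10, 10, 50, 30, 1])  # [x_min, y_min, x_max, y_max, category]
    b = np.array([45, 12, 80, 28, 2])  # overlaps `a` on the right
    a[:4] = [min(a[0], b[0]), min(a[1], b[1]), max(a[2], b[2]), max(a[3], b[3])]
    b[:] = [0, 0, 0, 0, 10]            # sentinel: this box has been absorbed
    print(a)                           # [10 10 80 30  1]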
+ 24 - 0
ocrapi.py

@@ -0,0 +1,24 @@
+import base64
+import json
+import time
+
+import requests
+
+
+def ocr(path):
+    with open(path, 'rb') as f:
+        base64_data = base64.b64encode(f.read())
+        s = base64_data.decode()
+    js = json.loads(requests.post('http://106.75.61.190:10811/spooo', json={'b64': s}).text)
+    print(js)
+    time.sleep(0.1)
+    s = js['texts'].replace('<latex>', ' ').replace('</latex>', ' ')
+    return s
+
+
+if __name__ == '__main__':
+    ...

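Typical use of the helper above; the endpoint URL comes from the code itself, and the sample path points at one of the crops run_cut writes out:

    text = ocr('./result/text_img/10-38-43-989.png')
    print(text)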
+ 9 - 0
readme.md

@@ -0,0 +1,9 @@
+Built text embeddings via transfer learning and used them for an efficient KNN classification model
+
+Developed a question retrieval and ranking system based on MongoDB and similarity algorithms
+
+Represented questions as syntax trees via entity-relation extraction
+
+Refined OCR results with NLP techniques and extracted structured content from them
+
+

BIN
result/image/129-150-45-107.png


BIN
result/image/129-208-576-779.png


BIN
result/image/129-323-44-563.png


BIN
result/image/360-470-238-553.png


BIN
result/image/397-471-561-770.png


BIN
result/image/399-471-44-236.png


BIN
result/text_img/10-38-43-989.png


Some files were not shown because too many files changed in this diff