This commit is contained in:
Your Name
2019-06-07 19:04:00 +08:00
parent c8df372f63
commit aed8c10d71
87 changed files with 782 additions and 215 deletions

Binary file not shown.

1
.gitignore vendored
View File

@@ -1,2 +1,3 @@
.vscode/ .vscode/
checkpoints/ checkpoints/
.idea/

307
.idea/workspace.xml generated
View File

@@ -11,63 +11,51 @@
</component> </component>
<component name="FileEditorManager"> <component name="FileEditorManager">
<leaf SIDE_TABS_SIZE_LIMIT_KEY="300"> <leaf SIDE_TABS_SIZE_LIMIT_KEY="300">
<file leaf-file-name="train_simple.py" pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/../train_simple.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="296">
<caret line="87" column="36" lean-forward="false" selection-start-line="87" selection-start-column="36" selection-end-line="87" selection-end-column="36" />
<folding />
</state>
</provider>
</entry>
</file>
<file leaf-file-name="casia_hwdb.py" pinned="false" current-in-tab="false"> <file leaf-file-name="casia_hwdb.py" pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/../../dataset/casia_hwdb.py"> <entry file="file://$PROJECT_DIR$/../dataset/casia_hwdb.py">
<provider selected="true" editor-type-id="text-editor"> <provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="418"> <state relative-caret-position="313">
<caret line="54" column="35" lean-forward="false" selection-start-line="54" selection-start-column="35" selection-end-line="54" selection-end-column="35" /> <caret line="120" column="45" lean-forward="false" selection-start-line="120" selection-start-column="45" selection-end-line="120" selection-end-column="45" />
<folding /> <folding />
</state> </state>
</provider> </provider>
</entry> </entry>
</file> </file>
<file leaf-file-name="train.py" pinned="false" current-in-tab="true"> <file leaf-file-name="demo.py" pinned="false" current-in-tab="true">
<entry file="file://$PROJECT_DIR$/../../train.py"> <entry file="file://$PROJECT_DIR$/../demo.py">
<provider selected="true" editor-type-id="text-editor"> <provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="401"> <state relative-caret-position="143">
<caret line="86" column="51" lean-forward="true" selection-start-line="86" selection-start-column="51" selection-end-line="86" selection-end-column="51" /> <caret line="52" column="3" lean-forward="true" selection-start-line="52" selection-start-column="3" selection-end-line="52" selection-end-column="3" />
<folding />
</state>
</provider>
</entry>
</file>
<file leaf-file-name=".gitignore" pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/../../.gitignore">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="17">
<caret line="1" column="12" lean-forward="false" selection-start-line="1" selection-start-column="12" selection-end-line="1" selection-end-column="12" />
<folding />
</state>
</provider>
</entry>
</file>
<file leaf-file-name="cnn_net.py" pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/../../models/cnn_net.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="646">
<caret line="38" column="0" lean-forward="false" selection-start-line="38" selection-start-column="0" selection-end-line="38" selection-end-column="0" />
<folding>
<element signature="e#1153#1176#0" expanded="true" />
</folding>
</state>
</provider>
</entry>
</file>
<file leaf-file-name="tests.py" pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/../../tests.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="0">
<caret line="0" column="0" lean-forward="false" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" />
<folding /> <folding />
</state> </state>
</provider> </provider>
</entry> </entry>
</file> </file>
<file leaf-file-name="convert_to_tfrecord.py" pinned="false" current-in-tab="false"> <file leaf-file-name="convert_to_tfrecord.py" pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/../../dataset/convert_to_tfrecord.py"> <entry file="file://$PROJECT_DIR$/../dataset/convert_to_tfrecord.py">
<provider selected="true" editor-type-id="text-editor"> <provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="0"> <state relative-caret-position="342">
<caret line="0" column="0" lean-forward="false" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" /> <caret line="77" column="6" lean-forward="false" selection-start-line="77" selection-start-column="6" selection-end-line="77" selection-end-column="6" />
<folding />
</state>
</provider>
</entry>
</file>
<file leaf-file-name="cnn_net.py" pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/../models/cnn_net.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="197">
<caret line="20" column="27" lean-forward="false" selection-start-line="20" selection-start-column="27" selection-end-line="20" selection-end-column="27" />
<folding /> <folding />
</state> </state>
</provider> </provider>
@@ -75,17 +63,39 @@
</file> </file>
</leaf> </leaf>
</component> </component>
<component name="FileTemplateManagerImpl">
<option name="RECENT_TEMPLATES">
<list>
<option value="Python Script" />
</list>
</option>
</component>
<component name="FindInProjectRecents">
<findStrings>
<find></find>
</findStrings>
</component>
<component name="Git.Settings"> <component name="Git.Settings">
<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$/../.." /> <option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$/.." />
</component> </component>
<component name="IdeDocumentHistory"> <component name="IdeDocumentHistory">
<option name="CHANGED_PATHS"> <option name="CHANGED_PATHS">
<list> <list>
<option value="$PROJECT_DIR$/dataset/casia_hwdb.py" /> <option value="$PROJECT_DIR$/dataset/casia_hwdb.py" />
<option value="$PROJECT_DIR$/../../.gitignore" /> <option value="$PROJECT_DIR$/../../.gitignore" />
<option value="$PROJECT_DIR$/../../models/cnn_net.py" />
<option value="$PROJECT_DIR$/../../dataset/casia_hwdb.py" /> <option value="$PROJECT_DIR$/../../dataset/casia_hwdb.py" />
<option value="$PROJECT_DIR$/../../train.py" /> <option value="$PROJECT_DIR$/../../train.py" />
<option value="$PROJECT_DIR$/../../models/cnn_net.py" />
<option value="$PROJECT_DIR$/../../train_simple.py" />
<option value="$PROJECT_DIR$/../../dataset/convert_to_tfrecord.py" />
<option value="$PROJECT_DIR$/../../dataset/get_hwdb_1.0_1.1.sh" />
<option value="$PROJECT_DIR$/../.gitignore" />
<option value="$PROJECT_DIR$/../models/cnn_net.py" />
<option value="$PROJECT_DIR$/../dataset/convert_to_tfrecord.py" />
<option value="$PROJECT_DIR$/../train_simple.py" />
<option value="$PROJECT_DIR$/../dataset/.gitignore" />
<option value="$PROJECT_DIR$/../dataset/casia_hwdb.py" />
<option value="$PROJECT_DIR$/../demo.py" />
</list> </list>
</option> </option>
</component> </component>
@@ -95,10 +105,11 @@
<detection-done>true</detection-done> <detection-done>true</detection-done>
<sorting>DEFINITION_ORDER</sorting> <sorting>DEFINITION_ORDER</sorting>
</component> </component>
<component name="ProjectFrameBounds" extendedState="4"> <component name="ProjectFrameBounds">
<option name="y" value="25" /> <option name="x" value="63" />
<option name="width" value="1573" /> <option name="y" value="202" />
<option name="height" value="1415" /> <option name="width" value="1528" />
<option name="height" value="1061" />
</component> </component>
<component name="ProjectView"> <component name="ProjectView">
<navigator currentView="ProjectPane" proportions="" version="1"> <navigator currentView="ProjectPane" proportions="" version="1">
@@ -116,6 +127,7 @@
</navigator> </navigator>
<panes> <panes>
<pane id="Scope" /> <pane id="Scope" />
<pane id="Scratches" />
<pane id="ProjectPane"> <pane id="ProjectPane">
<subPane> <subPane>
<expand> <expand>
@@ -123,11 +135,20 @@
<item name="." type="b2602c69:ProjectViewProjectNode" /> <item name="." type="b2602c69:ProjectViewProjectNode" />
<item name="ocrcn_tf2" type="462c0819:PsiDirectoryNode" /> <item name="ocrcn_tf2" type="462c0819:PsiDirectoryNode" />
</path> </path>
<path>
<item name="." type="b2602c69:ProjectViewProjectNode" />
<item name="ocrcn_tf2" type="462c0819:PsiDirectoryNode" />
<item name="dataset" type="462c0819:PsiDirectoryNode" />
</path>
<path>
<item name="." type="b2602c69:ProjectViewProjectNode" />
<item name="ocrcn_tf2" type="462c0819:PsiDirectoryNode" />
<item name="models" type="462c0819:PsiDirectoryNode" />
</path>
</expand> </expand>
<select /> <select />
</subPane> </subPane>
</pane> </pane>
<pane id="Scratches" />
</panes> </panes>
</component> </component>
<component name="PropertiesComponent"> <component name="PropertiesComponent">
@@ -135,6 +156,11 @@
<property name="WebServerToolWindowFactoryState" value="false" /> <property name="WebServerToolWindowFactoryState" value="false" />
<property name="settings.editor.selected.configurable" value="preferences.editor" /> <property name="settings.editor.selected.configurable" value="preferences.editor" />
</component> </component>
<component name="RecentsManager">
<key name="MoveFile.RECENT_KEYS">
<recent name="$PROJECT_DIR$/../assets" />
</key>
</component>
<component name="RunDashboard"> <component name="RunDashboard">
<option name="ruleStates"> <option name="ruleStates">
<list> <list>
@@ -164,7 +190,7 @@
<servers /> <servers />
</component> </component>
<component name="ToolWindowManager"> <component name="ToolWindowManager">
<frame x="0" y="25" width="1573" height="1415" extended-state="4" /> <frame x="63" y="202" width="1528" height="1061" extended-state="0" />
<editor active="true" /> <editor active="true" />
<layout> <layout>
<window_info id="TODO" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" /> <window_info id="TODO" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" />
@@ -172,11 +198,11 @@
<window_info id="Run" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" /> <window_info id="Run" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" />
<window_info id="Version Control" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" /> <window_info id="Version Control" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" />
<window_info id="Python Console" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" /> <window_info id="Python Console" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" />
<window_info id="Terminal" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="true" show_stripe_button="true" weight="0.32943925" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" /> <window_info id="Terminal" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="true" show_stripe_button="true" weight="0.36989248" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" />
<window_info id="Project" active="false" anchor="left" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="true" show_stripe_button="true" weight="0.14800262" sideWeight="0.5" order="0" side_tool="false" content_ui="combo" /> <window_info id="Project" active="false" anchor="left" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="true" show_stripe_button="true" weight="0.20850202" sideWeight="0.5" order="0" side_tool="false" content_ui="combo" />
<window_info id="Docker" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="false" weight="0.33" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" /> <window_info id="Docker" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="false" weight="0.33" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" />
<window_info id="Database" active="false" anchor="right" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" /> <window_info id="Database" active="false" anchor="right" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" />
<window_info id="Find" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.32943925" sideWeight="0.5" order="-1" side_tool="false" content_ui="tabs" /> <window_info id="Find" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.32943925" sideWeight="0.5" order="1" side_tool="false" content_ui="tabs" />
<window_info id="SciView" active="false" anchor="right" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" /> <window_info id="SciView" active="false" anchor="right" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" />
<window_info id="Structure" active="false" anchor="left" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="0" side_tool="true" content_ui="tabs" /> <window_info id="Structure" active="false" anchor="left" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="0" side_tool="true" content_ui="tabs" />
<window_info id="Debug" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" /> <window_info id="Debug" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" />
@@ -194,38 +220,50 @@
<watches-manager /> <watches-manager />
</component> </component>
<component name="editorHistoryManager"> <component name="editorHistoryManager">
<entry file="file://$PROJECT_DIR$/../../dataset/dataset_hwdb.py"> <entry file="file://$PROJECT_DIR$/../train_simple.py">
<provider selected="true" editor-type-id="text-editor"> <provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="0"> <state relative-caret-position="1530">
<caret line="0" column="0" lean-forward="false" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" /> <caret line="90" column="0" lean-forward="true" selection-start-line="90" selection-start-column="0" selection-end-line="90" selection-end-column="0" />
<folding /> <folding />
</state> </state>
</provider> </provider>
</entry> </entry>
<entry file="file://$PROJECT_DIR$/../../tests.py"> <entry file="file://$PROJECT_DIR$/../.gitignore">
<provider selected="true" editor-type-id="text-editor"> <provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="0"> <state relative-caret-position="34">
<caret line="0" column="0" lean-forward="false" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" /> <caret line="2" column="6" lean-forward="false" selection-start-line="2" selection-start-column="6" selection-end-line="2" selection-end-column="6" />
<folding /> <folding />
</state> </state>
</provider> </provider>
</entry> </entry>
<entry file="file://$PROJECT_DIR$/../../dataset/convert_to_tfrecord.py"> <entry file="file://$PROJECT_DIR$/../dataset/casia_hwdb.py">
<provider selected="true" editor-type-id="text-editor"> <provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="0"> <state relative-caret-position="1003">
<caret line="0" column="0" lean-forward="false" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" /> <caret line="59" column="0" lean-forward="true" selection-start-line="59" selection-start-column="0" selection-end-line="59" selection-end-column="0" />
<folding /> <folding />
</state> </state>
</provider> </provider>
</entry> </entry>
<entry file="file://$PROJECT_DIR$/../../dataset/casia_hwdb.py"> <entry file="file://$PROJECT_DIR$/../dataset/convert_to_tfrecord.py">
<provider selected="true" editor-type-id="text-editor"> <provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="1190"> <state relative-caret-position="1156">
<caret line="70" column="26" lean-forward="true" selection-start-line="70" selection-start-column="26" selection-end-line="70" selection-end-column="26" /> <caret line="74" column="53" lean-forward="true" selection-start-line="74" selection-start-column="53" selection-end-line="74" selection-end-column="53" />
<folding /> <folding />
</state> </state>
</provider> </provider>
</entry> </entry>
<entry file="file://$PROJECT_DIR$/../models/cnn_net.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="629">
<caret line="38" column="78" lean-forward="false" selection-start-line="38" selection-start-column="78" selection-end-line="38" selection-end-column="78" />
<folding />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/../../dataset/dataset_hwdb.py" />
<entry file="file://$PROJECT_DIR$/../../tests.py" />
<entry file="file://$PROJECT_DIR$/../../dataset/convert_to_tfrecord.py" />
<entry file="file://$PROJECT_DIR$/../../dataset/casia_hwdb.py" />
<entry file="file:///usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py"> <entry file="file:///usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py">
<provider selected="true" editor-type-id="text-editor"> <provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="-7666"> <state relative-caret-position="-7666">
@@ -234,19 +272,30 @@
</state> </state>
</provider> </provider>
</entry> </entry>
<entry file="file://$PROJECT_DIR$/../../dataset/charactors.txt"> <entry file="file://$PROJECT_DIR$/../../dataset/charactors.txt" />
<provider selected="true" editor-type-id="text-editor"> <entry file="file://$PROJECT_DIR$/../../sample.png" />
<state relative-caret-position="0"> <entry file="file://$PROJECT_DIR$/../../tests.py" />
<caret line="0" column="0" lean-forward="false" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" /> <entry file="file://$PROJECT_DIR$/../../.gitignore" />
</state> <entry file="file://$PROJECT_DIR$/../../dataset/dataset_hwdb.py" />
</provider> <entry file="file://$PROJECT_DIR$/../../models/cnn_net.py" />
</entry> <entry file="file://$PROJECT_DIR$/../../dataset/casia_hwdb.py" />
<entry file="file://$PROJECT_DIR$/../../sample.png"> <entry file="file://$PROJECT_DIR$/../../train_simple.py" />
<entry file="file://$PROJECT_DIR$/../../dataset/convert_to_tfrecord.py" />
<entry file="file://$PROJECT_DIR$/../../dataset/get_hwdb_1.0_1.1.sh" />
<entry file="file://$PROJECT_DIR$/../assets/sample.png">
<provider selected="true" editor-type-id="images"> <provider selected="true" editor-type-id="images">
<state /> <state />
</provider> </provider>
</entry> </entry>
<entry file="file://$PROJECT_DIR$/../../tests.py"> <entry file="file://$PROJECT_DIR$/../.gitignore">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="34">
<caret line="2" column="6" lean-forward="false" selection-start-line="2" selection-start-column="6" selection-end-line="2" selection-end-column="6" />
<folding />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/../dataset/characters.txt">
<provider selected="true" editor-type-id="text-editor"> <provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="0"> <state relative-caret-position="0">
<caret line="0" column="0" lean-forward="false" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" /> <caret line="0" column="0" lean-forward="false" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" />
@@ -254,15 +303,17 @@
</state> </state>
</provider> </provider>
</entry> </entry>
<entry file="file://$PROJECT_DIR$/../../.gitignore"> <entry file="file://$PROJECT_DIR$/../assets/镑.png">
<provider selected="true" editor-type-id="text-editor"> <provider selected="true" editor-type-id="images">
<state relative-caret-position="17"> <state />
<caret line="1" column="12" lean-forward="false" selection-start-line="1" selection-start-column="12" selection-end-line="1" selection-end-column="12" />
<folding />
</state>
</provider> </provider>
</entry> </entry>
<entry file="file://$PROJECT_DIR$/../../dataset/dataset_hwdb.py"> <entry file="file://$PROJECT_DIR$/../assets/耙.png">
<provider selected="true" editor-type-id="images">
<state />
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/../tests.py">
<provider selected="true" editor-type-id="text-editor"> <provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="0"> <state relative-caret-position="0">
<caret line="0" column="0" lean-forward="false" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" /> <caret line="0" column="0" lean-forward="false" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" />
@@ -270,7 +321,15 @@
</state> </state>
</provider> </provider>
</entry> </entry>
<entry file="file://$PROJECT_DIR$/../../dataset/convert_to_tfrecord.py"> <entry file="file://$PROJECT_DIR$/../dataset/characters.txt">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="312">
<caret line="3246" column="1" lean-forward="false" selection-start-line="3246" selection-start-column="0" selection-end-line="3246" selection-end-column="1" />
<folding />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/../readme.md">
<provider selected="true" editor-type-id="text-editor"> <provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="0"> <state relative-caret-position="0">
<caret line="0" column="0" lean-forward="false" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" /> <caret line="0" column="0" lean-forward="false" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" />
@@ -278,28 +337,88 @@
</state> </state>
</provider> </provider>
</entry> </entry>
<entry file="file://$PROJECT_DIR$/../../dataset/casia_hwdb.py"> <entry file="file://$PROJECT_DIR$/../dataset/get_hwdb_1.0_1.1.sh">
<provider selected="true" editor-type-id="text-editor"> <provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="418"> <state relative-caret-position="0">
<caret line="54" column="35" lean-forward="false" selection-start-line="54" selection-start-column="35" selection-end-line="54" selection-end-column="35" /> <caret line="0" column="0" lean-forward="false" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" />
<folding /> <folding />
</state> </state>
</provider> </provider>
</entry> </entry>
<entry file="file://$PROJECT_DIR$/../../models/cnn_net.py"> <entry file="file://$PROJECT_DIR$/../assets/饱.png">
<provider selected="true" editor-type-id="images">
<state />
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/../assets/颁.png">
<provider selected="true" editor-type-id="images">
<state />
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/../assets/袄.png">
<provider selected="true" editor-type-id="images">
<state />
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/../assets/按.png">
<provider selected="true" editor-type-id="images">
<state />
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/../assets/扳.png">
<provider selected="true" editor-type-id="images">
<state />
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/../assets/包.png">
<provider selected="true" editor-type-id="images">
<state />
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/../dataset/.gitignore">
<provider selected="true" editor-type-id="text-editor"> <provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="646"> <state relative-caret-position="68">
<caret line="38" column="0" lean-forward="false" selection-start-line="38" selection-start-column="0" selection-end-line="38" selection-end-column="0" /> <caret line="4" column="22" lean-forward="false" selection-start-line="4" selection-start-column="22" selection-end-line="4" selection-end-column="22" />
<folding> <folding />
<element signature="e#1153#1176#0" expanded="true" />
</folding>
</state> </state>
</provider> </provider>
</entry> </entry>
<entry file="file://$PROJECT_DIR$/../../train.py"> <entry file="file://$PROJECT_DIR$/../models/cnn_net.py">
<provider selected="true" editor-type-id="text-editor"> <provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="401"> <state relative-caret-position="197">
<caret line="86" column="51" lean-forward="true" selection-start-line="86" selection-start-column="51" selection-end-line="86" selection-end-column="51" /> <caret line="20" column="27" lean-forward="false" selection-start-line="20" selection-start-column="27" selection-end-line="20" selection-end-column="27" />
<folding />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/../dataset/convert_to_tfrecord.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="342">
<caret line="77" column="6" lean-forward="false" selection-start-line="77" selection-start-column="6" selection-end-line="77" selection-end-column="6" />
<folding />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/../train_simple.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="296">
<caret line="87" column="36" lean-forward="false" selection-start-line="87" selection-start-column="36" selection-end-line="87" selection-end-column="36" />
<folding />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/../dataset/casia_hwdb.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="313">
<caret line="120" column="45" lean-forward="false" selection-start-line="120" selection-start-column="45" selection-end-line="120" selection-end-column="45" />
<folding />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/../demo.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="143">
<caret line="52" column="3" lean-forward="true" selection-start-line="52" selection-start-column="3" selection-end-line="52" selection-end-column="3" />
<folding /> <folding />
</state> </state>
</provider> </provider>

BIN
assets/0.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.7 KiB

BIN
assets/1.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.6 KiB

BIN
assets/10.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.8 KiB

BIN
assets/11.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.5 KiB

BIN
assets/12.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.9 KiB

BIN
assets/13.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.3 KiB

BIN
assets/14.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.2 KiB

BIN
assets/15.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.9 KiB

BIN
assets/16.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.2 KiB

BIN
assets/17.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.2 KiB

BIN
assets/18.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.2 KiB

BIN
assets/19.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.9 KiB

BIN
assets/2.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.7 KiB

BIN
assets/20.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.9 KiB

BIN
assets/21.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.8 KiB

BIN
assets/22.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.9 KiB

BIN
assets/23.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.9 KiB

BIN
assets/24.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.2 KiB

BIN
assets/25.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.6 KiB

BIN
assets/26.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.3 KiB

BIN
assets/27.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.4 KiB

BIN
assets/28.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.4 KiB

BIN
assets/29.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.0 KiB

BIN
assets/3.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.2 KiB

BIN
assets/30.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.3 KiB

BIN
assets/31.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.7 KiB

BIN
assets/32.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.4 KiB

BIN
assets/33.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.6 KiB

BIN
assets/34.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.5 KiB

BIN
assets/35.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.6 KiB

BIN
assets/4.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.6 KiB

BIN
assets/5.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.7 KiB

BIN
assets/6.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.2 KiB

BIN
assets/7.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.8 KiB

BIN
assets/8.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.1 KiB

BIN
assets/9.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.8 KiB

BIN
assets/pred_佯.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 9.3 KiB

BIN
assets/pred_俺.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.6 KiB

BIN
assets/pred_傍.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.2 KiB

BIN
assets/pred_傲.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 9.6 KiB

BIN
assets/pred_八.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.5 KiB

BIN
assets/pred_军.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.2 KiB

BIN
assets/pred_凹.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.2 KiB

BIN
assets/pred_吧.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.6 KiB

BIN
assets/pred_呐.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.6 KiB

BIN
assets/pred_奥.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.5 KiB

BIN
assets/pred_安.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.5 KiB

BIN
assets/pred_宋.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.3 KiB

BIN
assets/pred_宽.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.3 KiB

BIN
assets/pred_巴.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.9 KiB

BIN
assets/pred_年.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.8 KiB

BIN
assets/pred_弟.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.5 KiB

BIN
assets/pred_捞.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.6 KiB

BIN
assets/pred_换.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.1 KiB

BIN
assets/pred_昂.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.9 KiB

BIN
assets/pred_晒.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.9 KiB

BIN
assets/pred_杯.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.6 KiB

BIN
assets/pred_梆.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.8 KiB

BIN
assets/pred_氨.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.6 KiB

BIN
assets/pred_男.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.8 KiB

BIN
assets/pred_百.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.8 KiB

BIN
assets/pred_磅.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.5 KiB

BIN
assets/pred_笨.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.6 KiB

BIN
assets/pred_肮.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.7 KiB

BIN
assets/pred_蔼.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 9.7 KiB

BIN
assets/pred_败.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.1 KiB

BIN
assets/pred_跋.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.5 KiB

BIN
assets/pred_邦.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.1 KiB

BIN
assets/pred_霸.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 9.9 KiB

BIN
assets/pred_颁.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.2 KiB

BIN
assets/pred_饶.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 9.7 KiB

Binary file not shown.

2
dataset/.gitignore vendored
View File

@@ -1,3 +1,5 @@
hwdb_raw/ hwdb_raw/
*.tfrecord *.tfrecord
casia_hwdb.pyhwdb_11.tfrecord casia_hwdb.pyhwdb_11.tfrecord
HWDB1.1tst_gnt.tfrecord
HWDB1.1trn_gnt.tfrecord

View File

@@ -19,34 +19,6 @@ import os
this_dir = os.path.dirname(os.path.abspath(__file__)) this_dir = os.path.dirname(os.path.abspath(__file__))
class CASIAHWDBGNT(object):
"""
A .gnt file may contains many images and charactors
"""
def __init__(self, f_p):
self.f_p = f_p
def get_data_iter(self):
header_size = 10
with open(self.f_p, 'rb') as f:
while True:
header = np.fromfile(f, dtype='uint8', count=header_size)
if not header.size:
break
sample_size = header[0] + (header[1] << 8) + (
header[2] << 16) + (header[3] << 24)
tagcode = header[5] + (header[4] << 8)
width = header[6] + (header[7] << 8)
height = header[8] + (header[9] << 8)
if header_size + width * height != sample_size:
break
image = np.fromfile(f, dtype='uint8',
count=width * height).reshape(
(height, width))
yield image, tagcode
def parse_example(record): def parse_example(record):
features = tf.io.parse_single_example(record, features = tf.io.parse_single_example(record,
features={ features={
@@ -57,34 +29,101 @@ def parse_example(record):
}) })
img = tf.io.decode_raw(features['image'], out_type=tf.uint8) img = tf.io.decode_raw(features['image'], out_type=tf.uint8)
img = tf.cast(tf.reshape(img, (64, 64)), dtype=tf.float32) img = tf.cast(tf.reshape(img, (64, 64)), dtype=tf.float32)
label = tf.cast(features['label'], tf.int32) label = tf.cast(features['label'], tf.int64)
return {'image': img, 'label': label}
def parse_example_v2(record):
"""
latest version format
:param record:
:return:
"""
features = tf.io.parse_single_example(record,
features={
'width':
tf.io.FixedLenFeature([], tf.int64),
'height':
tf.io.FixedLenFeature([], tf.int64),
'label':
tf.io.FixedLenFeature([], tf.int64),
'image':
tf.io.FixedLenFeature([], tf.string),
})
img = tf.io.decode_raw(features['image'], out_type=tf.uint8)
# we can not reshape since it stores with original size
w = features['width']
h = features['height']
img = tf.cast(tf.reshape(img, (w, h)), dtype=tf.float32)
label = tf.cast(features['label'], tf.int64)
return {'image': img, 'label': label} return {'image': img, 'label': label}
def load_ds(): def load_ds():
input_files = ['dataset/hwdb_11.tfrecord'] input_files = ['dataset/HWDB1.1trn_gnt.tfrecord']
ds = tf.data.TFRecordDataset(input_files) ds = tf.data.TFRecordDataset(input_files)
ds = ds.map(parse_example) ds = ds.map(parse_example)
return ds return ds
def load_characters(): def load_val_ds():
input_files = ['dataset/HWDB1.1tst_gnt.tfrecord']
ds = tf.data.TFRecordDataset(input_files)
ds = ds.map(parse_example_v2)
return ds
a = open(os.path.join(this_dir, 'charactors.txt'), 'r').readlines()
def load_characters():
a = open(os.path.join(this_dir, 'characters.txt'), 'r').readlines()
return [i.strip() for i in a] return [i.strip() for i in a]
if __name__ == "__main__": if __name__ == "__main__":
ds = load_ds() ds = load_ds()
val_ds = load_val_ds()
val_ds = val_ds.shuffle(100)
charactors = load_characters() charactors = load_characters()
for img, label in ds.take(9):
# start training on model... is_show_combine = False
img = img.numpy() if is_show_combine:
img = np.resize(img, (64, 64)) combined = np.zeros([32*10, 32*20], dtype=np.uint8)
print(img.shape) i = 0
label = label.numpy() res = ''
label = charactors[label] for data in val_ds.take(200):
print(label) # start training on model...
cv2.imshow('rr', img) img, label = data['image'], data['label']
img = img.numpy()
img = np.array(img, dtype=np.uint8)
img = cv2.resize(img, (32, 32))
label = label.numpy()
label = charactors[label]
print(label)
row = i // 20
col = i % 20
print(i, col)
print(row, col)
combined[row*32: (row+1)*32, col*32: (col+1)*32] = img
i += 1
res += label
cv2.imshow('rr', combined)
print(res)
cv2.imwrite('assets/combined.png', combined)
cv2.waitKey(0) cv2.waitKey(0)
# break # break
else:
i = 0
for data in val_ds.take(36):
# start training on model...
img, label = data['image'], data['label']
img = img.numpy()
img = np.array(img, dtype=np.uint8)
print(img.shape)
# img = cv2.resize(img, (64, 64))
label = label.numpy()
label = charactors[label]
print(label)
cv2.imshow('rr', img)
cv2.imwrite('assets/{}.png'.format(i), img)
i += 1
cv2.waitKey(0)
# break

View File

@@ -1,6 +1,7 @@
""" """
generates HWDB data into tfrecord generates HWDB data into tfrecord
""" """
import sys
import struct import struct
import numpy as np import numpy as np
import cv2 import cv2
@@ -23,69 +24,83 @@ class CASIAHWDBGNT(object):
with open(self.f_p, 'rb') as f: with open(self.f_p, 'rb') as f:
while True: while True:
header = np.fromfile(f, dtype='uint8', count=header_size) header = np.fromfile(f, dtype='uint8', count=header_size)
if not header.size: if not header.size:
break break
sample_size = header[0] + (header[1]<<8) + (header[2]<<16) + (header[3]<<24) sample_size = header[0] + (header[1] << 8) + (header[2] << 16) + (header[3] << 24)
tagcode = header[5] + (header[4]<<8) tagcode = header[5] + (header[4] << 8)
width = header[6] + (header[7]<<8) width = header[6] + (header[7] << 8)
height = header[8] + (header[9]<<8) height = header[8] + (header[9] << 8)
if header_size + width*height != sample_size: if header_size + width * height != sample_size:
break break
image = np.fromfile(f, dtype='uint8', count=width*height).reshape((height, width)) image = np.fromfile(f, dtype='uint8', count=width * height).reshape((height, width))
yield image, tagcode yield image, tagcode
def run(): def run(p):
all_hwdb_gnt_files = glob.glob('./hwdb_raw/HWDB1.1trn_gnt/*.gnt') all_hwdb_gnt_files = glob.glob(os.path.join(p, '*.gnt'))
logging.info('got all {} gnt files.'.format(len(all_hwdb_gnt_files))) logging.info('got all {} gnt files.'.format(len(all_hwdb_gnt_files)))
logging.info('gathering charset...') logging.info('gathering charset...')
charset = [] charset = []
if os.path.exists('charactors.txt'): if os.path.exists('characters.txt'):
logging.info('found exist charactors.txt...') logging.info('found exist characters.txt...')
with open('charactors.txt', 'r') as f: with open('characters.txt', 'r') as f:
charset = f.readlines() charset = f.readlines()
charset = [i.strip() for i in charset] charset = [i.strip() for i in charset]
else: else:
for gnt in all_hwdb_gnt_files: if 'trn' in p:
hwdb = CASIAHWDBGNT(gnt) for gnt in all_hwdb_gnt_files:
for img, tagcode in hwdb.get_data_iter(): hwdb = CASIAHWDBGNT(gnt)
try: for img, tagcode in hwdb.get_data_iter():
label = struct.pack('>H', tagcode).decode('gb2312') try:
label = label.replace('\x00', '') label = struct.pack('>H', tagcode).decode('gb2312')
charset.append(label) label = label.replace('\x00', '')
except Exception as e: charset.append(label)
continue except Exception as e:
charset = sorted(set(charset)) continue
with open('charactors.txt', 'w') as f: charset = sorted(set(charset))
f.writelines('\n'.join(charset)) with open('characters.txt', 'w') as f:
logging.info('all got {} charactors.'.format(len(charset))) f.writelines('\n'.join(charset))
logging.info('all got {} characters.'.format(len(charset)))
logging.info('{}'.format(charset[:10])) logging.info('{}'.format(charset[:10]))
tfrecord_f = 'casia_hwdb_1.0_1.1.tfrecord' tfrecord_f = os.path.basename(os.path.dirname(p)) + '.tfrecord'
logging.info('tfrecord file saved into: {}'.format(tfrecord_f))
i = 0 i = 0
with tf.io.TFRecordWriter(tfrecord_f) as tfrecord_writer: with tf.io.TFRecordWriter(tfrecord_f) as tfrecord_writer:
for gnt in all_hwdb_gnt_files: for gnt in all_hwdb_gnt_files:
hwdb = CASIAHWDBGNT(gnt) hwdb = CASIAHWDBGNT(gnt)
for img, tagcode in hwdb.get_data_iter(): for img, tagcode in hwdb.get_data_iter():
try: try:
img = cv2.resize(img, (64, 64)) # why do you need resize?
label = struct.pack('>H', tagcode).decode('gb2312') w = img.shape[0]
h = img.shape[1]
# img = cv2.resize(img, (64, 64))
label = struct.pack('>H', tagcode).decode('gb2312')
label = label.replace('\x00', '') label = label.replace('\x00', '')
index = charset.index(label) index = charset.index(label)
# save img, label as example # save img, label as example
example = tf.train.Example(features=tf.train.Features( example = tf.train.Example(features=tf.train.Features(
feature={ feature={
"label": tf.train.Feature(int64_list=tf.train.Int64List(value=[index])), "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[index])),
'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img.tobytes()])) 'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img.tobytes()])),
})) 'width': tf.train.Feature(int64_list=tf.train.Int64List(value=[w])),
'height': tf.train.Feature(int64_list=tf.train.Int64List(value=[h])),
}))
tfrecord_writer.write(example.SerializeToString()) tfrecord_writer.write(example.SerializeToString())
if i%500: if i % 5000:
logging.info('solved {} examples. {}: {}'.format(i, label, index)) logging.info('solved {} examples. {}: {}'.format(i, label, index))
i += 1 i += 1
except Exception as e: except Exception as e:
logging.error(e) logging.error(e)
e.with_traceback()
continue continue
logging.info('done.') logging.info('done.')
if __name__ == "__main__": if __name__ == "__main__":
run() if len(sys.argv) <= 1:
logging.error('send a pattern like this: {}'.format('./hwdb_raw/HWDB1.1trn_gnt/'))
else:
p = sys.argv[1]
logging.info('converting from: {}'.format(p))
run(p)

View File

@@ -1,2 +1,2 @@
wget http://www.nlpr.ia.ac.cn/databases/download/feature_data/HWDB1.1trn_gnt.zip wget http://www.nlpr.ia.ac.cn/databases/download/feature_data/HWDB1.1trn_gnt.zip
wget wget http://www.nlpr.ia.ac.cn/databases/download/feature_data/HWDB1.1tst_gnt.zip wget http://www.nlpr.ia.ac.cn/databases/download/feature_data/HWDB1.1tst_gnt.zip

77
demo.py Executable file
View File

@@ -0,0 +1,77 @@
"""
inference on a single Chinese character
image and recognition the meaning of it
"""
from alfred.dl.tf.common import mute_tf
mute_tf()
import os
import cv2
import sys
import numpy as np
import tensorflow as tf
from alfred.utils.log import logger as logging
import tensorflow_datasets as tfds
from dataset.casia_hwdb import load_ds, load_characters, load_val_ds
from models.cnn_net import CNNNet, build_net_002, build_net_003
import glob
target_size = 64
characters = load_characters()
num_classes = len(characters)
# use_keras_fit = False
use_keras_fit = True
ckpt_path = './checkpoints/cn_ocr-{epoch}.ckpt'
def preprocess(x):
    """Turn one example dict into a ``(image, label)`` training pair.

    Adds a trailing channel axis, resizes to ``(target_size, target_size)``
    and maps pixel values from [0, 255] into roughly [-1, 1].
    """
    image = tf.expand_dims(x['image'], axis=-1)  # HxW -> HxWx1
    image = tf.image.resize(image, (target_size, target_size))
    # center around zero: (v - 128) / 128 gives approximately [-1, 1]
    x['image'] = (image - 128.) / 128.
    return x['image'], x['label']
def get_model():
    """Build the recognition net and restore the latest checkpoint.

    Returns:
        The model with weights restored from the newest checkpoint found
        under the directory of ``ckpt_path``.

    Raises:
        FileNotFoundError: if no checkpoint exists. The original code fell
            through and implicitly returned ``None``, which only crashed
            later inside ``predict`` with a confusing ``TypeError``; raising
            here makes the failure immediate and explicit.
    """
    # init model
    model = build_net_003((64, 64, 1), num_classes)
    logging.info('model loaded.')
    latest_ckpt = tf.train.latest_checkpoint(os.path.dirname(ckpt_path))
    if not latest_ckpt:
        logging.error('can not found any checkpoints matched: {}'.format(ckpt_path))
        raise FileNotFoundError(
            'no checkpoint found under: {}'.format(os.path.dirname(ckpt_path)))
    # checkpoint files look like cn_ocr-<epoch>.ckpt; recover the epoch tag
    start_epoch = int(latest_ckpt.split('-')[1].split('.')[0])
    model.load_weights(latest_ckpt)
    logging.info('model resumed from: {} at epoch: {}'.format(latest_ckpt, start_epoch))
    return model
def predict(model, img_f):
    """Recognize the single Chinese character in image file ``img_f``.

    Prints the predicted character and writes the original image to
    ``assets/pred_<char>.png`` for later inspection.

    Args:
        model: a callable recognition network returning class scores.
        img_f: path to the image file to classify.
    """
    ori_img = cv2.imread(img_f)
    # take one channel only; assumes a grayscale-like input image — TODO confirm
    img = tf.expand_dims(ori_img[:, :, 0], axis=-1)
    img = tf.image.resize(img, (target_size, target_size))
    img = (img - 128.) / 128.           # same normalization as training
    img = tf.expand_dims(img, axis=0)   # add batch dimension
    out = model(img).numpy()
    # compute argmax once and reuse it (original recomputed it three times)
    pred_char = characters[np.argmax(out[0])]
    print('predict: {}'.format(pred_char))
    cv2.imwrite('assets/pred_{}.png'.format(pred_char), ori_img)
if __name__ == '__main__':
    # run the recognizer over every sample image under assets/
    model = get_model()
    for img_f in glob.glob('assets/*.png'):
        preview = cv2.imread(img_f)
        cv2.imshow('rr', preview)
        predict(model, img_f)
        cv2.waitKey(0)

View File

@@ -1,24 +1,5 @@
'''
conv_1 = slim.conv2d(images, 64, [3, 3], 1, padding='SAME', scope='conv1')
# (inputs,num_outputs,[卷积核个数] kernel_size,[卷积核的高度,卷积核的宽]stride=1,padding='SAME',)
max_pool_1 = slim.max_pool2d(conv_1, [2, 2], [2, 2], padding='SAME')
conv_2 = slim.conv2d(max_pool_1, 128, [3, 3], padding='SAME', scope='conv2')
max_pool_2 = slim.max_pool2d(conv_2, [2, 2], [2, 2], padding='SAME')
conv_3 = slim.conv2d(max_pool_2, 256, [3, 3], padding='SAME', scope='conv3')
max_pool_3 = slim.max_pool2d(conv_3, [2, 2], [2, 2], padding='SAME')
flatten = slim.flatten(max_pool_3)
fc1 = slim.fully_connected(tf.nn.dropout(flatten, keep_prob), 1024, activation_fn=tf.nn.tanh, scope='fc1')
logits = slim.fully_connected(tf.nn.dropout(fc1, keep_prob), FLAGS.charset_size, activation_fn=None, scope='fc2')
# logits = slim.fully_connected(flatten, FLAGS.charset_size, activation_fn=None, reuse=reuse, scope='fc')
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# y表示的是实际类别y_表示预测结果这实际上面是把原来的神经网络输出层的softmax和cross_entrop何在一起计算为了追求速度
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), labels), tf.float32))
'''
import tensorflow as tf import tensorflow as tf
from tensorflow.keras import layers from tensorflow.keras import layers
@@ -54,6 +35,23 @@ def build_net_002(input_shape, n_classes):
return model return model
# this model is converge in terms of chinese characters classification
# so simply is effective sometimes, adding a dense maybe model will be better?
def build_net_003(input_shape, n_classes):
model = tf.keras.Sequential([
layers.Conv2D(input_shape=input_shape, filters=32, kernel_size=(3, 3), strides=(1, 1),
padding='same', activation='relu'),
layers.MaxPool2D(pool_size=(2, 2), padding='same'),
layers.Conv2D(filters=64, kernel_size=(3, 3), padding='same'),
layers.MaxPool2D(pool_size=(2, 2), padding='same'),
layers.Flatten(),
# layers.Dense(1024, activation='relu'),
layers.Dense(n_classes, activation='softmax')
])
return model
# some models wrapped into tf.keras.Model # some models wrapped into tf.keras.Model
class CNNNet(tf.keras.Model): class CNNNet(tf.keras.Model):

322
readme.md
View File

@@ -1,9 +1,29 @@
# TensorFlow 2.0 中文手写字识别 # TensorFlow 2.0 中文手写字识别汉字OCR
> 在开始之前必须要说明的是本教程完全基于TensorFlow2.0 接口编写请误与其他古老的教程混为一谈本教程除了手把手教大家完成这个挑战性任务之外更多的会教大家如何分析整个调参过程的思考过程力求把人工智能算法工程师日常的工作通过这个例子毫无保留的展示给大家。另外我们建立了一个高端算法分享平台希望得到大家的支持http://manaai.cn , 也欢迎大家来我们的AI社区交流 http://talk.strangeai.pro
还在玩ministfashionmnist不如来尝试一下类别多大3000+的汉字手写识别吧虽然以前有一些文章教大家如何操作但是大多比较古老这篇文章将用全新的TensorFlow 2.0 来教大家如何搭建一个中文OCR系统
让我们来看一下相比于简单minist识别汉字识别具有哪些难点
- 搜索空间空前巨大我们使用的数据集1.0版本汉字就多大3755个如果加上1.1版本一起总共汉字可以分为多达7599+个类别这比10个阿拉伯字母识别难度大很多
- 数据集处理挑战更大:相比于mnist和fashionmnist来说,汉字手写字体识别数据集非常少,而且仅有的数据集数据预处理难度非常大,非常不直观,但是千万别吓到,相信你看完本教程一定会收获满满!
- 汉字识别更考验选手的建模能力还在分类花分类猫和狗随便搭建的几层在搜索空间巨大的汉字手写识别里根本不work你现在是不是想用很深的网络跃跃欲试更深的网络在这个任务上可能根本不可行看完本教程我们就可以一探究竟总之一句话模型太简单和太复杂都不好甚至会发散想亲身体验模型训练发散抓狂的可以来尝试一下
但是,挑战这个任务也有很多好处:
- 本教程基于TensorFlow2.0从数据预处理图片转Tensor以及Tensor的一系列骚操作都包含在内做完本任务相信你会对TensorFlow2.0 API有一个很深刻的认识
- 如果你是新手,通过这个教程你完全可以深入体会一下调参(或者说随意修改网络)的纠结性和蛋疼性!
本项目实现了基于CNN的中文手写字识别并且采用标准的**tensorflow 2.0 api** 来构建!相比对简单的字母手写识别,本项目更能体现模型设计的精巧性和数据增强的熟练操作性,并且最终设计出来的模型可以直接应用于工业场合,比如 **票据识别**, **手写文本自动扫描**相比于百度api接口或者QQ接口等具有可优化性、免费性、本地性等优点。 本项目实现了基于CNN的中文手写字识别并且采用标准的**tensorflow 2.0 api** 来构建!相比对简单的字母手写识别,本项目更能体现模型设计的精巧性和数据增强的熟练操作性,并且最终设计出来的模型可以直接应用于工业场合,比如 **票据识别**, **手写文本自动扫描**相比于百度api接口或者QQ接口等具有可优化性、免费性、本地性等优点。
## Data
## 数据准备
在开始之前先介绍一下本项目所采用的数据信息。我们的数据全部来自于CASIA的开源中文手写字数据集该数据集分为两部分 在开始之前先介绍一下本项目所采用的数据信息。我们的数据全部来自于CASIA的开源中文手写字数据集该数据集分为两部分
@@ -11,17 +31,23 @@
- CASIA-OLHWDB在线的HWDB格式一样包含了约7185个汉字以及171个英文字母、数字、标点符号等我们不用。 - CASIA-OLHWDB在线的HWDB格式一样包含了约7185个汉字以及171个英文字母、数字、标点符号等我们不用。
其实你下载1.0的train和test差不多已经够了可以直接运行 `dataset/get_hwdb_1.0_1.1.sh` 下载。原始数据下载链接点击[这里](http://www.nlpr.ia.ac.cn/databases/handwriting/Offline_database.html). 其实你下载1.0的train和test差不多已经够了可以直接运行 `dataset/get_hwdb_1.0_1.1.sh` 下载。原始数据下载链接点击[这里](http://www.nlpr.ia.ac.cn/databases/handwriting/Offline_database.html).
由于原始数据过于复杂,我们自己写了一个数据wrapper方便读取统一将其转换为类似于Dataframe (Pandas)的格式这样可以将一个字的特征和label方便的显示也可以十分方便的将手写字转换为图片采用CNN进行处理。这是我们展示的效果: 由于原始数据过于复杂,我们使用一个类来封装数据读取过程,这是我们展示的效果:
<p align="center"> <p align="center">
<img src="https://s2.ax1x.com/2019/05/27/VeFtZq.md.png" />
<img src="/home/jintian/Downloads/1240" />
</p>
看到这么密密麻麻的文字相信连人类都.... 开始头疼了,这些复杂的文字能够通过一个神经网络来识别出来??答案是肯定的.... 不有得感叹一下神经网络的强大。。上面的部分文字识别出来的结果是这样的:
<p align="center">
<img src="https://upload-images.jianshu.io/upload_images/617746-c1ffebc0c4ab0554.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240" />
</p> </p>
其对应的label为
```
['!', '"', '#', '$', '%', '&', '(', ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~', '、', '。', '々', '…', '', '', '“', '”']
```
关于数据的处理部分,从服务器下载到的原始数据是 `trn_gnt.zip` 解压之后是 `gnt.alz` 需要再次解压得到一个包含 gnt文件的文件夹。里面每一个gnt文件都包含了若干个汉字及其标注。直接处理比较麻烦也不方便抽取出图片再进行操作**虽然转为图片存入文件夹比较直观,但是不适合批量读取和训练**, 后面我们统一转为tfrecord进行训练。 关于数据的处理部分,从服务器下载到的原始数据是 `trn_gnt.zip` 解压之后是 `gnt.alz` 需要再次解压得到一个包含 gnt文件的文件夹。里面每一个gnt文件都包含了若干个汉字及其标注。直接处理比较麻烦也不方便抽取出图片再进行操作**虽然转为图片存入文件夹比较直观,但是不适合批量读取和训练**, 后面我们统一转为tfrecord进行训练。
@@ -33,8 +59,282 @@
- `cd dataset && python3 convert_to_tfrecord.py`, 请注意我们使用的是tf2.0 - `cd dataset && python3 convert_to_tfrecord.py`, 请注意我们使用的是tf2.0
- 你需要修改对应的路径等待生成完成大概有89万个example如果1.0和1.1都用那估计得double。 - 你需要修改对应的路径等待生成完成大概有89万个example如果1.0和1.1都用那估计得double。
## Model
关于我们采用的OCR模型的构建我们大致采用的是比较先进的MobileNetV3架构同时设计了一个修改的过的MobileNetV3Big的更深网络。主要考虑模型的轻量型和表达能力。最终训练结果表明我们的模型可以在中文手写字上达到约99.8%的准确率。
## 模型构建
关于我们采用的OCR模型的构建我们构建了3个模型分别做测试三个模型的复杂度逐渐的复杂网络层数逐渐深入。但是到最后发现最复杂的那个模型竟然不收敛。这个其中一个稍微简单模型的训练过程
![image.png](https://upload-images.jianshu.io/upload_images/617746-ebf0cd9de522066f.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)
大家可以看到,准确率可以在短时间内达到87%,非常不错,测试集的准确率大概在40%,由于测试集中的样本在训练集中完全没有出现,相对训练集的准确率来讲偏低。可能原因无外乎两个,一个是模型泛化性能不强,另外一个原因是训练还不够。
不过好在这个简单的模型也能达到训练集90%的准确率it's a good start. 让我们来看一下如何快速的构建一个OCR网络模型
```python
def build_net_003(input_shape, n_classes):
model = tf.keras.Sequential([
layers.Conv2D(input_shape=input_shape, filters=32, kernel_size=(3, 3), strides=(1, 1),
padding='same', activation='relu'),
layers.MaxPool2D(pool_size=(2, 2), padding='same'),
layers.Conv2D(filters=64, kernel_size=(3, 3), padding='same'),
layers.MaxPool2D(pool_size=(2, 2), padding='same'),
layers.Flatten(),
layers.Dense(n_classes, activation='softmax')
])
return model
```
这是我们使用keras API构建的一个模型它足够简单仅仅包含两个卷积层以及两个maxpool层。下面我们让大家知道即便是再简单的模型有时候也能发挥出巨大的用处对于某些特定的问题可能比更深的网络更有用途。关于这部分模型构建大家只要知道这么几点
- 如果你只是构建序列模型没有太fancy的跳跃链接你可以直接用`keras.Sequential` 来构建你的模型;
- Conv2D中最好指定每个参数的名字,不要省略,否则别人不知道你写的是输入的通道数还是filters。
最后,在你看完本篇博客后,并准备自己动手复现这个教程的时候, 可以思考一下为什么下面这个模型就发散了呢?(仅仅稍微复杂一点):
```python
def build_net_002(input_shape, n_classes):
model = tf.keras.Sequential([
layers.Conv2D(input_shape=input_shape, filters=64, kernel_size=(3, 3), strides=(1, 1),
padding='same', activation='relu'),
layers.MaxPool2D(pool_size=(2, 2), padding='same'),
layers.Conv2D(filters=128, kernel_size=(3, 3), padding='same'),
layers.MaxPool2D(pool_size=(2, 2), padding='same'),
layers.Conv2D(filters=256, kernel_size=(3, 3), padding='same'),
layers.MaxPool2D(pool_size=(2, 2), padding='same'),
layers.Flatten(),
layers.Dense(1024, activation='relu'),
layers.Dense(n_classes, activation='softmax')
])
return model
```
## 数据输入
其实最复杂的还是数据准备过程啊。这里着重说一下我们的数据存入tfrecords中的事image和label也就是这么一个example
```
example = tf.train.Example(features=tf.train.Features(
feature={
"label": tf.train.Feature(int64_list=tf.train.Int64List(value=[index])),
'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img.tobytes()])),
'width': tf.train.Feature(int64_list=tf.train.Int64List(value=[w])),
'height': tf.train.Feature(int64_list=tf.train.Int64List(value=[h])),
}))
```
然后读取的时候相应的读取即可,这里告诉大家几点坑爹的地方:
- 将numpyarray的bytes存入tfrecord跟将文件的bytes直接存入tfrecord解码的方式事不同的由于我们的图片数据不是来自于本地文件所以我们使用了一个tobytes()方法存入的事numpy array的bytes格式它实际上并不包含维度信息所以这就是坑爹的地方之一如果你不同时存储width和height你后面读取的时候便无法知道维度**存储tfrecord顺便存储图片长宽事一个好的习惯**.
- 关于不同的存储方式解码的方法有坑爹的地方比如这里我们存储numpy array的bytes通常情况下你很难知道如何解码。。不看本教程应该很多人不知道
最后load tfrecord也就比较直观了
```python
def parse_example(record):
features = tf.io.parse_single_example(record,
features={
'label':
tf.io.FixedLenFeature([], tf.int64),
'image':
tf.io.FixedLenFeature([], tf.string),
})
img = tf.io.decode_raw(features['image'], out_type=tf.uint8)
img = tf.cast(tf.reshape(img, (64, 64)), dtype=tf.float32)
label = tf.cast(features['label'], tf.int64)
return {'image': img, 'label': label}
def parse_example_v2(record):
"""
latest version format
:param record:
:return:
"""
features = tf.io.parse_single_example(record,
features={
'width':
tf.io.FixedLenFeature([], tf.int64),
'height':
tf.io.FixedLenFeature([], tf.int64),
'label':
tf.io.FixedLenFeature([], tf.int64),
'image':
tf.io.FixedLenFeature([], tf.string),
})
img = tf.io.decode_raw(features['image'], out_type=tf.uint8)
# we can not reshape since it stores with original size
w = features['width']
h = features['height']
img = tf.cast(tf.reshape(img, (w, h)), dtype=tf.float32)
label = tf.cast(features['label'], tf.int64)
return {'image': img, 'label': label}
def load_ds():
input_files = ['dataset/HWDB1.1trn_gnt.tfrecord']
ds = tf.data.TFRecordDataset(input_files)
ds = ds.map(parse_example)
return ds
```
这个v2的版本就是兼容了新的存入长宽的方式因为我第一次生成的时候就没有保存。。。最后入坑了。注意这行代码
```
img = tf.io.decode_raw(features['image'], out_type=tf.uint8)
```
它是对raw bytes进行解码这个解码跟从文件读取bytes存入tfrecord的有着本质的不同。**同时注意type的变化这里以unit8的方式解码因为我们存储进去的就是uint8**.
## 训练过程
不瞒你说我一开始写了一个很复杂的模型训练了大概一个晚上结果准确率0.00012 发散了。后面改成了更简单的模型才收敛。整个过程的训练pipleline
```python
def train():
all_characters = load_characters()
num_classes = len(all_characters)
logging.info('all characters: {}'.format(num_classes))
train_dataset = load_ds()
train_dataset = train_dataset.shuffle(100).map(preprocess).batch(32).repeat()
val_ds = load_val_ds()
val_ds = val_ds.shuffle(100).map(preprocess).batch(32).repeat()
for data in train_dataset.take(2):
print(data)
# init model
model = build_net_003((64, 64, 1), num_classes)
model.summary()
logging.info('model loaded.')
start_epoch = 0
latest_ckpt = tf.train.latest_checkpoint(os.path.dirname(ckpt_path))
if latest_ckpt:
start_epoch = int(latest_ckpt.split('-')[1].split('.')[0])
model.load_weights(latest_ckpt)
logging.info('model resumed from: {}, start at epoch: {}'.format(latest_ckpt, start_epoch))
else:
logging.info('passing resume since weights not there. training from scratch')
if use_keras_fit:
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=['accuracy'])
callbacks = [
tf.keras.callbacks.ModelCheckpoint(ckpt_path,
save_weights_only=True,
verbose=1,
period=500)
]
try:
model.fit(
train_dataset,
validation_data=val_ds,
validation_steps=1000,
epochs=15000,
steps_per_epoch=1024,
callbacks=callbacks)
except KeyboardInterrupt:
model.save_weights(ckpt_path.format(epoch=0))
logging.info('keras model saved.')
model.save_weights(ckpt_path.format(epoch=0))
model.save(os.path.join(os.path.dirname(ckpt_path), 'cn_ocr.h5'))
```
在本系列教程开篇之际,我们就立下了几条准则,其中一条就是**handle everything**, 从这里就能看出,它是一个很稳健的训练代码,同时也很自动化:
- 自动寻找之前保存的最新模型;
- 自动保存模型;
- 捕捉ctrl + c事件保存模型。
- 支持断点续训练
大家在以后编写训练代码的时候其实可以保持这个好的习惯。
OK整个模型训练起来之后可以在短时间内达到95%的准确率:
![image.png](https://upload-images.jianshu.io/upload_images/617746-c6658fc6ec7ae3cc.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)
效果还是很不错的!
## 模型测试
最后模型训练完了,时候测试一下模型效果到底咋样。我们使用了一些简单的文字来测试:
![image.png](https://upload-images.jianshu.io/upload_images/617746-e6574a10eab17af8.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)
这个字写的还真的。。。。具有鬼神之势。相信普通人类大部分字都能认出来,不过有些字还真的。。。。不好认。看看神经网络的表现怎么样!
![image.png](https://upload-images.jianshu.io/upload_images/617746-0ad1cc57975396d2.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)
这是大概2000次训练的结果 基本上能识别出来了!神经网络的认字能力还不错的! 收工!
## 总结
通过本教程我们完成了使用tensorflow 2.0全新的API搭建一个中文汉字手写识别系统。模型基本能够实现我们想要的功能。要知道这个模型可是在搜索空间多大3755的类别当中准确的找到最相似的类别通过本实验我们有几点心得
- 神经网络不仅仅是在学习,它具有一定的想象力!!比如它的一些看着很像的字:拜-佯, 扮-捞,笨-苯.... 这些字如果手写出来,连人都比较难以辨认!!但是大家要知道这些字在类别上并不是相领的!也就是说,模型具有一定的联想能力!
- 不管问题多复杂,要敢于动手、善于动手。
最后希望大家对本文点个赞编写教程不容易。希望大家多多支持。笨教程将支持为大家输出全新的tensorflow2.0教程!欢迎关注!!
本文所有代码开源在:
https://github.com/jinfagang/ocrcn_tf2.git
记得随手star哦
我们的AI社区
http://talk.strangeai.pro
全球最大的开源AI代码平台
http://manaai.cn

Binary file not shown.

Before

Width:  |  Height:  |  Size: 71 KiB

View File

@@ -1,11 +1,11 @@
''' """
training HWDB Chinese charactors classification
on MobileNetV2 training a simple net on Chinese Characters classification dataset
''' we got about 90% accuracy by simply applying a simple CNN net
"""
from alfred.dl.tf.common import mute_tf from alfred.dl.tf.common import mute_tf
mute_tf() mute_tf()
import os import os
import sys import sys
import numpy as np import numpy as np
@@ -13,8 +13,8 @@ import tensorflow as tf
from alfred.utils.log import logger as logging from alfred.utils.log import logger as logging
import tensorflow_datasets as tfds import tensorflow_datasets as tfds
from dataset.casia_hwdb import load_ds, load_characters from dataset.casia_hwdb import load_ds, load_characters, load_val_ds
from models.cnn_net import CNNNet, build_net_002 from models.cnn_net import CNNNet, build_net_002, build_net_003
@@ -29,10 +29,10 @@ def preprocess(x):
""" """
minus mean pixel or normalize? minus mean pixel or normalize?
""" """
# original is 64x64, add a channel dim
x['image'] = tf.expand_dims(x['image'], axis=-1) x['image'] = tf.expand_dims(x['image'], axis=-1)
x['image'] = tf.image.resize(x['image'], (target_size, target_size)) x['image'] = tf.image.resize(x['image'], (target_size, target_size))
x['image'] /= 255. x['image'] = (x['image'] - 128.) / 128.
x['image'] = 2 * x['image'] - 1
return x['image'], x['label'] return x['image'], x['label']
@@ -41,10 +41,16 @@ def train():
num_classes = len(all_characters) num_classes = len(all_characters)
logging.info('all characters: {}'.format(num_classes)) logging.info('all characters: {}'.format(num_classes))
train_dataset = load_ds() train_dataset = load_ds()
train_dataset = train_dataset.shuffle(100).map(preprocess).batch(4).repeat() train_dataset = train_dataset.shuffle(100).map(preprocess).batch(32).repeat()
val_ds = load_val_ds()
val_ds = val_ds.shuffle(100).map(preprocess).batch(32).repeat()
for data in train_dataset.take(2):
print(data)
# init model # init model
model = build_net_002((64, 64, 1), num_classes) model = build_net_003((64, 64, 1), num_classes)
model.summary() model.summary()
logging.info('model loaded.') logging.info('model loaded.')
@@ -62,10 +68,20 @@ def train():
optimizer=tf.keras.optimizers.Adam(), optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(), loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=['accuracy']) metrics=['accuracy'])
callbacks = [
tf.keras.callbacks.ModelCheckpoint(ckpt_path,
save_weights_only=True,
verbose=1,
period=500)
]
try: try:
model.fit( model.fit(
train_dataset, epochs=50, train_dataset,
steps_per_epoch=700, ) validation_data=val_ds,
validation_steps=1000,
epochs=15000,
steps_per_epoch=1024,
callbacks=callbacks)
except KeyboardInterrupt: except KeyboardInterrupt:
model.save_weights(ckpt_path.format(epoch=0)) model.save_weights(ckpt_path.format(epoch=0))
logging.info('keras model saved.') logging.info('keras model saved.')