
End-to-End OCR with TensorFlow: Second-Generation ID Card Number Recognition

I've been looking into OCR recently, with the end goal of recognizing every Chinese character and digit on an ID card. This article sets a smaller goal first: recognizing the fixed-length, 18-character ID card number. The same approach can of course be reused for fixed-length captcha recognition. The implementation idea comes mainly from Xlvector's blog, which builds end-to-end OCR on a CNN. Quoting that post, there are currently two deep-learning approaches to OCR:

  • Treat OCR as a multi-label learning problem. A captcha made of 4 digits becomes an image recognition problem with 4 (ordered) labels, solved with a CNN.
  • Treat OCR as a speech recognition problem. Speech recognition turns continuous audio into text; captcha recognition turns a continuous image into text, solved with CNN+LSTM+CTC.

Method 1 mainly handles images with fixed-length labels, while method 2 handles variable-length labels. This article implements method 1 to recognize the 18 fixed digit characters of an ID card number.
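
To make method 1 concrete, here is a minimal sketch of the label encoding it relies on (a hypothetical helper for illustration, not code from the original post): each digit position is an independent 10-way one-hot classification, and the per-position vectors are concatenated into one label vector.

import numpy as np

# Each character is one ordered label, encoded as a one-hot vector
# over the character set; the full label concatenates all positions.
def text2vec(text, char_set='0123456789'):
    vec = np.zeros(len(text) * len(char_set))
    for i, c in enumerate(text):
        vec[i * len(char_set) + char_set.index(c)] = 1.0
    return vec

print(text2vec('42'))  # length 20; indices 4 and 12 (= 10 + 2) are 1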

Environment Dependencies

  1. This article is built on the tensorflow framework and depends on a tensorflow environment; anaconda is recommended for Python package and environment management.
  2. freetype-py is used to generate training images on the fly; it can later be extended to generate training sets of Chinese-character images as well. Installing with pip is recommended:
pip install freetype-py
           
  3. The article also depends on common libraries such as numpy and opencv (note that opencv's pip package is named opencv-python, not cv2):
pip install numpy opencv-python
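
As a quick sanity check that the environment is ready (a minimal sketch; this post was written against the TensorFlow 1.x API):

import tensorflow as tf
import freetype
import numpy as np
import cv2

print(tf.__version__)  # the code below uses TF 1.x constructs such as tf.placeholder and tf.Session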
           

Background Knowledge

  1. This article does not cover how a CNN (convolutional neural network) works in detail; readers unfamiliar with CNNs can read the Jizhi (集智) post 《卷積:如何成為一個很厲害的神經網絡》, which explains it very well.
  2. The idea here is easy to understand: treat an image of 18 ordered digits as one multi-label learning problem. The label length can be anything, as long as it is fixed; this training method applies to any fixed length. Of course, many real-world cases require recognizing variable-length labels, which calls for method 2 (CNN+LSTM+CTC).

Generating the Training Dataset

First, generate the training images, relying mainly on the freetype-py library to render digit/Chinese-character images. One point to watch is the generated image size: after several attempts, this article settled on 32 x 256. If the images are too large, training may fail to converge.

A sample generated image looks like this:

[sample image: a rendered 18-digit ID number, 32 x 256]

The gen_image() method returns:

  • image_data: the image pixel data, shape (32, 256)
  • label: the image label, an 18-digit string, e.g. 477081933151463759
  • vec: the label as a vector of shape (180,), i.e. 18 concatenated one-hot vectors of length 10 (18 * 10), one marking each digit's value

#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
ID-card text/digit image generator
@author: pengyuanjie
"""
import numpy as np
import freetype
import copy
import random
import cv2


class put_chinese_text(object):
    def __init__(self, ttf):
        self._face = freetype.Face(ttf)

    def draw_text(self, image, pos, text, text_size, text_color):
        '''
        draw chinese(or not) text with ttf
        :param image: image(numpy.ndarray) to draw text
        :param pos: where to draw text
        :param text: the context, for chinese should be unicode type
        :param text_size: text size
        :param text_color: text color
        :return: image
        '''
        self._face.set_char_size(text_size * 64)
        metrics = self._face.size
        ascender = metrics.ascender / 64.0
        # descender = metrics.descender / 64.0
        # height = metrics.height / 64.0
        # linegap = height - ascender + descender
        ypos = int(ascender)
        if not isinstance(text, unicode):
            text = text.decode('utf-8')
        img = self.draw_string(image, pos[0], pos[1] + ypos, text, text_color)
        return img

    def draw_string(self, img, x_pos, y_pos, text, color):
        '''
        draw string
        :param x_pos: text x-position on img
        :param y_pos: text y-position on img
        :param text: text (unicode)
        :param color: text color
        :return: image
        '''
        prev_char = 0
        pen = freetype.Vector()
        pen.x = x_pos << 6  # shift into FreeType's 26.6 fixed-point format (x64)
        pen.y = y_pos << 6
        hscale = 1.0
        matrix = freetype.Matrix(int(hscale) * 0x10000L, int(0.2 * 0x10000L),
                                 int(0.0 * 0x10000L), int(1.1 * 0x10000L))
        cur_pen = freetype.Vector()
        pen_translate = freetype.Vector()
        image = copy.deepcopy(img)
        for cur_char in text:
            self._face.set_transform(matrix, pen_translate)
            self._face.load_char(cur_char)
            kerning = self._face.get_kerning(prev_char, cur_char)
            pen.x += kerning.x
            slot = self._face.glyph
            bitmap = slot.bitmap
            cur_pen.x = pen.x
            cur_pen.y = pen.y - slot.bitmap_top * 64
            self.draw_ft_bitmap(image, bitmap, cur_pen, color)
            pen.x += slot.advance.x
            prev_char = cur_char
        return image

    def draw_ft_bitmap(self, img, bitmap, pen, color):
        '''
        draw each char
        :param bitmap: bitmap
        :param pen: pen
        :param color: pen color e.g. (0, 0, 255) - red
        :return: image
        '''
        x_pos = pen.x >> 6
        y_pos = pen.y >> 6
        cols = bitmap.width
        rows = bitmap.rows
        glyph_pixels = bitmap.buffer
        for row in range(rows):
            for col in range(cols):
                if glyph_pixels[row * cols + col] != 0:
                    img[y_pos + row][x_pos + col][0] = color[0]
                    img[y_pos + row][x_pos + col][1] = color[1]
                    img[y_pos + row][x_pos + col][2] = color[2]


class gen_id_card(object):
    def __init__(self):
        # self.words = open('AllWords.txt', 'r').read().split(' ')
        self.number = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
        self.char_set = self.number
        # self.char_set = self.words + self.number
        self.len = len(self.char_set)

        self.max_size = 18
        self.ft = put_chinese_text('fonts/OCR-B.ttf')

    # Generate a random digit string of fixed length;
    # return the text and its vector representation
    def random_text(self):
        text = ''
        vecs = np.zeros((self.max_size * self.len))
        # size = random.randint(1, self.max_size)
        size = self.max_size
        for i in range(size):
            c = random.choice(self.char_set)
            vec = self.char2vec(c)
            text = text + c
            vecs[i * self.len:(i + 1) * self.len] = np.copy(vec)
        return text, vecs

    # Render the generated text into an image;
    # return the image pixel data, label, and label vector
    def gen_image(self):
        text, vec = self.random_text()
        img = np.zeros([32, 256, 3])
        color_ = (255, 255, 255)  # white
        pos = (0, 0)
        text_size = 21
        image = self.ft.draw_text(img, pos, text, text_size, color_)
        # Return a single channel only; color carries no meaning
        # for character recognition
        return image[:, :, 2], text, vec

    # Convert a single character to a one-hot vector
    def char2vec(self, c):
        vec = np.zeros((self.len))
        for j in range(self.len):
            if self.char_set[j] == c:
                vec[j] = 1
        return vec

    # Convert a label vector back to text
    def vec2text(self, vecs):
        text = ''
        v_len = len(vecs)
        for i in range(v_len):
            if vecs[i] == 1:
                text = text + self.char_set[i % self.len]
        return text


if __name__ == '__main__':
    genObj = gen_id_card()
    image_data, label, vec = genObj.gen_image()
    cv2.imshow('image', image_data)
    cv2.waitKey(0)
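
As a quick sanity check of the label encoding (a small sketch using the class above, not from the original post), vec2text should invert random_text:

# Round-trip check of the label encoding (assumes gen_id_card above)
gen = gen_id_card()
text, vecs = gen.random_text()
assert gen.vec2text(vecs) == text
print(text)  # an 18-digit string, e.g. 477081933151463759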
           

Building the Network and Training

First, define a method that generates one training batch:

# Generate one training batch
def get_next_batch(batch_size=128):
    obj = gen_id_card()
    batch_x = np.zeros([batch_size, IMAGE_HEIGHT * IMAGE_WIDTH])
    batch_y = np.zeros([batch_size, MAX_CAPTCHA * CHAR_SET_LEN])

    for i in range(batch_size):
        image, text, vec = obj.gen_image()
        batch_x[i, :] = image.reshape((IMAGE_HEIGHT * IMAGE_WIDTH))
        batch_y[i, :] = vec
    return batch_x, batch_y
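
Note that these snippets reference several globals (IMAGE_HEIGHT, MAX_CAPTCHA, X, Y, keep_prob, train_phase) that the post itself never defines. Based on how they are used, a plausible set of definitions looks like the following sketch; the exact values in the author's repo may differ:

import tensorflow as tf

# Constants and placeholders inferred from usage (assumptions, not from the post)
IMAGE_HEIGHT = 32        # generated images are 32 x 256
IMAGE_WIDTH = 256
MAX_CAPTCHA = 18         # fixed-length 18-digit ID number
CHAR_SET_LEN = 10        # digits 0-9

X = tf.placeholder(tf.float32, [None, IMAGE_HEIGHT * IMAGE_WIDTH])
Y = tf.placeholder(tf.float32, [None, MAX_CAPTCHA * CHAR_SET_LEN])
keep_prob = tf.placeholder(tf.float32)   # dropout keep probability
train_phase = tf.placeholder(tf.bool)    # batch-norm training/inference flag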
           

Batch Normalization is used here. The author does not yet fully understand it; interested readers can look it up. The code comes from the referenced post:

# Batch Normalization: to revisit later; tflearn and slim both provide wrappers
# http://stackoverflow.com/a/34634291/2267819
def batch_norm(x, beta, gamma, phase_train, scope='bn', decay=0.9, eps=1e-5):
    with tf.variable_scope(scope):
        # beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0), trainable=True)
        # gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, stddev), trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        # Use batch statistics (and update the moving averages) at training
        # time; use the moving averages at inference time
        mean, var = tf.cond(phase_train, mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
           

Next, define four convolutional layers and one fully connected layer. The kernels are two 5x5 layers followed by two 3x3 layers; every layer applies the tf.nn.relu non-linearity and max-pooling. Readers can tune this network structure on their own:

# Define the CNN
def crack_captcha_cnn(w_alpha=0.01, b_alpha=0.1):
    x = tf.reshape(X, shape=[-1, IMAGE_HEIGHT, IMAGE_WIDTH, 1])

    # 4 conv layers, each: conv -> batch norm -> relu -> 2x2 max-pool -> dropout
    w_c1 = tf.Variable(w_alpha * tf.random_normal([5, 5, 1, 32]))
    b_c1 = tf.Variable(b_alpha * tf.random_normal([32]))
    conv1 = tf.nn.bias_add(tf.nn.conv2d(x, w_c1, strides=[1, 1, 1, 1], padding='SAME'), b_c1)
    conv1 = batch_norm(conv1, tf.constant(0.0, shape=[32]),
                       tf.random_normal(shape=[32], mean=1.0, stddev=0.02), train_phase, scope='bn_1')
    conv1 = tf.nn.relu(conv1)
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    conv1 = tf.nn.dropout(conv1, keep_prob)

    w_c2 = tf.Variable(w_alpha * tf.random_normal([5, 5, 32, 64]))
    b_c2 = tf.Variable(b_alpha * tf.random_normal([64]))
    conv2 = tf.nn.bias_add(tf.nn.conv2d(conv1, w_c2, strides=[1, 1, 1, 1], padding='SAME'), b_c2)
    conv2 = batch_norm(conv2, tf.constant(0.0, shape=[64]),
                       tf.random_normal(shape=[64], mean=1.0, stddev=0.02), train_phase, scope='bn_2')
    conv2 = tf.nn.relu(conv2)
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    conv2 = tf.nn.dropout(conv2, keep_prob)

    w_c3 = tf.Variable(w_alpha * tf.random_normal([3, 3, 64, 64]))
    b_c3 = tf.Variable(b_alpha * tf.random_normal([64]))
    conv3 = tf.nn.bias_add(tf.nn.conv2d(conv2, w_c3, strides=[1, 1, 1, 1], padding='SAME'), b_c3)
    conv3 = batch_norm(conv3, tf.constant(0.0, shape=[64]),
                       tf.random_normal(shape=[64], mean=1.0, stddev=0.02), train_phase, scope='bn_3')
    conv3 = tf.nn.relu(conv3)
    conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    conv3 = tf.nn.dropout(conv3, keep_prob)

    w_c4 = tf.Variable(w_alpha * tf.random_normal([3, 3, 64, 64]))
    b_c4 = tf.Variable(b_alpha * tf.random_normal([64]))
    conv4 = tf.nn.bias_add(tf.nn.conv2d(conv3, w_c4, strides=[1, 1, 1, 1], padding='SAME'), b_c4)
    conv4 = batch_norm(conv4, tf.constant(0.0, shape=[64]),
                       tf.random_normal(shape=[64], mean=1.0, stddev=0.02), train_phase, scope='bn_4')
    conv4 = tf.nn.relu(conv4)
    conv4 = tf.nn.max_pool(conv4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    conv4 = tf.nn.dropout(conv4, keep_prob)

    # Fully connected layer; four 2x2 max-pools shrink 32 x 256 down to 2 x 16
    # with 64 feature channels, hence the 2*16*64 input size
    w_d = tf.Variable(w_alpha * tf.random_normal([2 * 16 * 64, 1024]))
    b_d = tf.Variable(b_alpha * tf.random_normal([1024]))
    dense = tf.reshape(conv4, [-1, w_d.get_shape().as_list()[0]])
    dense = tf.nn.relu(tf.add(tf.matmul(dense, w_d), b_d))
    dense = tf.nn.dropout(dense, keep_prob)

    # Output layer: one logit per (position, digit) pair, 18 * 10 in total
    w_out = tf.Variable(w_alpha * tf.random_normal([1024, MAX_CAPTCHA * CHAR_SET_LEN]))
    b_out = tf.Variable(b_alpha * tf.random_normal([MAX_CAPTCHA * CHAR_SET_LEN]))
    out = tf.add(tf.matmul(dense, w_out), b_out)
    return out
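
The 2*16*64 input size of the fully connected layer follows from the pooling arithmetic: each of the four 2x2 max-pools halves both spatial dimensions, so the 32 x 256 input shrinks to 2 x 16 with 64 feature channels. A quick check of that arithmetic:

h, w = 32, 256
for _ in range(4):  # four 2x2 max-pools with stride 2
    h, w = h // 2, w // 2
print("%d %d %d" % (h, w, h * w * 64))  # 2 16 2048, matching w_d's first dimension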
           

Finally, run the training, classifying with a sigmoid cross-entropy loss. Accuracy is computed every 100 steps; once it exceeds 80%, the model is saved and training ends:

# Training
def train_crack_captcha_cnn():
    output = crack_captcha_cnn()
    # loss
    # loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=Y))
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=Y))
    # How does softmax differ from sigmoid for the final classification layer?
    # optimizer: to speed up training, learning_rate should start large and then decay
    optimizer = tf.train.AdamOptimizer(learning_rate=0.002).minimize(loss)

    predict = tf.reshape(output, [-1, MAX_CAPTCHA, CHAR_SET_LEN])
    max_idx_p = tf.argmax(predict, 2)
    max_idx_l = tf.argmax(tf.reshape(Y, [-1, MAX_CAPTCHA, CHAR_SET_LEN]), 2)
    correct_pred = tf.equal(max_idx_p, max_idx_l)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        step = 0
        while True:
            batch_x, batch_y = get_next_batch(64)
            _, loss_ = sess.run([optimizer, loss],
                                feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.75, train_phase: True})
            print(step, loss_)

            # Compute the accuracy every 100 steps
            if step % 100 == 0 and step != 0:
                batch_x_test, batch_y_test = get_next_batch(100)
                acc = sess.run(accuracy,
                               feed_dict={X: batch_x_test, Y: batch_y_test, keep_prob: 1., train_phase: False})
                print("step %s, training accuracy: %s" % (step, acc))
                # If accuracy exceeds 80%, save the model and finish training
                if acc > 0.8:
                    saver.save(sess, "crack_capcha.model", global_step=step)
                    break
            step += 1
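
Putting it all together, training can then be started with a standard entry point (assuming the definitions above live in one script):

if __name__ == '__main__':
    train_crack_captcha_cnn()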
           

As for results: after roughly 500 training steps, the author reached an accuracy of 84.3%:

[screenshot: training output, accuracy 84.3%]

The author initially generated 64 x 512 images, but training was very slow and the loss refused to converge, hovering around 33; shrinking the images to 32 x 256 solved it. The cause isn't certain; the guess is that the network isn't deep enough, or doesn't have enough feature channels, for the larger input.

小目标完成後,為了最終目标的完成,後續可能嘗試方法2,去識别不定長的中文字元圖檔,不過要先去了解LSTM網絡和 CTC模型了。

Download: https://github.com/jimmyleaf/ocr_tensorflow_cnn/archive/master.zip