Crowd density estimation demo; it roughly reproduces CrowdNet and the results are only so-so. See the paper for the implementation details.
from matplotlib import pyplot as plt
from paddle import fluid
import paddle
import json
import random, sys, time
# from scipy import misc
import numpy as np
import cv2
import scipy.spatial, scipy.ndimage
json_stage1_path = '/home/aistudio/data/cus_data/annotation_train_stage1.json'
json_stage2_path = '/home/aistudio/data/cus_data/annotation_train_stage2.json'
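# Ground-truth preparation: every annotated head becomes a single "1" pixel, and the
# density map is the sum of a unit-mass Gaussian centred on each such pixel, so the
# integral of the map equals the head count. A fixed sigma of 10 is used instead of the
# geometry-adaptive (KNN-based) sigma that is left commented out below.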
def create_density_map(annos, imshape):
gt = np.zeros(imshape, dtype='uint8')
for dot in annos:
try:
gt[int(dot[1]), int(dot[0])] = 1
except IndexError:
print (dot[1], dot[0], sys.exc_info())
density = np.zeros(gt.shape, dtype=np.float32)
gt_count = np.count_nonzero(gt)
if gt_count == 0:
return density
    pts = np.array(list(zip(np.nonzero(gt)[1], np.nonzero(gt)[0])))
# leafsize = 2048
# tree = scipy.spatial.KDTree(pts.copy(), leafsize=leafsize)
# k = gt_count
# if gt_count>=5:
# k = 5
# distances, locations = tree.query(pts, k=k, eps=20.)
for i, pt in enumerate(pts):
pt2d = np.zeros(gt.shape, dtype=np.float32)
pt2d[pt[1], pt[0]] = 1.
if gt_count > 1:
# d_avg = distances[i].sum() / float(k-1)
# sigma = d_avg * 0.18 /
# # sigma = distances[i][1] * 1
# print(sigma)
sigma=10
else:
sigma = np.average(np.array(gt.shape)) / 2. /2.
        density += scipy.ndimage.gaussian_filter(pt2d, sigma, mode='constant')
return density
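# Quick sanity check with made-up points (illustrative only): the density map of a few
# annotations should integrate to roughly the number of annotated heads.
_toy_annos = [(40.0, 60.0), (128.0, 128.0), (200.0, 30.0)]
print('toy density sum:', create_density_map(_toy_annos, (256, 256)).sum())  # expect ~3.0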
with open(json_stage1_path, 'r') as f:
dict1 = json.loads(f.read())
with open(json_stage2_path, 'r') as f:
dict2 = json.loads(f.read())
if dict1 is not None:
l1 = [(key['name'], key['type'], key['annotation'], key['num'], key['id'], key['ignore_region']) for key in
dict1['annotations']] # 2859
print(len(l1))
if dict2 is not None:
l2 = [(key['name'], key['type'], key['annotation'], key['num'], key['id'], key['ignore_region']) for key in
dict2['annotations']] # 2859
print(len(l2))
l1.extend(l2) # 6477
print(len(l1))
train_data, test_data = l1[:6000], l1[6000:]  # first 6000 samples for training, the rest for evaluation
index_np = np.empty(shape=(len(l1), 2), dtype='U41')  # each row stores (image name, head count) as text
resize_shape = (256,256)
for i, d in enumerate(l1):
# print(d[0])
img = cv2.imread('/home/aistudio/data/cus_data/train/' + d[0].split('/')[-1])
# print(img)
    if d[5]:
        # black out every ignore region so annotations there add nothing to the density map
        for region in d[5]:
            pts = np.array([[anno['x'], anno['y']] for anno in region], dtype=np.int32)
            cv2.fillPoly(img, [pts], (0, 0, 0))
original_shape = img.shape[:2]
img = cv2.resize(img, resize_shape).astype('float32') / 255.0
annos = []
if d[1] == 'bbox':
annos = [((anno['x'] + anno['w'] / 2) * resize_shape[1] / original_shape[1],
(anno['y'] + anno['h'] / 9) * resize_shape[0] / original_shape[0]) for anno in d[2]]
        # Here x corresponds to axis 1 and y to axis 0, but create_density_map below indexes them back as (row, col), so this is consistent.
elif d[1] == 'dot':
annos = [(anno['x'] * resize_shape[1] / original_shape[1],
anno['y'] * resize_shape[0] / original_shape[0])
for anno in d[2]]
map_img = create_density_map(annos=annos, imshape=resize_shape)
# ori_img, map_img = random_transform(img, map_img, **random_transform_args)
# ori_img = np.transpose(img, [2, 0, 1])
# map_img = map_img[np.newaxis, ...]
name = d[0].split('/')[-1].split('.')[0]
index_np[i] = [name, str(d[3])]
np.savez('/home/aistudio/data/cus_data/all_img/' + name + '.npz',
ori=img, map=map_img)
print(i)
print(index_np[0])
np.save('/home/aistudio/data/cus_data/index_np.npy', index_np)
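# Optional spot check (assumes the .npz files above were just written): reload the first
# sample and compare the integrated density map against the annotated head count.
_name, _num = index_np[0]
_sample = np.load('/home/aistudio/data/cus_data/all_img/' + _name + '.npz')
print('density sum vs annotated count:', _sample['map'].sum(), _num)
# plt.imshow(_sample['map']); plt.show()  # uncomment to visualise the density map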
from matplotlib import pyplot as plt
from paddle import fluid
import paddle
import json
import random, sys
# from scipy import misc
import numpy as np
import cv2
import scipy.spatial, scipy.ndimage
json_path = '/home/linhai/PycharmProjects/baidu_star_2018 _train/annotation/annotation_train_stage1.json'
json_stage2_path = '/home/aistudio/data/data675/baidu_star_2018/annotation/annotation_train_stage2.json'
# 2859
import paddle.fluid as fluid
import numpy as np
# from crowdnet import shallow_deconv_net, crowd_deconv_net, shallow_deconv_without_bn, crowd_deconv_without_bn
# from image_reader_annotation import img_reader_generator_with_ignore
config = fluid.CheckpointConfig(
checkpoint_dir="/tmp/ckpt", max_num_checkpoints=2,
epoch_interval=2, step_interval=50)
BATCH = 16
use_cuda = 1
resize_shape = (256, 256)
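# CrowdNet-style two-column network: the deep, VGG-like column below captures high-level
# semantics, while the shallow column preserves low-level detail for small/far-away heads.
# train_program() further down concatenates the two feature maps, applies a 1x1 convolution
# to predict a single-channel density map, and bilinearly upsamples it back to 256x256.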
def crowd_deconv_without_bn(img):
x = img
# x = fluid.nets.img_conv_group(
# input=img,
# conv_num_filter=[64] * 2,
# conv_filter_size=3,
# pool_size=2,
# pool_stride=[2, 1],
# conv_act='relu',
# conv_with_batchnorm=True,
# conv_batchnorm_drop_rate=[0.0, 0.25],
# pool_type='max',
# )
x = fluid.layers.conv2d(input=x, num_filters=64, filter_size=3, padding=1, act='relu') # padding='same')
# print(x.shape)
# x = fluid.layers.batch_norm(input=x, act='relu')
x = fluid.layers.conv2d(input=x, num_filters=64, filter_size=3, padding=1, act='relu') # padding='same')
# print(x.shape)
# x = fluid.layers.batch_norm(input=x, act='relu')
x = fluid.layers.pool2d(input=x, pool_size=2, pool_stride=2) # padding='same')
print(x.shape)
x = fluid.layers.dropout(x=x, dropout_prob=0.25)
x = fluid.layers.conv2d(input=x, num_filters=128, filter_size=3, padding=1, act='relu') # padding='same')
# print(x.shape)
# x = fluid.layers.batch_norm(input=x, act='relu')
x = fluid.layers.conv2d(input=x, num_filters=128, filter_size=3, padding=1, act='relu') # padding='same')
# print(x.shape)
# x = fluid.layers.batch_norm(input=x, act='relu')
x = fluid.layers.pool2d(input=x, pool_size=2, pool_stride=2) # padding='same')
print(x.shape)
x = fluid.layers.dropout(x=x, dropout_prob=0.25)
x = fluid.layers.conv2d(input=x, num_filters=256, filter_size=3, padding=1, act='relu') # padding='same')
# x = fluid.layers.batch_norm(input=x, act='relu')
x = fluid.layers.conv2d(input=x, num_filters=256, filter_size=3, padding=1, act='relu') # padding='same')
# x = fluid.layers.batch_norm(input=x, act='relu')
x = fluid.layers.conv2d(input=x, num_filters=256, filter_size=3, padding=1, act='relu') # padding='same')
# x = fluid.layers.batch_norm(input=x, act='relu')
print(x.shape)
x = fluid.layers.pool2d(input=x, pool_size=2, pool_stride=2) # padding='same')
x = fluid.layers.dropout(x=x, dropout_prob=0.5)
print(x.shape)
x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=1, act='relu') # padding='same')
# x = fluid.layers.batch_norm(input=x, act='relu')
x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=1, act='relu') # padding='same')
# x = fluid.layers.batch_norm(input=x, act='relu')
    x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=1, act='relu')  # padding='same')
# x = fluid.layers.batch_norm(input=x, act='relu')
x = fluid.layers.pool2d(input=x, pool_size=3, pool_stride=1, pool_padding=1) # padding='same')
x = fluid.layers.dropout(x=x, dropout_prob=0.5)
x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=1, act='relu') # padding='same')
# x = fluid.layers.batch_norm(input=x, act='relu')
x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=1, act='relu') # padding='same')
# x = fluid.layers.batch_norm(input=x, act='relu')
x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=1) # padding='same')
x = fluid.layers.batch_norm(input=x, act='relu')
# x = fluid.layers.pool2d(input=x, pool_size=3, pool_stride=2, pool_padding=1) # padding='same')
x = fluid.layers.dropout(x=x, dropout_prob=0.5)
    print('crowd_net output shape:', x.shape)
return x
def shallow_deconv_without_bn(img, kernel_size=(5, 5)):
x = img
x = fluid.layers.conv2d(input=x, num_filters=24, filter_size=kernel_size, padding=2, act='relu')
# print(x.shape )
# x = fluid.layers.batch_norm(input=x, act='relu')
x = fluid.layers.pool2d(input=x, pool_type='avg', pool_size=5, pool_stride=2, pool_padding=2) # padding='same')
# print(x.shape )
x = fluid.layers.conv2d(input=x, num_filters=24, filter_size=kernel_size, padding=2, act='relu')
# x = fluid.layers.batch_norm(input=x, act='relu')
x = fluid.layers.pool2d(input=x, pool_type='avg', pool_size=5, pool_stride=2, pool_padding=2) # padding='same')
# print(x.shape )
x = fluid.layers.conv2d(input=x, num_filters=24, filter_size=kernel_size, padding=2, )
x = fluid.layers.batch_norm(input=x, act='relu')
x = fluid.layers.pool2d(input=x, pool_type='avg', pool_size=5, pool_stride=2, pool_padding=2) # padding='same')
print('shallow_net output shape:',x.shape)
return x
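# Shape bookkeeping for a 256x256 input (follows from the strides above):
#   deep column   : three 2x2 max pools, stride 2  -> 512 x 32 x 32
#   shallow column: three 5x5 avg pools, stride 2  -> 24 x 32 x 32
# The matching 32x32 spatial size is what allows the channel-wise concat in train_program().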
def img_reader_generator(batch=BATCH, resize_shape=resize_shape, is_train=True):
index = np.load('/home/aistudio/data/cus_data/index_np.npy')
l1, l2 = index[:6000], index[6000:]
def train():
np.random.shuffle(l1)
        for i in range(6000 // batch):
# t1 = time.clock()
# ran = np.random.randint(0, 4, shape=batch)
data = []
for x in l1[i * batch:(i + 1) * batch]: # x[0]=name x[1] =num
x1 = np.load('/home/aistudio/data/cus_data/all_img/' + x[0] + '.npz')
ori_img, map_img = x1['ori'], x1['map']
if random.random() > .5:
ori_img = ori_img[:,::-1]
map_img = map_img[:,::-1]
ori_img =np.transpose(ori_img, [2, 0, 1])
map_img = map_img[np.newaxis, ...]
                data.append((ori_img, map_img, float(x[1])))
# print('time', time.clock() - t1)
yield data
def test():
        for i in range(477 // batch):
data = []
temp_list = l2[i * batch:(i + 1) * batch]
for x in temp_list: # x[0]=name x[1] =num
x1 = np.load('/home/aistudio/data/cus_data/all_img/' + x[0] + '.npz')
ori_img, map_img = x1['ori'], x1['map']
                data.append((ori_img, map_img, float(x[1])))
if (i + 2) * batch > 477:
for x in l2[(i + 1) * batch:]:
x1 = np.load('/home/aistudio/data/cus_data/all_img/' + x[0] + '.npz')
ori_img, map_img = x1['ori'], x1['map']
                    data.append((ori_img, map_img, float(x[1])))
yield data
if is_train:
return train
else:
return test
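# Usage sketch: the reader yields lists of (CHW float image, 1xHxW density map, head count)
# tuples; pulling one tiny batch here simply confirms that layout.
_probe = next(img_reader_generator(batch=2, is_train=True)())
print('probe batch:', len(_probe), _probe[0][0].shape, _probe[0][1].shape, _probe[0][2])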
json_path2 = '/home/linhai/PycharmProjects/baidu_star_2018 _train/annotation/annotation_train_stage1.json'
save_dirname = 'model/paddle.deconv_beta01_600e.model'
check_point_dirname = 'model/check_point_dirname'
optimizer_func = lambda: fluid.optimizer.Adam(learning_rate=0.0001)
def train_program():
img_data = fluid.layers.data(name='img_data', shape=[3, 256, 256], dtype='float32')
img_label = fluid.layers.data(name='img_label', shape=[1, 256, 256], dtype='float32')
img_num = fluid.layers.data(name='img_num', shape=[1], dtype='float32')
print(img_label)
# net_out1 = crowd_deconv_net(img_data) # !!!!!!!!!!!!!!!!!!!!!!!!!
# net_out2 = shallow_deconv_net(img_data)
net_out1 = crowd_deconv_without_bn(img_data) # !!!!!!!!!!!!!!!!!!!!!!!!!
net_out2 = shallow_deconv_without_bn(img_data)
concat_out = fluid.layers.concat([net_out1, net_out2], axis=1)
concat_out = fluid.layers.batch_norm(input=concat_out, act='relu')
conv_end = fluid.layers.conv2d(input=concat_out, num_filters=1, filter_size=1)
print(conv_end.shape)
map_out = fluid.layers.resize_bilinear(conv_end, out_shape=(256, 256))
print(map_out.shape)
cost = fluid.layers.square_error_cost(input=map_out, label=img_label)
avg_cost1 = fluid.layers.mean(cost)
sum_ = fluid.layers.reduce_sum(map_out, dim=[1, 2, 3])
sum_ = fluid.layers.reshape(sum_, [-1, 1])
loss_ = fluid.layers.abs(fluid.layers.elementwise_sub(sum_, img_num))
loss = fluid.layers.mean(fluid.layers.elementwise_div(loss_, img_num))
sum_loss = loss + avg_cost1 * 6e5
# acc = fluid.layers.accuracy()
return [sum_loss, loss]
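# Loss used in train_program(): pixel-wise MSE between predicted and ground-truth density
# maps (avg_cost1) plus the mean relative counting error |sum(pred) - gt_count| / gt_count.
# The MSE term is scaled by 6e5, presumably to lift the tiny per-pixel values to the same
# order of magnitude as the counting term; only their sum (sum_loss) is optimised.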
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
train_reader = img_reader_generator(batch=BATCH, resize_shape=resize_shape, is_train=True)
test_reader = img_reader_generator(batch=BATCH, resize_shape=resize_shape, is_train=False)
trainer = fluid.Trainer(
train_func=train_program,
optimizer_func=optimizer_func,
place=place,
checkpoint_config=config)
feed_order = ['img_data', 'img_label', 'img_num']
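# feed_order maps each position of the reader's (image, density map, count) tuples to the
# correspondingly named fluid.layers.data input declared in train_program().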
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
# if event.step % == 0:
print("Step:{0}, Epoch:{1} sum_loss:{2} loss:{3}".format(
event.step, event.epoch, map(np.array, event.metrics)[0][0], event.metrics[1][0]))
elif isinstance(event, fluid.EndEpochEvent):
print('Test :')
avg_cost = trainer.test(
reader=test_reader,
feed_order=feed_order,
)
# print(avg_cost)
        print('Epoch {0}, Test Loss {1}\n'.format(event.epoch, avg_cost))
trainer.save_params(save_dirname)
# fluid.io.load_params(save_dirname)
trainer.train(num_epochs=400,
event_handler=event_handler,
reader=train_reader,
feed_order=feed_order
)
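# Hedged inference sketch (not part of the original script): rebuild the fusion network,
# restore the parameters saved by trainer.save_params, and turn a predicted density map
# into a head count. Assumes fluid.Inferencer, the high-level counterpart of fluid.Trainer,
# is available in this Paddle version; the zero image is only a placeholder input.
def inference_program():
    img = fluid.layers.data(name='img_data', shape=[3, 256, 256], dtype='float32')
    deep = crowd_deconv_without_bn(img)
    shallow = shallow_deconv_without_bn(img)
    fused = fluid.layers.concat([deep, shallow], axis=1)
    fused = fluid.layers.batch_norm(input=fused, act='relu')
    density = fluid.layers.conv2d(input=fused, num_filters=1, filter_size=1)
    return fluid.layers.resize_bilinear(density, out_shape=(256, 256))

inferencer = fluid.Inferencer(infer_func=inference_program, param_path=save_dirname, place=place)
sample_img = np.zeros((1, 3, 256, 256), dtype='float32')  # replace with a real preprocessed image
pred_map = inferencer.infer({'img_data': sample_img})[0]
print('predicted count:', pred_map.sum())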