天天看点

NLP-关于数据集处理的相关代码

1.将几个文件中的数据合并为一个文件

将要合并的几个文件放入一个文件夹下

import os
#获取目标文件夹的路径
# filedir=os.getcwd()+'/corpus'
#获取当前文件夹中文件名称列表
# filenames=os.listdir(filedir)
#遍历文件名
# for filename in filenames:
    filepath=filedir+'/'+filename
    #遍历单个文件,读取行数
        for line in open(filepath, encoding='utf-8').readlines():
              data.append(line)
        with open('train.txt', 'w',encoding='utf-8') as fout:
              for i in range(0, len(data)):
                   fout.write(data[i])
           

2.将对话数据划分为问题-回复对

for filename in filenames:
    filepath=filedir+'/'+filename
    #遍历单个文件,读取行数
    for line in open(filepath,encoding='utf-8').readlines():
        line = line.strip()
        if line == '===':
            continue
        data.append((line.replace('Q: ', '').replace('A: ', '')))
    with open('train.txt', 'w') as fout:
        for i in range(0, len(data), 2):
            fout.write(data[i] + '\t' + data[i + 1] +'\t'+ '1'+'\n')

        for i in range(len(data)-2,0,-2):
            fout.write(data[i] + '\t' + data[i-1] + '\t' + '0' + '\n')
           

3.划分训练集和测试集

import random

def train_test_split(infile, test_rate=0.1):
    with open('train.txt', 'w',encoding='utf-8') as f_train, \
            open('test.txt', 'w',encoding='utf-8') as f_test:
        for line in open(infile,encoding='utf-8'):
            if random.random() > test_rate:
                f_train.write(line)
            else:
                f_test.write(line)


if __name__ == '__main__':
    # Split ./corpus.txt into train.txt / test.txt (10% test by default).
    train_test_split('./corpus.txt')