import re
import codecs

import jieba.analyse
import pandas as pd

# Hard-coded paths from the original script, kept as overridable keyword
# defaults so existing callers are unaffected (Windows-specific — TODO: make
# these configurable).
_SIMPLIFICATION_OUT = r'C:\Users\Administrator.SC-201812211013\PycharmProjects\untitled29\yiwoqu\code\xianbingshi_write.txt'
_JIEBA_IN = r'C:\Users\Administrator.SC-201812211013\PycharmProjects\untitled29\xianbingshi_write.txt'

# NOTE(review): the original pattern was '(?=\b\&;).*?(?=\e\&;)', which is
# broken twice over: '\e' is an invalid regex escape (re.error at runtime)
# and '\b' in a non-raw string is a backspace character.  The markers look
# like an HTML-mangled '<b>' / '<e>' pair, so that is what is matched here —
# confirm against the real marker format of the input file.
_SPAN_RE = re.compile(r'(?<=<b>).*?(?=<e>)')


def simplification_text(xianbingshi, out_path=_SIMPLIFICATION_OUT):
    """提取文本: extract every marked span from the file *xianbingshi* and
    write the spans, one per line, to *out_path*."""
    spans = []
    with codecs.open(xianbingshi, 'r', 'utf8') as f:
        for line in f:
            spans.extend(_SPAN_RE.findall(line.strip()))
    with codecs.open(out_path, 'w', 'utf8') as f:
        for span in spans:
            f.write(span + '\n')


def jieba_text(in_path=_JIEBA_IN, out_path='word.txt'):
    """Segment *in_path* with jieba (accurate mode), de-duplicate the tokens
    while keeping first-seen order, and write one token per line to
    *out_path*."""
    # The original opened the input without ever closing it; 'with' fixes
    # the file-handle leak.
    with codecs.open(in_path, 'r', 'utf8') as f:
        data = f.read()
    word_list = [w.strip() for w in jieba.cut(data, cut_all=False)]  # accurate mode (精確模式)
    # drop_duplicates(keep='first') is an order-preserving de-duplication.
    frame = pd.DataFrame({'a': word_list})
    frame.drop_duplicates(subset=['a'], keep='first', inplace=True)
    with codecs.open(out_path, 'w', 'utf8') as w:
        for word in frame['a'].tolist():
            w.write(word + '\n')


# Strips tokens that are purely numeric or short-alphanumeric noise.
_NOISE_RE = re.compile(
    r'^[1-9]\d*\.\d*|^[A-Za-z0-9]+$|^[0-9]*$|^(-?\d+)(\.\d+)?$|^[A-Za-z0-9]{4,40}.*?'
)


def word_messy(word, out_path='word.txt'):
    """詞語提煉: remove numeric/alphanumeric noise from each line of the file
    *word*, sort the remaining lines, and rewrite them to *out_path*."""
    cleaned = []
    with codecs.open(word, 'r', 'utf8') as f:
        for line in f:
            cleaned.append(_NOISE_RE.sub('', line))
    cleaned.sort()
    with codecs.open(out_path, 'w', 'utf8') as w:
        for line in cleaned:
            w.write(line.strip('\n') + '\n')


if __name__ == '__main__':
    xianbingshi = r'C:\Users\Administrator.SC-201812211013\PycharmProjects\untitled29\yiwoqu\xianbingshi_sub_sen_all(1).txt'
    # simplification_text(xianbingshi)
    # word = r'C:\Users\Administrator.SC-201812211013\PycharmProjects\untitled29\word.txt'
    simplification_text(xianbingshi)
補充：python 進行結巴分詞，并且用 re 去掉符號
# Segment 'all.txt' with jieba, strip punctuation with a regex, drop stop
# words, and write the space-joined result to 'allutf11.txt'.
# Fixes vs. the original: errors='ingnore' typo (-> 'ignore'); files were
# only closed on the happy path and are now managed by 'with'; the
# punctuation regex is compiled once instead of per line.

# Stop-word lookup table (dict used purely for O(1) membership tests).
stopwords = {}
with open('stop_words.txt', 'r', encoding='utf-8', errors='ignore') as fstop:
    for each_word in fstop:
        stopwords[each_word.strip()] = each_word.strip()

# Removes ASCII and full-width punctuation before segmentation.
_punct = re.compile(r"[0-9\s+\.\!\/_,$%^*()?;;:-【】+\"\']+|[+——!,;:。?、~@#¥%……*()]+")

with open('all.txt', 'r', encoding='utf-8', errors='ignore') as f1, \
        open('allutf11.txt', 'w', encoding='utf-8') as f2:
    for line in f1:
        line = _punct.sub(" ", line.strip())        # strip punctuation
        out_str = ""
        for word in jieba.cut(line, cut_all=False):  # jieba accurate-mode segmentation
            if word not in stopwords:
                out_str += word
                out_str += " "
        f2.write(out_str)
以上為個人經驗，希望能給大家一個參考，也希望大家多多支持腳本之家。
標簽：常州 成都 六盤水 蘭州 江蘇 駐馬店 山東 宿遷
巨人網絡通訊聲明：本文標題《python文本處理的方案(結巴分詞并去除符號)》，本文關鍵詞 python,文本,處理,的,方案；如發現本文內容存在版權問題，煩請提供相關信息告之我們，我們將及時溝通與處理。本站內容系統采集於網絡，涉及言論、版權與本站無關。