Classifying the THUCNews dataset with gensim and SVM

I came across this while browsing CSDN. Since I had used gensim and SVM before, it seemed like a good exercise. The original code was written in a very linear, script-like style, so I refactored it from scratch. Learning by doing, after all.

What is gensim?

The official documentation explains it well: gensim is mainly used for topic models such as LDA and for training Word2Vec. In this post, documents are converted into LSI vectors, which are then classified with an SVM.

The overall workflow

The overall structure is simple: segment the text -> compute word frequencies -> compute tf-idf values -> compute LSI vectors -> split the LSI vectors into a training set and a test set -> train sklearn's SVM on the training set -> evaluate on the test set. (Each step saves its model so it can be loaded directly later.)
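
To make the flow concrete, here is a minimal, self-contained sketch of the same pipeline on a toy, already-tokenized corpus. The sentences, labels, and num_topics value are made up for illustration; the real implementation below works per THUCNews category and persists every intermediate model.

from gensim import corpora, models, matutils
from sklearn.svm import LinearSVC

# toy, already-tokenized corpus: two "finance" docs and two "sports" docs
texts = [["股价", "上涨", "市场"], ["球队", "比赛", "胜利"],
         ["股市", "下跌", "投资"], ["球员", "进球", "联赛"]]
labels = [0, 1, 0, 1]

dictionary = corpora.Dictionary(texts)                    # word frequencies
bows = [dictionary.doc2bow(t) for t in texts]             # bag-of-words vectors
tfidf = models.TfidfModel(dictionary=dictionary)          # tf-idf weights
corpus_tfidf = [tfidf[b] for b in bows]
lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=2)
corpus_lsi = [lsi[d] for d in corpus_tfidf]               # LSI vectors

# gensim's sparse (topic_id, value) format -> dense numpy matrix for sklearn
X = matutils.corpus2dense(corpus_lsi, num_terms=2).T
clf = LinearSVC().fit(X, labels)
print(clf.predict(X))

For brevity this sketch uses gensim's matutils.corpus2dense for the conversion; the article's code does the same thing by hand with scipy's csr_matrix.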

First, we define a class and its initial attributes, which are mainly the paths where the models will be saved:

class classfication(object):
    def __init__(self, path, sample):
        self.path = path                  # root directory of the THUCNews corpus
        self.sample = sample              # sampling interval: use every `sample`-th file
        self.dictionary_path = "model/dictionary.dict"
        self.tfIdfPath = "model/tfidf"
        self.lsiModel = "model/fullLsi.model"
        self.lsiPath = "model/lsi"
        self.predictor = "model/predictor.model"
        self.tag = os.listdir(self.path)  # category labels = sub-directory names
        os.makedirs("model", exist_ok=True)  # make sure the model directory exists before saving

Next, collect the path of every file in each category.

def _fullTagFile(self):
    self.tagFile = {}
    for tag in self.tag:
        fullPath = os.path.join(self.path, tag)
        fileName = glob.glob(os.path.join(fullPath, "*.txt"))
        self.tagFile[tag] = fileName
    return self.tagFile

Next comes word segmentation. I use jieba here (HanLP would work just as well), and stop words also need to be filtered out. In the code below, '\u3000' is the full-width space; all tokens of a document are collected into a single list.

def _segement(self, filepath):
    words_list = []
    # originally only punctuation was removed (the commented-out line below);
    # now a stop-word file is used instead
    #stops_words = set(list(string.punctuation + "!,。'';·「」`~@#¥%&×()-+\\<>"))
    stops_words = set([i.strip() for i in codecs.open("stop_words.txt", encoding="utf-8").readlines()])
    with codecs.open(filepath, encoding="utf-8") as fp:
        for line in fp:
            line = line.replace("\u3000", "").replace("\n", "")
            words_list.extend([i for i in jieba.cut(line, cut_all=False)
                               if i not in stops_words])
    return words_list
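
To get a feel for what this step produces, here is roughly what one sentence looks like after segmentation and stop-word filtering. The sentence and the tiny stop-word set are made up for illustration; the actual output depends on jieba's dictionary and on stop_words.txt.

import jieba

stops_words = {"的", "了", "是"}   # toy stop-word set, just for illustration
line = "股价今天上涨了百分之五"
print([w for w in jieba.cut(line, cut_all=False) if w not in stops_words])
# e.g. ['股价', '今天', '上涨', '百分之五'] -- the exact split depends on jieba's dictionary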

The methods that build the dictionary, the TF-IDF corpus, and the LSI vectors all follow the same pattern, so only the dictionary one is shown here; a condensed sketch of the TF-IDF step follows it.

def _getDictionary(self):
    N = 0  # number of documents processed so far
    dictionary = corpora.Dictionary()
    for tag in self.fullTagFile:
        tagPath = self.fullTagFile[tag]
        for i, filepath in enumerate(tagPath):
            if i % self.sample == 0:  # keep every `sample`-th file
                word_list = self._segement(filepath)
                dictionary.add_documents([word_list])
                N += 1
                if N % 1000 == 0:
                    print('{t} *** {i} \t docs processed'
                          .format(i=N, t=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    # drop tokens that appear in fewer than 5 documents
    small_freq_ids = [tokenid for tokenid, docfreq in dictionary.dfs.items() if docfreq < 5]
    dictionary.filter_tokens(small_freq_ids)
    dictionary.compactify()
    dictionary.save(self.dictionary_path)
    return dictionary
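
For reference, a condensed sketch of the TF-IDF step looks like the following (sampling and progress logging are omitted; the full _getTfIdf appears in the complete listing at the end, and the LSI step is analogous). It uses the same class attributes and imports as the full listing.

def _getTfIdf(self, dictionary):
    tfIdfModel = models.TfidfModel(dictionary=dictionary)   # idf weights come from the dictionary
    tagTfidf = defaultdict(list)
    for tag, paths in self.tagFile.items():
        for filepath in paths:
            bow = dictionary.doc2bow(self._segement(filepath))
            tagTfidf[tag].append(tfIdfModel[bow])            # tf-idf vector of one document
    os.makedirs(self.tfIdfPath, exist_ok=True)
    for tag, docs in tagTfidf.items():                       # one serialized corpus per category
        corpora.MmCorpus.serialize(os.path.join(self.tfIdfPath, "%s.mm" % tag),
                                   docs, id2word=dictionary)
    return tagTfidf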

Finally, everything comes together in the train method. All of the intermediate results above are saved, so later runs can load the models straight from disk and save time:

def train(self):
    self.fullTagFile = self._fullTagFile()
    # load (or build) the dictionary
    if os.path.exists(self.dictionary_path):
        dictionary = corpora.Dictionary.load(self.dictionary_path)
    else:
        dictionary = self._getDictionary()
    # load (or build) the tf-idf corpora
    tagTfidf = {}
    if not os.path.exists(self.tfIdfPath):
        tagTfidf = self._getTfIdf(dictionary)
    else:
        filePath = glob.glob(os.path.join(self.tfIdfPath, "*.mm"))
        for file in filePath:
            tag = os.path.split(file)[-1].split(".")[0]
            tagTfidf[tag] = corpora.MmCorpus(file)
    # load (or build) the LSI vectors of every document
    corpus_lsi = {}
    if not os.path.exists(self.lsiPath):
        corpus_lsi = self._getLsi(dictionary, tagTfidf)
    else:
        filePath = glob.glob(os.path.join(self.lsiPath, "*.mm"))  # note: lsiPath, not tfIdfPath
        for file in filePath:
            tag = os.path.split(file)[-1].split(".")[0]
            corpus_lsi[tag] = corpora.MmCorpus(file)
    # the full LSI model, used to project new sentences
    if os.path.exists(self.lsiModel):
        with open(self.lsiModel, 'rb') as fp:
            lsi_model = pickle.load(fp)
    else:
        corpus = []
        for value in tagTfidf.values():
            corpus.extend(value)
        lsi_model = models.LsiModel(corpus=corpus, id2word=dictionary, num_topics=50)
        with open(self.lsiModel, 'wb') as fp:
            pickle.dump(lsi_model, fp)
    # load (or train) the classifier
    if not os.path.exists(self.predictor):
        predictor = self._getPredictor(corpus_lsi)
    else:
        with open(self.predictor, 'rb') as fp:
            predictor = pickle.load(fp)
    return predictor, dictionary, lsi_model
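
With this caching in place, the first call to train() builds everything and writes it under model/, while later calls (or predict(), which calls train() internally) simply load the saved files. Roughly:

clf = classfication(r"E:\迅雷下载\THUCNews\THUCNews", sample=10)
predictor, dictionary, lsi_model = clf.train()   # first run: trains and saves under model/
predictor, dictionary, lsi_model = clf.train()   # later runs: loaded straight from disk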

The complete code:

# -*- encoding:utf-8 -*-
import jieba
from gensim import corpora
from gensim import models
from scipy.sparse import csr_matrix
from sklearn import svm
from sklearn.model_selection import train_test_split
import os
import logging
import pickle
import codecs
import glob
from collections import defaultdict
import string
import datetime

logging.basicConfig(level=logging.WARNING,
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    )


class classfication(object):
    def __init__(self, path, sample):
        self.path = path                  # root directory of the THUCNews corpus
        self.sample = sample              # sampling interval: use every `sample`-th file
        self.dictionary_path = "model/dictionary.dict"
        self.tfIdfPath = "model/tfidf"
        self.lsiModel = "model/fullLsi.model"
        self.lsiPath = "model/lsi"
        self.predictor = "model/predictor.model"
        self.tag = os.listdir(self.path)  # category labels = sub-directory names
        os.makedirs("model", exist_ok=True)  # make sure the model directory exists before saving

    def _fullTagFile(self):
        # collect the path of every .txt file in each category
        self.tagFile = {}
        for tag in self.tag:
            fullPath = os.path.join(self.path, tag)
            fileName = glob.glob(os.path.join(fullPath, "*.txt"))
            self.tagFile[tag] = fileName
        return self.tagFile

    def _segement(self, filepath):
        words_list = []
        # originally only punctuation was removed (the commented-out line below);
        # now a stop-word file is used instead
        #stops_words = set(list(string.punctuation + "!,。'';·「」`~@#¥%&×()-+\\<>"))
        stops_words = set([i.strip() for i in codecs.open("stop_words.txt", encoding="utf-8").readlines()])
        with codecs.open(filepath, encoding="utf-8") as fp:
            for line in fp:
                line = line.replace("\u3000", "").replace("\n", "")
                words_list.extend([i for i in jieba.cut(line, cut_all=False)
                                   if i not in stops_words])
        return words_list

    def _getDictionary(self):
        N = 0  # number of documents processed so far
        dictionary = corpora.Dictionary()
        for tag in self.fullTagFile:
            tagPath = self.fullTagFile[tag]
            for i, filepath in enumerate(tagPath):
                if i % self.sample == 0:  # keep every `sample`-th file
                    word_list = self._segement(filepath)
                    dictionary.add_documents([word_list])
                    N += 1
                    if N % 1000 == 0:
                        print('{t} *** {i} \t docs processed'
                              .format(i=N, t=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
        # drop tokens that appear in fewer than 5 documents
        small_freq_ids = [tokenid for tokenid, docfreq in dictionary.dfs.items() if docfreq < 5]
        dictionary.filter_tokens(small_freq_ids)
        dictionary.compactify()
        dictionary.save(self.dictionary_path)
        return dictionary

    def _getTfIdf(self, dictionary):
        N = 0  # number of documents processed so far
        tagTfidf = defaultdict(list)
        tfIdfModel = models.TfidfModel(dictionary=dictionary)
        for tag in self.tagFile:
            tagPath = self.tagFile[tag]
            for i, filepath in enumerate(tagPath):
                if i % self.sample == 0:
                    word_list = self._segement(filepath)
                    doc2bow = dictionary.doc2bow(word_list)
                    doc_tfidf = tfIdfModel[doc2bow]
                    tagTfidf[tag].append(doc_tfidf)
                    N += 1
                    if N % 1000 == 0:
                        print('{t} *** {i} \t docs processed'
                              .format(i=N, t=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
        if not os.path.isdir(self.tfIdfPath):
            os.makedirs(self.tfIdfPath)
        # one serialized corpus per category
        for tag in tagTfidf:
            corpora.MmCorpus.serialize(os.path.join(self.tfIdfPath, "%s.mm" % tag),
                                       tagTfidf.get(tag), id2word=dictionary)
        return tagTfidf

    def _getLsi(self, dictionary, tagTfidf):
        # train the LSI model on the tf-idf corpora of all categories
        corpus = []
        for value in tagTfidf.values():
            corpus.extend(value)
        lsi_model = models.LsiModel(corpus=corpus, id2word=dictionary, num_topics=50)
        with open(self.lsiModel, mode="wb") as fp:
            pickle.dump(lsi_model, fp)
        corpus_lsi = {}
        if not os.path.isdir(self.lsiPath):
            os.makedirs(self.lsiPath)
        for tag in tagTfidf:
            corpus = [lsi_model[doc] for doc in tagTfidf.get(tag)]
            corpus_lsi[tag] = corpus
            corpora.MmCorpus.serialize(os.path.join(self.lsiPath, '%s.mm' % tag),
                                       corpus, id2word=dictionary)
        return corpus_lsi

    def _getPredictor(self, corpus_lsi):
        corpus_lsi_total = []
        tag_list_all = []
        for index, tag in enumerate(self.tag):
            temp = corpus_lsi[tag]
            corpus_lsi_total.extend(temp)
            tag_list_all.extend([index] * len(temp))
        # note: the gensim sparse vectors have to be converted into a numpy matrix for sklearn
        lsi_matrix = self._csr_matrix(corpus_lsi_total)
        x_train, x_test, y_train, y_test = train_test_split(lsi_matrix, tag_list_all, test_size=0.2, random_state=422)
        clf = svm.LinearSVC()
        clf_res = clf.fit(x_train, y_train)
        # evaluate on the test set and return the trained model
        x_test_pred = clf_res.predict(x_test)
        accuracy = sum([1 for i, j in zip(x_test_pred, y_test) if i == j]) / len(x_test)
        print('=== training finished, results below ===')
        print('test-set accuracy: {e}'.format(e=accuracy))
        with open(self.predictor, "wb") as fp:
            pickle.dump(clf_res, fp)
        return clf_res

    def _csr_matrix(self, corpus_lsi, type="train"):
        # convert gensim's (topic_id, value) vectors into a dense numpy array;
        # assumes every LSI vector contains entries for all topics
        data = []
        rows = []
        columns = []
        line_count = 0
        if type == "train":
            for line in corpus_lsi:
                for elem in line:
                    rows.append(line_count)
                    columns.append(elem[0])
                    data.append(elem[1])
                line_count += 1
            lsi_array = csr_matrix((data, (rows, columns))).toarray()
        elif type == "test":
            for item in corpus_lsi:
                data.append(item[1])
                columns.append(item[0])
                rows.append(0)
            lsi_array = csr_matrix((data, (rows, columns))).toarray()
        return lsi_array

    def train(self):
        self.fullTagFile = self._fullTagFile()
        # load (or build) the dictionary
        if os.path.exists(self.dictionary_path):
            dictionary = corpora.Dictionary.load(self.dictionary_path)
        else:
            dictionary = self._getDictionary()
        # load (or build) the tf-idf corpora
        tagTfidf = {}
        if not os.path.exists(self.tfIdfPath):
            tagTfidf = self._getTfIdf(dictionary)
        else:
            filePath = glob.glob(os.path.join(self.tfIdfPath, "*.mm"))
            for file in filePath:
                tag = os.path.split(file)[-1].split(".")[0]
                tagTfidf[tag] = corpora.MmCorpus(file)
        # load (or build) the LSI vectors of every document
        corpus_lsi = {}
        if not os.path.exists(self.lsiPath):
            corpus_lsi = self._getLsi(dictionary, tagTfidf)
        else:
            filePath = glob.glob(os.path.join(self.lsiPath, "*.mm"))  # note: lsiPath, not tfIdfPath
            for file in filePath:
                tag = os.path.split(file)[-1].split(".")[0]
                corpus_lsi[tag] = corpora.MmCorpus(file)
        # the full LSI model, used to project new sentences
        if os.path.exists(self.lsiModel):
            with open(self.lsiModel, 'rb') as fp:
                lsi_model = pickle.load(fp)
        else:
            corpus = []
            for value in tagTfidf.values():
                corpus.extend(value)
            lsi_model = models.LsiModel(corpus=corpus, id2word=dictionary, num_topics=50)
            with open(self.lsiModel, 'wb') as fp:
                pickle.dump(lsi_model, fp)
        # load (or train) the classifier
        if not os.path.exists(self.predictor):
            predictor = self._getPredictor(corpus_lsi)
        else:
            with open(self.predictor, 'rb') as fp:
                predictor = pickle.load(fp)
        return predictor, dictionary, lsi_model

    def predict(self, sentences):
        predictor, dictionary, lsi_model = self.train()
        demo_doc = list(jieba.cut(sentences, cut_all=False))
        demo_bow = dictionary.doc2bow(demo_doc)
        tfidf_model = models.TfidfModel(dictionary=dictionary)
        demo_tfidf = tfidf_model[demo_bow]
        demo_lsi = lsi_model[demo_tfidf]
        demo_matrix = self._csr_matrix(demo_lsi, type="test")
        x = predictor.predict(demo_matrix)
        print(self.tag[x[0]])


if __name__ == '__main__':
    train = classfication(r"E:\迅雷下载\THUCNews\THUCNews", sample=10)
    #train = classfication("THUCNews", sample=1)
    test = train.predict(''' 股价现在怎么样了 ''')

Sampling 10% of the documents, the final accuracy comes out around 87%, which is only passable.
The model is also quite limited: it does poorly on short sentences, though that is largely a matter of the training data. Next time I will try clustering short texts and see how that goes.