A Python Implementation of the Bayes- and KNN-Based newsgroup 18828 Text Classifier

Date: 2022-03-23 16:14:30

Learning NLP from @yangliuy: this post is a Python port of his article "Data Mining: A Java Implementation of a Bayes- and KNN-Based newsgroup18828 Text Classifier (Part 1)". It is meant as an introductory exercise, with little original content of my own.

1. Dataset

The Newsgroup corpus contains roughly 20,000 Usenet documents, distributed evenly across 20 newsgroups (one folder per group). The Newsgroup18828 corpus used here has been cleaned so that each document belongs to exactly one newsgroup.

2. Preprocessing: process the text of each document, to prepare for building the dictionary and extracting feature words later

# -*- coding: utf-8 -*-
from numpy import *
from os import listdir, mkdir, makedirs, path
import re
from nltk.corpus import stopwords
import nltk
import operator
##############################################################
## 1. Create new folders to hold the preprocessed text data
##############################################################
def createFiles():
    srcFilesList = listdir('originSample')
    for i in range(len(srcFilesList)):
        if i == 0: continue  # skip the first directory entry
        dataFilesDir = 'originSample/' + srcFilesList[i]  # path of each of the 20 folders
        dataFilesList = listdir(dataFilesDir)
        targetDir = 'processedSample_includeNotSpecial/' + srcFilesList[i]  # path of each of the 20 new folders
        if not path.exists(targetDir):
            makedirs(targetDir)  # makedirs also creates missing parent directories
        else:
            print('%s exists' % targetDir)
        for j in range(len(dataFilesList)):
            createProcessFile(srcFilesList[i], dataFilesList[j])  # process the text into the new file
            print('%s %s' % (srcFilesList[i], dataFilesList[j]))
##############################################################
## 2. Generate the target file for one source document
## @param srcFilesName  name of one newsgroup folder, e.g. alt.atheism
## @param dataFilesName name of one data file inside that folder
##############################################################
def createProcessFile(srcFilesName, dataFilesName):
    srcFile = 'originSample/' + srcFilesName + '/' + dataFilesName
    targetFile = 'processedSample_includeNotSpecial/' + srcFilesName \
                 + '/' + dataFilesName
    fw = open(targetFile, 'w')
    dataList = open(srcFile).readlines()  # the source file read line by line
    for line in dataList:
        resLine = lineProcess(line)  # process each line with lineProcess()
        for word in resLine:
            fw.write('%s\n' % word)  # one word per line
    fw.close()
##############################################################
## 3. Process one line of text: remove non-letter characters,
##    lowercase, remove stop words, and stem
## @param line  the line of text to process
## @return words  list of the remaining words after splitting on non-letters
##############################################################
def lineProcess(line):
    stopwords = nltk.corpus.stopwords.words('english')  # stop word list
    porter = nltk.PorterStemmer()  # stemmer
    splitter = re.compile('[^a-zA-Z]')  # split on non-letter characters
    words = [porter.stem(word.lower()) for word in splitter.split(line)
             if len(word) > 0 and
             word.lower() not in stopwords]
    return words
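
A quick sanity check of lineProcess (this assumes the NLTK stopword corpus has been downloaded via nltk.download('stopwords'); exact stems can vary slightly across NLTK versions):

print(lineProcess('The quick brown foxes are jumping!'))
# expected output, roughly: ['quick', 'brown', 'fox', 'jump']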

3. Build the dictionary sortedNewWordMap

########################################################
## Count the total number of occurrences of each word
## @return sortedNewWordMap  sorted list of (word, count) pairs;
##         every count is greater than 4
#########################################################
def countWords():
    wordMap = {}
    newWordMap = {}
    fileDir = 'processedSample_includeNotSpecial'
    sampleFilesList = listdir(fileDir)
    for i in range(len(sampleFilesList)):
        sampleFilesDir = fileDir + '/' + sampleFilesList[i]
        sampleList = listdir(sampleFilesDir)
        for j in range(len(sampleList)):
            sampleDir = sampleFilesDir + '/' + sampleList[j]
            for line in open(sampleDir).readlines():
                word = line.strip('\n')
                wordMap[word] = wordMap.get(word, 0.0) + 1.0
    # keep only words that occur more than 4 times
    for key, value in wordMap.items():
        if value > 4:
            newWordMap[key] = value
    sortedNewWordMap = sorted(newWordMap.items())
    print('wordMap size : %d' % len(wordMap))
    print('newWordMap size : %d' % len(sortedNewWordMap))
    return sortedNewWordMap
############################################################
## Write the dictionary to a file
###########################################################
def printWordMap():
    print('Print Word Map')
    countLine = 0
    fr = open('D:\\04_Python\\Test1\\Ex2_bayesian\\docVector\\allDicWordCountMap.txt', 'w')
    sortedWordMap = countWords()
    for item in sortedWordMap:
        fr.write('%s %.1f\n' % (item[0], item[1]))
        countLine += 1
    print('sortedWordMap size : %d' % countLine)

4. Feature word selection: generate another 20 folders; each document in them keeps only that document's feature words, i.e., the words of the document that also appear in the dictionary.

#####################################################
## Feature word selection
####################################################
def filterSpecialWords():
    fileDir = 'processedSample_includeNotSpecial'
    wordMapDict = {}
    sortedWordMap = countWords()
    for i in range(len(sortedWordMap)):
        wordMapDict[sortedWordMap[i][0]] = sortedWordMap[i][0]  # used only as a membership table
    sampleDir = listdir(fileDir)
    for i in range(len(sampleDir)):
        targetDir = 'processedSampleOnlySpecial_2' + '/' + sampleDir[i]
        srcDir = 'processedSample_includeNotSpecial' + '/' + sampleDir[i]
        if not path.exists(targetDir):
            makedirs(targetDir)
        sample = listdir(srcDir)
        for j in range(len(sample)):
            targetSampleFile = targetDir + '/' + sample[j]
            fr = open(targetSampleFile, 'w')
            srcSampleFile = srcDir + '/' + sample[j]
            for line in open(srcSampleFile).readlines():
                word = line.strip('\n')
                if word in wordMapDict:  # membership test on the dict itself is O(1)
                    fr.write('%s\n' % word)
            fr.close()
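
Since wordMapDict is used purely as a membership table, a plain set would be the more idiomatic structure; a minimal alternative sketch:

featureWords = set(item[0] for item in countWords())
# ... then inside the loop: if word in featureWords: ...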

5. For each iteration, create the training set (20 folders) and the test set (20 folders), and generate the labeled data set.

##########################################################
## Create the training set and the test set
## @param indexOfSample       index k of the experiment
## @param classifyRightCate   file receiving the <doc, rightCategory> pairs
##                            of the test set of experiment k
## @param trainSamplePercent  fraction of the samples used for training
############################################################
def createTestSample(indexOfSample, classifyRightCate, trainSamplePercent=0.9):
    fr = open(classifyRightCate, 'w')
    fileDir = 'processedSampleOnlySpecial'
    sampleFilesList = listdir(fileDir)
    for i in range(len(sampleFilesList)):
        sampleFilesDir = fileDir + '/' + sampleFilesList[i]
        sampleList = listdir(sampleFilesDir)
        m = len(sampleList)
        testBeginIndex = indexOfSample * (m * (1 - trainSamplePercent))
        testEndIndex = (indexOfSample + 1) * (m * (1 - trainSamplePercent))
        for j in range(m):
            # Files whose index falls inside the test window become test samples.
            # For each one, write a <document, category> line to the label file
            # (one line per file) so the accuracy is easy to compute later;
            # the category is the name of the folder the document lives in.
            if (j > testBeginIndex) and (j < testEndIndex):
                fr.write('%s %s\n' % (sampleList[j], sampleFilesList[i]))
                targetDir = 'TestSample' + str(indexOfSample) + \
                            '/' + sampleFilesList[i]
            else:
                targetDir = 'TrainSample' + str(indexOfSample) + \
                            '/' + sampleFilesList[i]
            if not path.exists(targetDir):
                makedirs(targetDir)  # also creates the parent TestSample<k>/TrainSample<k> folder
            sampleDir = sampleFilesDir + '/' + sampleList[j]
            sample = open(sampleDir).readlines()
            sampleWriter = open(targetDir + '/' + sampleList[j], 'w')
            for line in sample:
                sampleWriter.write('%s\n' % line.strip('\n'))
            sampleWriter.close()
    fr.close()
# Call the function above to generate the label files and the
# train/test sets for all ten experiments
def test():
    for i in range(10):
        classifyRightCate = 'classifyRightCate' + str(i) + '.txt'
        createTestSample(i, classifyRightCate)
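
As a concrete example: with m = 1000 files in a category and trainSamplePercent = 0.9, experiment k = 0 takes the files with index between 0 and 100 as test samples and the remaining 90% as training samples, experiment k = 1 takes index 100 to 200, and so on, so over the ten experiments each file serves as a test sample roughly once.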

6. The naive Bayes algorithm

The probability that a test sample belongs to a category is the product of the probabilities of the sample's words given that category (the class-conditional probabilities), multiplied by the probability of the category itself (the prior probability):

p(cate|doc) ∝ p(cate) * ∏ p(word_i | cate)

When computing the class-conditional and prior probabilities concretely, naive Bayes classification comes in two models:

1) Multinomial model

Word-level granularity: count not only whether a feature word occurs, but how many times it occurs.

Class-conditional probability: p(word | cate) = (total occurrences of word across all documents of class cate + 1) / (total word count of class cate + number of distinct feature words in the training set)

Prior probability: p(cate) = total word count of class cate / total feature word count of the training set

2) Bernoulli model

Document-level granularity:

Class-conditional probability: p(word | cate) = (number of documents in class cate that contain word + 1) / (number of documents in class cate + 2)

Prior probability: p(cate) = number of documents in class cate / total number of documents in the training set

Quoting @yangliuy: according to Introduction to Information Retrieval, the multinomial model gives higher accuracy, so this classifier uses the multinomial model.
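
A minimal log-space sketch of the multinomial computation (the function and parameter names here are my own, not the original post's; taking logs avoids underflow when many small probabilities are multiplied):

from math import log

def multinomialLogPosterior(docWords, cateWordCount, cateTotal, vocabSize, totalWords):
    # prior: p(cate) = word count of the class / word count of the training set
    logP = log(cateTotal / totalWords)
    for w in docWords:
        # Laplace smoothing: (count of w in the class + 1) / (class word count + vocabulary size)
        logP += log((cateWordCount.get(w, 0) + 1) / (cateTotal + vocabSize))
    return logP

Note that computeCateProb in section 6.2 below deviates slightly from this textbook form: it smooths with 0.0001 instead of +1, and its denominator uses the total training word count instead of the vocabulary size.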

6.1 Counting statistics

########################################################################
## Count, in the training set, the occurrences of each word in each
## category directory, and the total word count of each directory
## @param strDir  training set directory
## @return cateWordsProb  <category_word, occurrences of that word>
## @return cateWordsNum   <category, total word count>
#########################################################################
def getCateWordsProb(strDir):
    # e.g. strDir = 'TrainSample0'
    cateWordsNum = {}
    cateWordsProb = {}
    cateDir = listdir(strDir)
    for i in range(len(cateDir)):
        count = 0  # total word count of this directory (i.e., this category)
        sampleDir = strDir + '/' + cateDir[i]
        sample = listdir(sampleDir)
        for j in range(len(sample)):
            sampleFile = sampleDir + '/' + sample[j]
            words = open(sampleFile).readlines()
            for line in words:
                count = count + 1
                word = line.strip('\n')
                keyName = cateDir[i] + '_' + word
                cateWordsProb[keyName] = cateWordsProb.get(keyName, 0) + 1  # occurrences of each word in this category
        cateWordsNum[cateDir[i]] = count
        print('cate %d contains %d' % (i, cateWordsNum[cateDir[i]]))
    print('cate-word size: %d' % len(cateWordsProb))
    return cateWordsProb, cateWordsNum
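
The keys follow the "category_word" format, so a lookup is a single string concatenation; a quick usage sketch (assuming TrainSample0 has already been generated, and using 'god' purely as an illustrative word):

cateWordsProb, cateWordsNum = getCateWordsProb('TrainSample0')
print(cateWordsProb.get('alt.atheism_god', 0))  # occurrences of 'god' in alt.atheism
print(cateWordsNum['alt.atheism'])              # total word count of alt.atheism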

6.2 Classifying the test documents with Bayes

##########################################
## Classify the test documents with naive Bayes
## @param traindir               training set directory
## @param testdir                test set directory
## @param classifyResultFileNew  classification result file
##########################################
def NBprocess(traindir, testdir, classifyResultFileNew):
    crWriter = open(classifyResultFileNew, 'w')
    # traindir = 'TrainSample0'
    # testdir = 'TestSample0'
    # occurrences of word c in category k, and total word count of category k
    cateWordsProb, cateWordsNum = getCateWordsProb(traindir)
    # total word count of the training set
    trainTotalNum = sum(cateWordsNum.values())
    print('trainTotalNum: %d' % trainTotalNum)
    # classify each test sample
    testDirFiles = listdir(testdir)
    for i in range(len(testDirFiles)):
        testSampleDir = testdir + '/' + testDirFiles[i]
        testSample = listdir(testSampleDir)
        for j in range(len(testSample)):
            testFilesWords = []
            sampleDir = testSampleDir + '/' + testSample[j]
            lines = open(sampleDir).readlines()
            for line in lines:
                word = line.strip('\n')
                testFilesWords.append(word)
            maxP = 0.0
            trainDirFiles = listdir(traindir)
            for k in range(len(trainDirFiles)):
                p = computeCateProb(trainDirFiles[k], testFilesWords,
                                    cateWordsNum, trainTotalNum, cateWordsProb)
                if k == 0:
                    maxP = p
                    bestCate = trainDirFiles[k]
                    continue
                if p > maxP:
                    maxP = p
                    bestCate = trainDirFiles[k]
            crWriter.write('%s %s\n' % (testSample[j], bestCate))
    crWriter.close()
#################################################
## @param traindir       category k
## @param testFilesWords words of one test document
## @param cateWordsNum   <category, total word count> of the training set
## @param totalWordsNum  total word count of the training set
## @param cateWordsProb  <category_word, occurrences> of the training set
## @return the log probability of the test sample under category k
## class-conditional probability = (occurrences of word i in class k + 0.0001)
##                                 / (word count of class k + total training word count)
## prior probability             = (word count of class k) / (total training word count)
#################################################
def computeCateProb(traindir, testFilesWords, cateWordsNum,
                    totalWordsNum, cateWordsProb):
    prob = 0
    wordNumInCate = cateWordsNum[traindir]  # total word count of category k
    for i in range(len(testFilesWords)):
        keyName = traindir + '_' + testFilesWords[i]
        if keyName in cateWordsProb:
            testFileWordNumInCate = cateWordsProb[keyName]  # occurrences of word c in category k
        else:
            testFileWordNumInCate = 0.0
        # take logs to avoid underflow from multiplying many tiny numbers
        xcProb = log((testFileWordNumInCate + 0.0001) /
                     (wordNumInCate + totalWordsNum))
        prob = prob + xcProb
    res = prob + log(wordNumInCate) - log(totalWordsNum)
    return res

7. Compute the accuracy

def computeAccuracy(rightCate, resultCate, k):
    rightCateDict = {}
    resultCateDict = {}
    rightCount = 0.0
    for line in open(rightCate).readlines():
        (sampleFile, cate) = line.strip('\n').split(' ')
        rightCateDict[sampleFile] = cate
    for line in open(resultCate).readlines():
        (sampleFile, cate) = line.strip('\n').split(' ')
        resultCateDict[sampleFile] = cate
    for sampleFile in rightCateDict.keys():
        # print('rightCate: %s resultCate: %s' %
        #       (rightCateDict[sampleFile], resultCateDict[sampleFile]))
        if rightCateDict[sampleFile] == resultCateDict[sampleFile]:
            rightCount += 1.0
    print('rightCount : %d rightCate: %d' % (rightCount, len(rightCateDict)))
    accuracy = rightCount / len(rightCateDict)
    print('accuracy %d : %f' % (k, accuracy))
    return accuracy

8. Putting the steps together

#############################################################################
## Generate the test samples and label files for each iteration
def step1():
    for i in range(10):
        classifyRightCate = 'classifyRightCate' + str(i) + '.txt'
        createTestSample(i, classifyRightCate)
##############################################################################
## Classify the test documents with Bayes
def step2():
    for i in range(10):
        traindir = 'TrainSample' + str(i)
        testdir = 'TestSample' + str(i)
        classifyResultFileNew = 'classifyResultFileNew' + str(i) + '.txt'
        NBprocess(traindir, testdir, classifyResultFileNew)
##############################################################################
## Compute the accuracy
def step3():
    accuracyOfEveryExp = []
    for i in range(10):
        rightCate = 'classifyRightCate' + str(i) + '.txt'
        resultCate = 'classifyResultFileNew' + str(i) + '.txt'
        accuracyOfEveryExp.append(computeAccuracy(rightCate, resultCate, i))
    return accuracyOfEveryExp
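
A minimal driver that ties the three steps together and averages the ten accuracies (see point (2) at the end of this post):

if __name__ == '__main__':
    step1()
    step2()
    accuracies = step3()
    print('mean accuracy over 10 experiments: %f' % (sum(accuracies) / len(accuracies)))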

Output:

WordMap size: 32189
Cate_Word: 162649
cate alt.atheism contains 130141.0
cate comp.graphics contains 145322.0
cate comp.os.ms-windows.misc contains 348719.0
cate comp.sys.ibm.pc.hardware contains 96505.0
cate comp.sys.mac.hardware contains 88902.0
cate comp.windows.x contains 131896.0
cate misc.forsale contains 75843.0
cate rec.autos contains 109281.0
cate rec.motorcycles contains 99047.0
cate rec.sport.baseball contains 111705.0
cate rec.sport.hockey contains 135429.0
cate sci.crypt contains 147705.0
cate sci.electronics contains 101945.0
cate sci.med contains 153708.0
cate sci.space contains 135170.0
cate soc.religion.christian contains 174490.0
cate talk.politics.guns contains 155503.0
cate talk.politics.mideast contains 219330.0
cate talk.politics.misc contains 162621.0
cate talk.religion.misc contains 103775.0
totalWordsNum: 2827037.0
rightCount: 1513.0 rightCate: 1870
The accuracy for Naive Bayesian Classifier in 0th Exp is : 0.8090909090909091

@yangliuy mentions a few points worth paying attention to, which I also find valuable:

(1) The probability computation uses the BigDecimal class for arbitrary precision.

For handling arbitrary precision in Python, I think the following two approaches are worth considering.

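One natural candidate (my own suggestion; a sketch, not the original post's code) is the standard-library decimal module, Python's analogue of BigDecimal; the other is log-space computation, which computeCateProb above already uses. A minimal decimal sketch:

from decimal import Decimal, getcontext

getcontext().prec = 100  # plenty of significant digits for long products

prior = Decimal('0.05')
wordProbs = [Decimal('0.0003'), Decimal('0.00012'), Decimal('0.000045')]
posterior = prior
for p in wordProbs:
    posterior *= p  # exact multiplication, no underflow to 0.0
print(posterior)

That said, summing logarithms as computeCateProb does sidesteps the precision problem entirely and is usually the simpler choice.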
(2) Use cross-validation: run the classification experiment ten times and average the accuracies.
(3) cateWordsProb uses "category_word" as the key and the occurrence count of that word in the category as the value, which avoids recomputing the counts.

A dictionary arranged this way makes the bookkeeping very convenient.