Naive Bayes
Naive Bayes Overview
Naive Bayes computes, for an unclassified sample, the probability that it belongs to each class; the class with the highest probability is the predicted class.
Pros: works well even with little training data; handles multi-class problems
Cons: sensitive to how the input data is prepared
Works with: nominal data
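Concretely, for a feature vector w and a class c_i, Bayes' rule gives p(c_i|w) = p(w|c_i)p(c_i) / p(w). Because p(w) is identical for every class, it suffices to compare p(w|c_i)p(c_i), and the "naive" conditional-independence assumption lets p(w|c_i) factor into the product of per-word probabilities p(w_j|c_i), which is exactly what the code below estimates.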
Classifying with Naive Bayes
Prepare the data: build word vectors from text
import numpy as np

def loadDataSet():
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classVec = [0, 1, 0, 1, 0, 1]  # 1 = abusive post, 0 = normal post
    return postingList, classVec

def createVocabList(dataSet):
    vocabSet = set()  # start with an empty set
    for document in dataSet:
        vocabSet = vocabSet | set(document)  # union of the two sets
    return list(vocabSet)  # list of unique words (set ordering varies from run to run)

def setOfWords2Vec(vocabList, inputSet):
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:  # does the document word appear in the vocabulary?
            returnVec[vocabList.index(word)] = 1
        else:
            print("the word: %s is not in my Vocabulary!" % word)
    return returnVec  # word vector

listOPosts, listClasses = loadDataSet()
myVocabList = createVocabList(listOPosts)
print(myVocabList)
print(setOfWords2Vec(myVocabList, listOPosts[0]))
print(setOfWords2Vec(myVocabList, listOPosts[3]))
Train the algorithm: compute probabilities from word vectors
def trainNB0(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)   # number of documents
    numWords = len(trainMatrix[0])    # number of words in the vocabulary
    pAbusive = sum(trainCategory) / float(numTrainDocs)  # prior probability of the abusive class
    p0Num = np.zeros(numWords); p1Num = np.zeros(numWords)  # per-word occurrence counts
    p0Denom = 0.0; p1Denom = 0.0      # total word occurrences in each class
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    p1Vect = p1Num / p1Denom
    p0Vect = p0Num / p0Denom
    return p0Vect, p1Vect, pAbusive
# p0Vect holds each vocabulary word's probability given a normal post,
# p1Vect each word's probability given an abusive post
trainMat = []
for postinDoc in listOPosts:
    trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
print(trainMat)
p0V, p1V, pAb = trainNB0(trainMat, listClasses)
print(p0V, p1V, pAb)
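As a sanity check on p1V: the three abusive posts contain 19 word occurrences in total, and 'stupid' is the only word that appears in all three, so its entry in p1V is 3/19 ≈ 0.158, the largest conditional probability in the abusive class.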
Test the algorithm: modify the classifier for real-world conditions
Two adjustments make the classifier practical: initializing every word count to 1 and each denominator to 2 (Laplace smoothing) keeps a single unseen word from zeroing out the whole product of probabilities, and working with logarithms, via ln(a*b) = ln(a) + ln(b), avoids numeric underflow when many small probabilities are multiplied.
def trainNB0(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(numTrainDocs)  # prior probability of the abusive class
    p0Num = np.ones(numWords); p1Num = np.ones(numWords)  # initialize counts to 1 so no product is zero
    p0Denom = 2.0; p1Denom = 2.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    # take logarithms to avoid numeric underflow
    p1Vect = np.log(p1Num / p1Denom)
    p0Vect = np.log(p0Num / p0Denom)
    return p0Vect, p1Vect, pAbusive
trainMat = []
for postinDoc in listOPosts:
    trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
print(trainMat)
p0V, p1V, pAb = trainNB0(trainMat, listClasses)
print(p0V, p1V, pAb)  # p0V and p1V are now log probabilities, so their entries are negative
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    # vec2Classify * pXVec selects the log probabilities of the words present;
    # their sum plus the log prior is the log of p(w|c)p(c)
    p1 = sum(vec2Classify * p1Vec) + np.log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + np.log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0

def testingNB():
    listOPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    trainMat = []
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    p0V, p1V, pAb = trainNB0(np.array(trainMat), np.array(listClasses))
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = np.array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))
    testEntry = ['stupid', 'garbage']
    thisDoc = np.array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))

testingNB()
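With this fixed training set the outcome is deterministic: ['love', 'my', 'dalmation'] is classified as 0 (normal) and ['stupid', 'garbage'] as 1 (abusive).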
Prepare the data: the bag-of-words document model
Set-of-words model: each word's presence or absence is a feature
Bag-of-words model: each word's occurrence count is a feature
# similar to setOfWords2Vec, but accumulates counts instead of setting 0/1 flags
def bagOfWords2VecMN(vocabList, inputSet):
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
    return returnVec
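To see the difference between the two models, compare both encodings on a small made-up input (the repeated 'dog' is hypothetical, not from the dataset):

print(setOfWords2Vec(myVocabList, ['dog', 'dog', 'stupid']))   # the 'dog' slot stays at 1
print(bagOfWords2VecMN(myVocabList, ['dog', 'dog', 'stupid'])) # the 'dog' slot becomes 2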
Example 1: filtering spam email with Naive Bayes
Prepare the data: tokenize text
mySent = 'This book is the best book on Python on M.L. I have ever laid eyes upon.'
import re
listOfTokens = re.split(r'\W+', mySent)  # split on runs of non-word characters (r'\W*' also matches the empty string and mis-splits under Python 3.7+)
print([tok.lower() for tok in listOfTokens if len(tok) > 0])
# ['this', 'book', 'is', 'the', 'best', 'book', 'on', 'python', 'on', 'm', 'l', 'i', 'have', 'ever', 'laid', 'eyes', 'upon']
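The spamTest and localWords functions below call a textParse helper that never appears in this section; a minimal version consistent with the tokenization above (the two-character cutoff, which drops noise tokens like 'm' and 'l', is an assumption, but the usual choice) would be:

def textParse(bigString):
    import re
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]  # keep lowercase tokens longer than 2 characters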
Test the algorithm: hold-out cross-validation with Naive Bayes
def spamTest():
    import random
    docList = []; classList = []; fullText = []
    for i in range(1, 26):
        wordList = textParse(open('./Desktop/email/spam/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)  # spam
        wordList = textParse(open('./Desktop/email/ham/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)  # ham
    vocabList = createVocabList(docList)  # build the vocabulary
    trainingSet = list(range(50)); testSet = []  # build the training and test sets
    for i in range(10):  # hold-out cross-validation
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])  # randomly pick 10 files for the test set
        del trainingSet[randIndex]
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:  # compute the probabilities needed for classification
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))  # build word vectors
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(np.array(trainMat), np.array(trainClasses))
    errorCount = 0
    for docIndex in testSet:  # classify the held-out documents
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(np.array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
            print("classification error", docList[docIndex])
    print('the error rate is: ', float(errorCount) / len(testSet))  # error rate
    #return vocabList, fullText
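Assuming the book's email dataset (25 spam and 25 ham text files) sits under ./Desktop/email/ as the paths above expect, running it is just:

spamTest()  # the error rate depends on the random split; average several runs for a stable estimate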
Example 2: using a Naive Bayes classifier to find regional preferences in personal ads
Collect the data: import RSS feeds
def calcMostFreq(vocabList, fullText):
    import operator
    freqDict = {}
    for token in vocabList:
        freqDict[token] = fullText.count(token)
    sortedFreq = sorted(freqDict.items(), key=operator.itemgetter(1), reverse=True)
    return sortedFreq[:30]  # the 30 most frequent words
def localWords(feed1, feed0):
    import random
    docList = []; classList = []; fullText = []
    minLen = min(len(feed1['entries']), len(feed0['entries']))
    for i in range(minLen):
        wordList = textParse(feed1['entries'][i]['summary'])  # parse the entry into a list of strings
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(feed0['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)  # build the vocabulary
    top30Words = calcMostFreq(vocabList, fullText)
    for pairW in top30Words:  # remove the most frequent words; they are mostly stop words with no regional signal
        if pairW[0] in vocabList: vocabList.remove(pairW[0])
    trainingSet = list(range(2 * minLen)); testSet = []  # build the training and test sets
    for i in range(5):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del trainingSet[randIndex]
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(np.array(trainMat), np.array(trainClasses))
    errorCount = 0
    for docIndex in testSet:
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(np.array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print('the error rate is: ', float(errorCount) / len(testSet))
    return vocabList, p0V, p1V
import feedparser
# the book's original Craigslist NY/SF personal-ads feeds are apparently no longer available;
# any two RSS feeds whose entries carry a 'summary' field work as substitutes here
ny = feedparser.parse('http://www.nasa.gov/rss/dyn/image_of_the_day.rss')
sf = feedparser.parse('https://sports.yahoo.com/nba/teams/hou/rss.xml')
# run several times: the error rate changes with the random test split
vocabList, pSF, pNY = localWords(ny, sf)
vocabList, pSF, pNY = localWords(ny, sf)
vocabList, pSF, pNY = localWords(ny, sf)
Analyze the data: display region-related words
def getTopWords(ny, sf):
    vocabList, p0V, p1V = localWords(ny, sf)
    topNY = []; topSF = []
    for i in range(len(p0V)):  # the -6.0 cutoff keeps only words with a relatively high log probability; adjust it to show more or fewer words
        if p0V[i] > -6.0: topSF.append((vocabList[i], p0V[i]))
        if p1V[i] > -6.0: topNY.append((vocabList[i], p1V[i]))
    sortedSF = sorted(topSF, key=lambda pair: pair[1], reverse=True)
    print("SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**")
    for item in sortedSF:
        print(item[0])
    sortedNY = sorted(topNY, key=lambda pair: pair[1], reverse=True)
    print("NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**")
    for item in sortedNY:
        print(item[0])

getTopWords(ny, sf)