Naive Bayes Program
# -*- coding: utf-8 -*-
"""
Author: sunchang
Desc:
    Code 4-7: detecting anomalous accounts with naive Bayes
"""
import numpy as np


class NaiveBayesian:
    def __init__(self, alpha):
        self.classP = dict()
        self.classP_feature = dict()
        self.alpha = alpha  # smoothing value (Laplace smoothing for the class priors)
    # Load the data set
    def createData(self):
        data = np.array(
            [
                [320, 204, 198, 265],
                [253, 53, 15, 2243],
                [53, 32, 5, 325],
                [63, 50, 42, 98],
                [1302, 523, 202, 5430],
                [32, 22, 5, 143],
                [105, 85, 70, 322],
                [872, 730, 840, 2762],
                [16, 15, 13, 52],
                [92, 70, 21, 693],
            ]
        )
        # Label for each sample: is the account anomalous? (1: anomalous, 0: normal)
        labels = np.array([1, 0, 0, 1, 0, 0, 1, 1, 1, 0])
        return data, labels
    # Evaluate the Gaussian probability density, i.e. P(xi | yk)
    def gaussian(self, mu, sigma, x):
        return 1.0 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))

    # Compute the mean and standard deviation of one feature column
    def calMuAndSigma(self, feature):
        mu = np.mean(feature)
        sigma = np.std(feature)  # np.var() gives the variance, np.std() the standard deviation
        return (mu, sigma)
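    # Note (not in the original listing): if a feature column has zero variance within a
    # class, np.std() returns 0 and gaussian() divides by zero, yielding nan (with numpy
    # divide-by-zero warnings). A common guard is to clip the standard deviation before
    # returning it, e.g. sigma = max(sigma, 1e-9).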
    # Train the naive Bayes model
    def train(self, data, labels):
        numData = len(labels)  # number of samples
        numFeatures = len(data[0])  # number of features per sample
        # Prior probability of an anomalous account, P(y=1), with Laplace smoothing;
        # set(labels) keeps one copy of each label, so its length is the number of classes
        self.classP[1] = (
            (sum(labels) + self.alpha) * 1.0 / (numData + self.alpha * len(set(labels)))
        )
        # Prior probability of a normal account, P(y=0)
        self.classP[0] = 1 - self.classP[1]
        # For every label, store the Gaussian mean and standard deviation of each feature:
        # { label: { feature_index: (mu, sigma), ... }, ... }
        # For this data set the trained values are roughly:
        # {0: {0: (346.4, 484.05), 1: (140.0, 192.22), 2: (49.6, 76.45), 3: (1766.8, 1975.57)},
        #  1: {0: (275.2, 316.02), 1: (216.8, 264.37), 2: (232.6, 310.20), 3: (699.8, 1035.98)}}
        self.classP_feature = dict()
        # Iterate over the class labels and the feature columns
        for c in set(labels):
            self.classP_feature[c] = {}
            for i in range(numFeatures):  # feature columns 0, 1, 2, 3
                feature = data[np.equal(labels, c)][:, i]
                self.classP_feature[c][i] = self.calMuAndSigma(feature)
    # Predict whether a new account is anomalous
    def predict(self, x):
        label = -1  # predicted class, initialised to an invalid value
        maxP = 0  # largest prior-times-likelihood value seen so far
        # Iterate over all class labels; here self.classP is {1: 0.5, 0: 0.5}
        for key in self.classP.keys():
            label_p = self.classP[key]
            currentP = 1.0
            feature_p = self.classP_feature[key]
            j = 0
            for fp in feature_p.keys():
                # Multiply the per-feature likelihoods: currentP is the numerator of
                # P(yk | x), i.e. the product of P(xi | yk)
                currentP *= self.gaussian(feature_p[fp][0], feature_p[fp][1], x[j])
                j += 1
            # Keep the class whose prior * likelihood product is the largest so far
            if currentP * label_p > maxP:
                maxP = currentP * label_p
                label = key
        return label
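
    # A log-space variant of predict, added here as an illustrative sketch (it is not part
    # of the original listing). Multiplying many small Gaussian densities can underflow to
    # zero, so summing log-densities is numerically safer. Assumes train() was called first.
    def predictLog(self, x):
        bestLabel, bestLogP = -1, -np.inf
        for key in self.classP.keys():
            # log P(y) + sum_i log P(x_i | y)
            logP = np.log(self.classP[key])
            for i, (mu, sigma) in self.classP_feature[key].items():
                logP += np.log(self.gaussian(mu, sigma, x[i]))
            if logP > bestLogP:
                bestLogP, bestLabel = logP, key
        return bestLabel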


if __name__ == "__main__":
    nb = NaiveBayesian(1.0)
    data, labels = nb.createData()
    nb.train(data, labels)
    label = nb.predict(np.array([134, 84, 235, 349]))
    print("Behaviour data of the unknown account: [134, 84, 235, 349]; predicted type: {}".format(label))