Example of implementing a perceptron model in Python

Date: 2022-09-14 17:01:55
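The script below implements a two-class perceptron from scratch and compares it with sklearn.linear_model.Perceptron on the first two classes of the iris dataset (relabeled as -1 and +1). For every misclassified sample (x, y) it takes a stochastic gradient descent step on the perceptron loss, updating the weights as w ← w + lr * y * x and the bias as b ← b + lr * y; training stops once the maximum number of epochs is reached, the epoch-to-epoch change in the loss drops below loss_tolerance, or an epoch finishes without any misclassified samples.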
from sklearn.linear_model import Perceptron
import argparse  # handy command-line argument parsing
import numpy as np
from sklearn.datasets import load_iris  # the iris dataset
from sklearn.model_selection import train_test_split  # split data into train and test sets
from loguru import logger  # simple logging output
 
# a small object-oriented perceptron implementation
class PerceptronToby():
  """
  n_epoch: maximum number of training epochs
  learning_rate: learning rate
  loss_tolerance: stop training once the change in loss between two epochs falls below this value
  """
  def __init__(self, n_epoch=500, learning_rate=0.1, loss_tolerance=0.01):
    self._n_epoch = n_epoch
    self._lr = learning_rate
    self._loss_tolerance = loss_tolerance

  def fit(self, X, y):
    """Train the model: find the weights and bias that minimize the perceptron loss.

    X: training data, one sample per row and one feature per column
    y: labels, either 1 or -1
    """
    n_sample, n_feature = X.shape  # number of samples and number of features
 
    # initialize the weights uniformly in [-1/sqrt(n_feature), 1/sqrt(n_feature)]
    rnd_val = 1 / np.sqrt(n_feature)
    rng = np.random.default_rng()
    self._w = rng.uniform(-rnd_val, rnd_val, size=n_feature)
    # initialize the bias to 0
    self._b = 0

    # training loop: run for at most n_epoch epochs
    num_epoch = 0  # number of completed epochs
    prev_loss = 0  # loss of the previous epoch
    while True:
      curr_loss = 0       # loss accumulated in the current epoch
      wrong_classify = 0  # number of misclassified samples in the current epoch
 
      # one full pass over the training samples per epoch
      for i in range(n_sample):
        # raw output of the linear model
        y_pred = np.dot(self._w, X[i]) + self._b
        # the perceptron updates its parameters only on misclassified samples,
        # taking a stochastic gradient descent step on the perceptron loss
        if y[i] * y_pred <= 0:
          # the perceptron loss only accumulates over misclassified samples
          curr_loss += -y[i] * y_pred
          self._w += self._lr * y[i] * X[i]
          self._b += self._lr * y[i]
          wrong_classify += 1
 
      num_epoch += 1
      loss_diff = curr_loss - prev_loss
      prev_loss = curr_loss
      # stopping criteria:
      # 1. the specified number of epochs has been reached
      # 2. the loss changed by less than loss_tolerance between two consecutive epochs
      # 3. no sample was misclassified during this epoch
      if num_epoch >= self._n_epoch or abs(loss_diff) < self._loss_tolerance or wrong_classify == 0:
        break
 
 
  """预测模型,顾名思义"""
  def predict(self, x):
    """给定输入样本,预测其类别"""
    y_pred = np.dot(self._w, x) + self._b
    return 1 if y_pred >= 0 else -1
 
# main function
def main():
  # build the command-line argument parser
  parser = argparse.ArgumentParser(description="command-line arguments for the perceptron implementation")
  parser.add_argument("--nepoch", type=int, default=500, help="maximum number of training epochs")
  parser.add_argument("--lr", type=float, default=0.1, help="learning rate")
  parser.add_argument("--loss_tolerance", type=float, default=0.001, help="stop training when the absolute difference between the current and previous epoch loss falls below this value")
  args = parser.parse_args()
  # load the iris data; relabel the first 50 samples (class 0) as -1 so that the
  # first two classes form a -1/+1 binary classification problem
  X, y = load_iris(return_X_y=True)
  y[:50] = -1
  # split the first 100 samples (the two relabeled classes) into train and test sets
  xtrain, xtest, ytrain, ytest = train_test_split(X[:100], y[:100], train_size=0.8, shuffle=True)
  # build and train the model
  model = PerceptronToby(args.nepoch, args.lr, args.loss_tolerance)
  model.fit(xtrain, ytrain)
 
  # evaluate on the test set
  n_test = xtest.shape[0]
  n_right = 0
  for i in range(n_test):
    y_pred = model.predict(xtest[i])
    if y_pred == ytest[i]:
      n_right += 1
    else:
      logger.info("true label: {}, but the toby model predicted: {}".format(ytest[i], y_pred))
  logger.info("accuracy of the toby model on the test set: {}%".format(n_right * 100 / n_test))

  # train sklearn's Perceptron on the same split for comparison
  skmodel = Perceptron(max_iter=args.nepoch)
  skmodel.fit(xtrain, ytrain)
  logger.info("accuracy of the sklearn model on the test set: {}%".format(100 * skmodel.score(xtest, ytest)))

if __name__ == "__main__":
  main()
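As a quick way to exercise the class on its own, here is a minimal usage sketch on a tiny hand-made, linearly separable dataset. It assumes the code above is saved as perceptron_toby.py (a hypothetical filename) and that numpy, scikit-learn, and loguru are installed.

```
# Minimal usage sketch; assumes the code above is saved as perceptron_toby.py
# (hypothetical filename) so the class can be imported.
import numpy as np
from perceptron_toby import PerceptronToby

# A tiny linearly separable toy problem: label +1 above the line x1 + x2 = 0, -1 below it.
X = np.array([[2.0, 1.0], [1.5, 2.0], [-1.0, -2.0], [-2.5, -0.5]])
y = np.array([1, 1, -1, -1])

model = PerceptronToby(n_epoch=100, learning_rate=0.1, loss_tolerance=0.001)
model.fit(X, y)

# On this separable toy set the training points should end up classified correctly.
print(model.predict(np.array([2.0, 1.0])))    # should print 1
print(model.predict(np.array([-2.5, -0.5])))  # should print -1
```

The script itself can also be run directly, for example `python perceptron_toby.py --nepoch 300 --lr 0.05 --loss_tolerance 0.001`, which trains on the iris subset and logs the test accuracy of both the hand-rolled and the sklearn model.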


The above is the complete example of implementing a perceptron model in Python. For more example code on implementing perceptron models in Python, please follow the other related articles on 服务器之家!

Original article: https://www.cnblogs.com/xiaolongdejia/p/13712742.html