Implementing an SVM (Support Vector Machine) with TensorFlow

Posted: 2025-03-29 09:01:04


1. Implementing an SVM with TensorFlow

# -*- coding: utf-8 -*-

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets

# Load the data
iris = datasets.load_iris()
x_vals = np.array([[x[0], x[3]] for x in iris.data])           # sepal length and petal width
y_vals = np.array([1 if y == 0 else -1 for y in iris.target])  # +1 for I. setosa, -1 otherwise

# Split into training and test sets
train_indices = np.random.choice(len(x_vals), int(len(x_vals)*0.8), replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]

batch_size = 100

# Initialize placeholders (data is fed in at run time)
x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)

# Create weight and bias variables (two copies, to compare two losses below)
A = tf.Variable(tf.random_normal(shape=[2, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))

A2 = tf.Variable(tf.random_normal(shape=[2, 1]))
b2 = tf.Variable(tf.random_normal(shape=[1, 1]))

# Define the linear model: output = x·A - b
model_output = tf.subtract(tf.matmul(x_data, A), b)
model_output2 = tf.subtract(tf.matmul(x_data, A2), b2)

# Declare vector L2 'norm' function squared
l2_norm = tf.reduce_sum(tf.square(A))

# Loss = mean(max(0, 1 - pred*actual)) + alpha * L2_norm(A)^2
alpha = tf.constant([0.01])
classification_term = tf.reduce_mean(tf.maximum(0., tf.subtract(1., tf.multiply(model_output, y_target))))
classification_term2 = tf.reduce_mean(tf.maximum(0., tf.subtract(1., tf.multiply(model_output2, y_target))))
loss = tf.add(classification_term, tf.multiply(alpha, l2_norm))  # hinge loss + L2 penalty (soft-margin SVM)
loss2 = tf.add(classification_term2, [0.])                       # hinge loss only, for comparison

my_opt = tf.train.GradientDescentOptimizer(0.01)
train_step = my_opt.minimize(loss)

my_opt2 = tf.train.GradientDescentOptimizer(0.01)
train_step2 = my_opt2.minimize(loss2)

with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)

# Training loop
    loss_vec = []
    train_accuracy = []
    test_accuracy = []
    for i in range(20000):
        rand_index = np.random.choice(len(x_vals_train), size=batch_size)
        rand_x = x_vals_train[rand_index]
        rand_y = np.transpose([y_vals_train[rand_index]])
        sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
        sess.run(train_step2, feed_dict={x_data: rand_x, y_target: rand_y})



    [[a1], [a2]] = sess.run(A)
    [[b]] = sess.run(b)
    slope = -a2/a1
    y_intercept = b/a1
    best_fit = []

    [[a12], [a22]] = sess.run(A2)
    [[b2]] = sess.run(b2)
    slope2 = -a22/a12
    y_intercept2 = b2/a12
    best_fit2 = []

    x1_vals = [d[1] for d in x_vals]

    for i in x1_vals:
        best_fit.append(slope*i + y_intercept)
        best_fit2.append(slope2*i + y_intercept2)


# Separate I. setosa
    setosa_x = [d[1] for i, d in enumerate(x_vals) if y_vals[i] == 1]
    setosa_y = [d[0] for i, d in enumerate(x_vals) if y_vals[i] == 1]
    not_setosa_x = [d[1] for i, d in enumerate(x_vals) if y_vals[i] == -1]
    not_setosa_y = [d[0] for i, d in enumerate(x_vals) if y_vals[i] == -1]

    plt.plot(setosa_x, setosa_y, 'o', label='I. setosa')
    plt.plot(not_setosa_x, not_setosa_y, 'x', label='Non-setosa')
    plt.plot(x1_vals, best_fit, 'r-', label='Linear Separator + w', linewidth=3)
    plt.plot(x1_vals, best_fit2, 'b-', label='Linear Separator', linewidth=3)
    plt.ylim([0, 10])
    plt.legend(loc='lower right')
    plt.title('Sepal Length vs Petal Width')
    plt.xlabel('Petal Width')
    plt.ylabel('Sepal Length')
    plt.show()
The code builds two linear models. When the loss is computed, one model adds the squared ||w|| term and the other does not, so the plot shows two separating lines. The red line, trained with the regularization term, maximizes the distance (margin) between the support vectors and the linear model, and therefore generalizes better to unseen data.
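
To make the comparison concrete, the following minimal NumPy sketch (not part of the original code; the weights, bias, and the two samples are made-up values for illustration) evaluates the same hinge loss with and without the alpha * ||A||^2 penalty:

import numpy as np

# Made-up weight vector, bias, and a tiny two-sample batch (illustration only)
A = np.array([[1.2], [-0.8]])            # shape (2, 1), like the TensorFlow variable A
b = np.array([[0.5]])                    # shape (1, 1), like the TensorFlow variable b
alpha = 0.01

x = np.array([[5.1, 0.2], [6.3, 1.8]])   # two samples: [sepal length, petal width]
y = np.array([[1.], [-1.]])              # labels in {+1, -1}

output = x.dot(A) - b                             # same as tf.subtract(tf.matmul(x_data, A), b)
hinge = np.mean(np.maximum(0., 1. - output * y))  # classification term
l2 = np.sum(A ** 2)                               # squared L2 norm of A

print("hinge only           :", hinge)                 # corresponds to loss2
print("hinge + alpha*||A||^2:", hinge + alpha * l2)    # corresponds to loss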

2. Implementing an SVM with TensorFlow, saving and reusing the model

Training code:

# -*- coding: utf-8 -*-

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets

# Load the data
iris = datasets.load_iris()
x_vals = np.array([[x[0], x[3]] for x in iris.data])           # sepal length and petal width
y_vals = np.array([1 if y == 0 else -1 for y in iris.target])  # +1 for I. setosa, -1 otherwise

# Split into training and test sets
train_indices = np.random.choice(len(x_vals), int(len(x_vals)*0.8), replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]

batch_size = 100

# Initialize placeholders (data is fed in at run time)
x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)

# Create weight and bias variables
A = tf.Variable(tf.random_normal(shape=[2, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))

# Define the linear model: output = x·A - b
model_output = tf.subtract(tf.matmul(x_data, A), b)

# Declare vector L2 'norm' function squared
l2_norm = tf.reduce_sum(tf.square(A))

# Loss = mean(max(0, 1 - pred*actual)) + alpha * L2_norm(A)^2
alpha = tf.constant([0.01])
classification_term = tf.reduce_mean(tf.maximum(0., tf.subtract(1., tf.multiply(model_output, y_target))))
loss = tf.add(classification_term, tf.multiply(alpha, l2_norm))

my_opt = tf.train.GradientDescentOptimizer(0.01)
train_step = my_opt.minimize(loss)

# Persistence: create a Saver to write model checkpoints
saver = tf.train.Saver()

with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)

# Training loop
    for i in range(20000):
        rand_index = np.random.choice(len(x_vals_train), size=batch_size)
        rand_x = x_vals_train[rand_index]
        rand_y = np.transpose([y_vals_train[rand_index]])
        sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    saver.save(sess, "./model/")
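
Note that saver.save(sess, "./model/") writes standard TensorFlow 1.x checkpoint files (an .index file, .data shards, and a checkpoint file) under ./model/, and restoring only works after the same graph has been rebuilt, which is exactly what the evaluation script below does. For reference, here is a minimal, self-contained save/restore sketch (the variable name v and the path ./demo_model are made-up examples, not from the original post):

import os
import tensorflow as tf

os.makedirs("./demo_model", exist_ok=True)

v = tf.Variable([1.0, 2.0], name="v")   # hypothetical variable, for illustration only
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, "./demo_model/v")     # writes v.index, v.data-* and a 'checkpoint' file

with tf.Session() as sess:
    saver.restore(sess, "./demo_model/v")  # the graph must define the same variables
    print(sess.run(v))                     # -> [1. 2.]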

Code for loading the saved model and evaluating it:

# -*- coding: utf-8 -*-

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets


# Load the data
iris = datasets.load_iris()
x_vals = np.array([[x[0], x[3]] for x in iris.data])           # sepal length and petal width
y_vals = np.array([1 if y == 0 else -1 for y in iris.target])  # +1 for I. setosa, -1 otherwise

# Pick a test set
test_indices = np.random.choice(len(x_vals), int(len(x_vals)*0.8), replace=False)
x_vals_test = x_vals[test_indices]
y_vals_test = y_vals[test_indices]

# Initialize placeholders (data is fed in at run time)
x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)

# Create weight and bias variables (values are overwritten by the restored checkpoint)
A = tf.Variable(tf.random_normal(shape=[2, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))

# Define the linear model: output = x·A - b
model_output = tf.subtract(tf.matmul(x_data, A), b)

# Evaluate correctness: entries are positive where the prediction agrees with the label, and 0 where it does not
result = tf.maximum(0., tf.multiply(model_output, y_target))

saver = tf.train.Saver()

with tf.Session() as sess:
    saver.restore(sess, "./model/")
    y_test = np.reshape(y_vals_test, (120, 1))   # 120 = 80% of the 150 iris samples
    array = sess.run(result, feed_dict={x_data: x_vals_test, y_target: y_test})
    num = np.array(array)
    zero_num = np.sum(num == 0)                  # number of misclassified samples
    print(num)
    print(zero_num)
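
For a single-number accuracy figure, a small addition to the evaluation graph above (a sketch, not part of the original post; it reuses model_output, y_target, x_vals_test and y_test defined above) could compare the sign of the decision function with the labels:

# Hypothetical extra ops on top of the graph defined above
prediction = tf.sign(model_output)   # +1 or -1
accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, y_target), tf.float32))

# Inside the session, after saver.restore(...):
#     print(sess.run(accuracy, feed_dict={x_data: x_vals_test, y_target: y_test}))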