Implementing an SVM model with TensorFlow

The script below trains a linear soft-margin SVM (hinge loss plus an L2 penalty) on the first two Iris classes, holding out part of the data as a validation set and plotting both loss curves.

 
# TF1-style graph code (tf.placeholder / tf.Session); on TensorFlow 2 it can be
# run via "import tensorflow.compat.v1 as tf" plus tf.disable_v2_behavior().
import tensorflow as tf
import numpy as np
from sklearn import datasets
import math
import matplotlib.pyplot as plt

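# Load the Iris dataset: 150 samples, 4 features, 3 classes.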
iris = datasets.load_iris()
x_vals = np.array(iris['data'])
x_features = np.array(iris["feature_names"])
y_vals = np.array(iris['target'])
y_features = np.array(iris['target_names'])

# Keep only the first two classes (samples 0-99: setosa vs. versicolor) and
# relabel class 0 as -1 so the targets are in {-1, +1} for the hinge loss.
X = x_vals[:100]
Y = np.where(y_vals[:100] == 0, -1, 1)

# Shuffle the 100 samples, then hold out the first chunk as a dev set.
# The split index is computed from the full 150-sample set (20% of 150 = 30),
# so 30 of the 100 selected samples end up in the dev set.
validation_rate = 0.2
perm = np.random.permutation(len(X))
X, Y = X[perm], Y[perm]
split = math.floor(len(x_vals) * validation_rate)
x_dev, y_dev = X[:split], Y[:split]
x_train, y_train = X[split:], Y[split:]

learning_rate = 0.001
alpha = tf.constant(0.1)  # weight of the L2 penalty in the soft-margin loss

# Placeholders for a batch of 4-feature samples and their {-1, +1} labels.
X_data = tf.placeholder(shape=[None, 4], dtype=tf.float32)
Y_data = tf.placeholder(shape=[None], dtype=tf.float32)

# Linear decision function f(x) = x·A + b.
A = tf.Variable(tf.random_normal(shape=[4, 1]))
b = tf.Variable(tf.constant(0.01, shape=[1, 1]))
model_output = tf.matmul(X_data, A) + b

# Hinge loss: squeeze model_output from [N, 1] to [N] so it lines up with
# Y_data ([N]); without the squeeze the multiply broadcasts to an [N, N] matrix.
l2_norm = tf.reduce_mean(tf.square(A))
classification_term = tf.reduce_mean(
    tf.maximum(0., tf.subtract(1., tf.multiply(tf.squeeze(model_output), Y_data))))
loss = tf.add(classification_term, tf.multiply(alpha, l2_norm))
my_opt = tf.train.AdamOptimizer(learning_rate).minimize(loss)
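# The quantity minimized above is the usual soft-margin objective,
#     mean( max(0, 1 - y_i * (x_i·A + b)) ) + alpha * mean(A^2),
# i.e. the hinge loss on the margin plus an L2 penalty on the weights; samples
# with margin y_i * f(x_i) >= 1 contribute nothing to the hinge term.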
tra_loss = []  # per-step training loss
dev_loss = []  # per-step validation (dev) loss
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for step in range(1200):
        # One full-batch gradient step; record the loss on both splits.
        loss_tr, _ = sess.run([loss, my_opt], feed_dict={X_data: x_train, Y_data: y_train})
        tra_loss.append(loss_tr)
        loss_dev = sess.run(loss, feed_dict={X_data: x_dev, Y_data: y_dev})
        dev_loss.append(loss_dev)

    # Classify the dev set: the sign of the decision function is the predicted label.
    predict = sess.run(model_output, feed_dict={X_data: x_dev})
    pre_norm = np.sign(predict).ravel()
    for k, v in zip(y_dev, pre_norm):
        print(k, int(v))
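# Training loss (dashed orange) vs. dev loss (black) over the 1200 steps.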
plt.plot(tra_loss, linewidth=3, linestyle="--", color="orange")
plt.plot(dev_loss, linewidth=3, color="black")
plt.show()

Sample output (true label vs. predicted sign for the 30 dev samples):

-1 -1
-1 -1
1 1
1 1
-1 -1
1 1
1 1
-1 -1
1 1
1 1
-1 -1
-1 -1
1 1
-1 -1
1 1
1 1
1 1
-1 -1
-1 -1
-1 -1
-1 -1
1 1
-1 -1
1 1
-1 -1
1 1
-1 -1
1 1
-1 -1
-1 -1
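
To reduce those printed pairs to a single score, the stored dev predictions can be passed to scikit-learn's metrics; a minimal sketch, assuming y_dev and pre_norm from the script above are still in scope:

from sklearn.metrics import accuracy_score, precision_score

# y_dev holds the true {-1, +1} labels, pre_norm the predicted signs
print("accuracy :", accuracy_score(y_dev, pre_norm))
print("precision:", precision_score(y_dev, pre_norm, pos_label=1))

The same split can also be checked against scikit-learn's own SVM as a reference; a minimal sketch with a linear kernel (the C value here is illustrative, not tuned to match the TensorFlow model):

from sklearn import svm

clf = svm.SVC(kernel="linear", C=1.0)
clf.fit(x_train, y_train)
print("sklearn SVC dev accuracy:", clf.score(x_dev, y_dev))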