提交 679b7082 编写于 作者: wit-df's avatar wit-df

上传新文件

上级 e7c9812c
import numpy as np
import random
def one_hot(y):
    """Return the one-hot encoding of a 1-D label sequence.

    Each distinct label gets one column, in sorted label order. For the
    common case of labels 0..k-1 this matches the naive encoding exactly;
    unlike indexing by the raw label value, it also works when labels are
    non-contiguous (e.g. [0, 2] or [3, 5]) instead of raising IndexError.

    Parameters
    ----------
    y : sequence of hashable labels (typically ints), length m.

    Returns
    -------
    ndarray of shape (m, n_classes) with exactly one 1.0 per row.
    """
    y = np.asarray(y)
    # `inverse` maps each sample to its class's column index (sorted order).
    classes, inverse = np.unique(y, return_inverse=True)
    result = np.zeros((y.shape[0], classes.shape[0]))
    # Vectorized scatter: one assignment instead of a Python loop.
    result[np.arange(y.shape[0]), inverse] = 1
    return result
class Logistic_Regression:
    """Multinomial (softmax) logistic regression trained by SGD.

    Labels are expected to be integers; columns of the weight matrix
    follow the sorted order of the distinct labels, so for labels
    0..k-1 the predicted column index equals the label itself.
    """

    def __init__(self, epochs=10, lr=0.01) -> None:
        # epochs: number of passes over the data (m random updates each).
        # lr: SGD learning rate.
        self.epochs = epochs
        self.lr = lr

    def softmax(self, X, w):
        """Row-wise softmax of the logits X @ w.

        Subtracting each row's max before exponentiating prevents
        overflow in np.exp for large logits; the result is
        mathematically identical to exp(z) / sum(exp(z)).
        """
        z = X.dot(w)
        z -= z.max(axis=1, keepdims=True)
        f = np.exp(z)
        return f / f.sum(axis=1, keepdims=True)

    # Train with stochastic gradient descent (one random sample per step).
    def fit(self, X, y):
        """Fit weights on X (shape (m, n)) and integer labels y (length m).

        Sets self.w to an (n, k) weight matrix, where k is the number of
        distinct labels.
        """
        y = np.asarray(y)
        m, n = X.shape
        # Build the one-hot target matrix inline so the class is
        # self-contained; columns follow sorted distinct-label order.
        classes = np.unique(y)
        y_proba = (y.reshape(-1, 1) == classes).astype(float)
        k = classes.shape[0]
        w = np.zeros((n, k))
        for _ in range(self.epochs):
            # One "epoch" = m single-sample updates drawn with replacement
            # (the original loop index was unused — only the random draw
            # selects the sample).
            for _ in range(m):
                idx = np.random.randint(0, m)
                x = X[idx].reshape(1, -1)
                # Gradient of cross-entropy w.r.t. w for one sample:
                # x^T (softmax(x w) - onehot(y)).
                gradient = x.T.dot(self.softmax(x, w) - y_proba[idx])
                w -= self.lr * gradient
        self.w = w

    # Predicted class probabilities, shape (m, k).
    def predict_proba(self, X):
        return self.softmax(X, self.w)

    # Predicted class labels (column index of the highest probability).
    def predict(self, X):
        return self.predict_proba(X).argmax(axis=1)
\ No newline at end of file
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册