The idea of the artificial neuron (Artificial Neuron) dates back to the 1950s, and the perceptron (Perceptron), the first algorithmic description of how such a neuron could learn, was proposed by Frank Rosenblatt in 1958.
Rosenblatt's perceptron learning rule (Perceptron Learning Rule) is straightforward: initialize the weights to zero or small random numbers, then for each training sample compute the prediction and, whenever the prediction is wrong, adjust the weights in proportion to the error, nudging the decision boundary toward the misclassified sample.
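In symbols (standard textbook notation, with $\eta$ the learning rate, $y^{(i)}$ the true class label of sample $i$, and $\hat{y}^{(i)}$ the predicted label), each weight $w_j$ is updated as:

$$w_j := w_j + \Delta w_j, \qquad \Delta w_j = \eta\left(y^{(i)} - \hat{y}^{(i)}\right)x_j^{(i)}$$

Since both labels are $\pm 1$, the error term is zero for correctly classified samples, so the weights move only when the perceptron makes a mistake.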
The experiments below use the Iris dataset, which contains 3 classes (Iris-setosa, Iris-virginica, and Iris-versicolor) with 4 features per sample: sepal length, sepal width, petal length, and petal width. To keep the problem two-dimensional and easy to plot, we use only the first two classes and two of the four features.
First, load the Iris dataset and visualize the first 100 samples:
```python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# Load the Iris dataset from the UCI Machine Learning Repository
df = pd.read_csv('https://archive.ics.uci.edu/ml/'
                 'machine-learning-databases/iris/iris.data',
                 header=None)

# Select the first 100 samples (setosa and versicolor)
# and encode the class labels as -1 / 1
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)

# Use sepal length and petal length as the two features
X = df.iloc[0:100, [0, 2]].values

plt.scatter(X[:50, 0], X[:50, 1],
            color='red', marker='o', label='setosa')
plt.scatter(X[50:100, 0], X[50:100, 1],
            color='blue', marker='x', label='versicolor')
plt.xlabel('sepal length [cm]')
plt.ylabel('petal length [cm]')
plt.legend(loc='upper left')
plt.show()
```

The scatter plot shows that the two classes can be separated by a straight line, so a simple linear classifier is sufficient here.
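As a quick sanity check (not in the original text), one can inspect the loaded arrays before training:

```python
# Quick sanity check of the loaded data
print(df.tail())          # last rows of the raw CSV
print(X.shape, y.shape)   # expected: (100, 2) (100,)
print(np.unique(y))       # expected: [-1  1]
```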
The perceptron itself is a small class: `fit` loops over the training set `n_iter` times (epochs) and applies the learning rule to each sample, recording the number of misclassifications per epoch:

```python
class Perceptron(object):
    """Perceptron classifier.

    eta: learning rate (between 0.0 and 1.0)
    n_iter: number of passes (epochs) over the training set
    random_state: seed for reproducible weight initialization
    """
    def __init__(self, eta=0.01, n_iter=50, random_state=1):
        self.eta = eta
        self.n_iter = n_iter
        self.random_state = random_state

    def fit(self, X, y):
        # Initialize weights to small random numbers; w_[0] is the bias unit
        rgen = np.random.RandomState(self.random_state)
        self.w_ = rgen.normal(loc=0.0, scale=0.01, size=1 + X.shape[1])
        self.errors_ = []
        for _ in range(self.n_iter):
            errors = 0
            for xi, target in zip(X, y):
                # Perceptron learning rule: update in proportion to the error
                update = self.eta * (target - self.predict(xi))
                self.w_[1:] += update * xi
                self.w_[0] += update
                errors += int(update != 0.0)
            self.errors_.append(errors)
        return self

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        return np.where(self.net_input(X) >= 0.0, 1, -1)


ppn = Perceptron(eta=0.1, n_iter=10)
ppn.fit(X, y)

plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Number of updates')
plt.show()
```

The plot of misclassifications per epoch shows the number of updates dropping to zero after a few epochs: the perceptron has converged to a separating decision boundary.
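As a usage sketch (the sample values below are made up for illustration), the trained perceptron can then classify new measurements:

```python
# Classify two hypothetical flowers: (sepal length, petal length) in cm
new_samples = np.array([[5.0, 1.5],    # small petal -> expect setosa (-1)
                        [6.5, 4.5]])   # large petal -> expect versicolor (1)
print(ppn.predict(new_samples))
```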
To visualize that boundary, the following helper evaluates a classifier on a grid of points and colors the resulting decision regions (axis labels and the legend are left to the caller so the helper can be reused later):

```python
from matplotlib.colors import ListedColormap

def plot_decision_regions(X, y, classifier, resolution=0.02):
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # Evaluate the classifier on a grid covering the feature space
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)

    # Color the regions and overlay the training samples
    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(X[y == cl, 0], X[y == cl, 1],
                    alpha=0.8, c=colors[idx], marker=markers[idx],
                    label=cl, edgecolor='black')


plot_decision_regions(X, y, classifier=ppn)
plt.xlabel('sepal length [cm]')
plt.ylabel('petal length [cm]')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()
```

The decision regions confirm that the perceptron finds a line separating the two classes. Next, let's look at the adaptive linear neuron (Adaline), which learns with a different, gradient-based rule.
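The key difference from the perceptron: Adaline uses a linear (identity) activation and updates the weights against the continuous net input rather than the thresholded class label. With $z^{(i)} = \mathbf{w}^T\mathbf{x}^{(i)} + w_0$ (in the code below, `w_[0]` is the bias $w_0$), it minimizes a sum-of-squared-errors cost:

$$\phi(z) = z, \qquad J(\mathbf{w}) = \frac{1}{2}\sum_{i}\left(y^{(i)} - \phi\big(z^{(i)}\big)\right)^2$$

Because $J$ is differentiable and convex, gradient descent applies; its gradient yields exactly the `X.T.dot(errors)` update used in the implementation that follows.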
```python
class AdalineGD(object):
    """ADAptive LInear NEuron trained with batch gradient descent."""
    def __init__(self, eta=0.01, n_iter=50, random_state=1):
        self.eta = eta
        self.n_iter = n_iter
        self.random_state = random_state

    def fit(self, X, y):
        rgen = np.random.RandomState(self.random_state)
        self.w_ = rgen.normal(loc=0.0, scale=0.01, size=1 + X.shape[1])
        self.cost_ = []
        for i in range(self.n_iter):
            # One gradient step computed over the whole training set
            net_input = self.net_input(X)
            output = self.activation(net_input)
            errors = (y - output)
            self.w_[1:] += self.eta * X.T.dot(errors)
            self.w_[0] += self.eta * errors.sum()
            cost = (errors**2).sum() / 2.0
            self.cost_.append(cost)
        return self

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        # Identity (linear) activation
        return X

    def predict(self, X):
        return np.where(self.activation(self.net_input(X)) >= 0.0, 1, -1)


ada1 = AdalineGD(n_iter=10, eta=0.01).fit(X, y)
plt.plot(range(1, len(ada1.cost_) + 1), np.log10(ada1.cost_), marker='o')
plt.xlabel('Epochs')
plt.ylabel('log(Sum-squared-error)')
plt.title('Adaline - Learning rate 0.01')
plt.show()
```

Gradient descent (Gradient Descent) is sensitive to the choice of learning rate. With eta=0.01 on the raw features, the (log-scale) cost actually grows from epoch to epoch: each step overshoots the minimum and the weights diverge. A much smaller learning rate avoids this but converges very slowly. Standardizing the features makes Adaline far easier to train.
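For comparison, a sketch of the same experiment with a much smaller learning rate (eta=0.0001 is an illustrative value, not from the original text): the cost now decreases monotonically, but only by a little per epoch.

```python
# Same model and data, much smaller learning rate (illustrative value)
ada2 = AdalineGD(n_iter=10, eta=0.0001).fit(X, y)
plt.plot(range(1, len(ada2.cost_) + 1), ada2.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Sum-squared-error')
plt.title('Adaline - Learning rate 0.0001')
plt.show()
```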
```python
# Standardize the features: zero mean, unit variance per column
X_std = np.copy(X)
X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()

ada_gd = AdalineGD(n_iter=15, eta=0.01)
ada_gd.fit(X_std, y)

plot_decision_regions(X_std, y, classifier=ada_gd)
plt.title('Adaline - Gradient Descent')
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()

plt.plot(range(1, len(ada_gd.cost_) + 1), ada_gd.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Sum-squared-error')
plt.tight_layout()
plt.show()
```

After standardization, Adaline converges nicely with eta=0.01 and the sum-squared error decreases monotonically. Stochastic gradient descent (Stochastic Gradient Descent) goes one step further: instead of computing the gradient over the entire training set per update, it updates the weights incrementally after every single training sample, which is cheaper per step and typically reaches a good solution faster, especially on large datasets. Below is an Adaline variant trained this way.
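In symbols (same notation as the cost function above): batch gradient descent applies the summed update $\Delta\mathbf{w} = \eta\sum_i\left(y^{(i)} - \phi\big(z^{(i)}\big)\right)\mathbf{x}^{(i)}$ once per epoch, whereas stochastic gradient descent applies the single-sample version after every sample:

$$\Delta\mathbf{w} = \eta\left(y^{(i)} - \phi\big(z^{(i)}\big)\right)\mathbf{x}^{(i)}$$

Because each step follows a noisy estimate of the gradient, the training data is reshuffled every epoch to avoid cycles, which is exactly what the `_shuffle` method below does.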
```python
class AdalineSGD(object):
    """Adaline classifier trained with stochastic gradient descent."""
    def __init__(self, eta=0.01, n_iter=10, shuffle=True, random_state=None):
        self.eta = eta
        self.n_iter = n_iter
        self.shuffle = shuffle
        self.random_state = random_state

    def fit(self, X, y):
        self._initialize_weights(X.shape[1])
        self.cost_ = []
        for i in range(self.n_iter):
            # Reshuffle each epoch so samples are not visited in a fixed order
            if self.shuffle:
                X, y = self._shuffle(X, y)
            cost = []
            for xi, target in zip(X, y):
                # Update the weights after every single sample
                output = self.net_input(xi)
                error = target - output
                self.w_[1:] += self.eta * xi.dot(error)
                self.w_[0] += self.eta * error
                cost.append(0.5 * error**2)
            avg_cost = sum(cost) / len(y)
            self.cost_.append(avg_cost)
        return self

    def _initialize_weights(self, m):
        self.rgen = np.random.RandomState(self.random_state)
        self.w_ = self.rgen.normal(loc=0.0, scale=0.01, size=1 + m)
        self.w_initialized = True

    def _shuffle(self, X, y):
        r = self.rgen.permutation(len(y))
        return X[r], y[r]

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        return X

    def predict(self, X):
        return np.where(self.activation(self.net_input(X)) >= 0.0, 1, -1)


ada_sgd = AdalineSGD(n_iter=15, eta=0.01, random_state=1)
ada_sgd.fit(X_std, y)

plot_decision_regions(X_std, y, classifier=ada_sgd)
plt.title('Adaline - Stochastic Gradient Descent')
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()

plt.plot(range(1, len(ada_sgd.cost_) + 1), ada_sgd.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Average Cost')
plt.tight_layout()
plt.show()
```

The decision regions look essentially the same as with batch gradient descent, and the average cost per epoch decreases quickly.
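As a final check (an added sketch, not from the original text), one can count how many training samples the SGD model still misclassifies:

```python
# Count misclassified training samples for the SGD model
y_pred = ada_sgd.predict(X_std)
print('Misclassified samples: %d' % (y_pred != y).sum())
print('Training accuracy: %.2f%%' % (100 * (y_pred == y).mean()))
```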
To summarize: we implemented the perceptron and the adaptive linear neuron from scratch in Python, trained both on two classes of the Iris dataset, and saw how the learning rate, feature standardization, and the choice between batch and stochastic gradient descent affect convergence and the resulting decision boundaries.