
Andrew Ng Machine Learning Assignment 1: Python Implementation


Required part (mainly based on the notes by 黄海广):

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('ex1data1.txt', names=['population', 'profit'])
data = df

# def normalize_feature(df):
#     return df.apply(lambda column: (column - column.mean()) / column.std())  # feature scaling (not needed for this single-variable problem)

def get_X(df):  # read the features
    ones = pd.DataFrame({'ones': np.ones(len(df))})  # ones is an m x 1 DataFrame (the intercept column)
    data = pd.concat([ones, df], axis=1)  # concatenate by column (axis: 0 = rows, 1 = columns)
    return data.iloc[:, :-1]

def linear_cost(theta, X, y):
    m = X.shape[0]  # number of training examples
    inner = X @ theta - y  # residuals h_theta(x) - y, an m-dimensional vector
    square_sum = inner.T @ inner  # sum of squared residuals
    cost = square_sum / (2 * m)
    return cost

def gradient(theta, X, y):
    m = X.shape[0]
    inner = X.T @ (X @ theta - y)  # derivative of the cost with respect to each theta_j (the constant column of ones handles theta_0)
    return inner / m

def batch_gradient_decent(theta, X, y, epoch, alpha=0.02):
    cost_data = [linear_cost(theta, X, y)]
    for _ in range(epoch):  # "_" is just a loop counter and is not used inside the loop
        theta = theta - alpha * gradient(theta, X, y)
        cost_data.append(linear_cost(theta, X, y))
    return theta, cost_data

X = get_X(df)
y = df.values[:, 1]
theta = np.zeros(df.shape[1])
epoch = 6000
final_theta, cost_data = batch_gradient_decent(theta, X, y, epoch)
b = final_theta[0]  # intercept
k = final_theta[1]  # slope

plt.scatter(data.population, data.profit, label="Training data")
plt.plot(data.population, data.population * k + b, label="Prediction")
plt.xlabel('population')
plt.ylabel('profit')
plt.legend(loc=2)

forecast = float(input('population'))
predict_profit = forecast * k + b
print(predict_profit)
plt.scatter(forecast, predict_profit, marker='+', c='red')
plt.show()
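For reference, linear_cost and gradient above compute the standard squared-error cost and its gradient, and batch_gradient_decent repeats the update below for epoch iterations (m is the number of training examples, α the learning rate):

J(\theta) = \frac{1}{2m}\sum_{i=1}^{m}\left(\theta^{\top}x^{(i)} - y^{(i)}\right)^{2}

\theta := \theta - \frac{\alpha}{m}\,X^{\top}(X\theta - y)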


I entered 23 as the forecast population; the predicted point is marked in red on the plot.
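batch_gradient_decent also returns cost_data, which the script collects but never plots. A minimal sketch for checking convergence, assuming the cost_data list and the matplotlib import from the script above:

# Plot the cost after each epoch; the curve should decrease and flatten out if alpha is chosen well.
plt.figure()
plt.plot(range(len(cost_data)), cost_data)
plt.xlabel('epoch')
plt.ylabel('cost')
plt.show()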

Optional part

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

ax = plt.axes(projection='3d')
df = pd.read_csv('ex1data2.txt', names=['square', 'bedrooms', 'price'])

def normalize_feature(df):  # feature scaling (z-score normalization)
    return df.apply(lambda column: (column - column.mean()) / column.std())

def get_X(df):  # read the features
    ones = pd.DataFrame({'ones': np.ones(len(df))})  # ones is an m x 1 DataFrame (the intercept column)
    data = pd.concat([ones, df], axis=1)  # concatenate by column (axis: 0 = rows, 1 = columns)
    return data.iloc[:, :-1]

def lr_cost(theta, X, y):
    m = X.shape[0]  # number of training examples
    inner = X @ theta - y  # R(m*1); X @ theta is equivalent to X.dot(theta)
    square_sum = inner.T @ inner
    cost = square_sum / (2 * m)
    return cost

def gradient(theta, X, y):
    m = X.shape[0]  # number of training examples
    inner = X.T @ (X @ theta - y)  # (m,n).T @ (m,1) -> (n,1); X @ theta is equivalent to X.dot(theta)
    return inner / m

def batch_gradient_decent(theta, X, y, epoch, alpha=0.01):
    cost_data = [lr_cost(theta, X, y)]
    for _ in range(epoch):
        theta = theta - alpha * gradient(theta, X, y)
        cost_data.append(lr_cost(theta, X, y))
    return theta, cost_data

def normalEqn(X, y):  # normal equation
    theta = np.linalg.inv(X.T @ X) @ X.T @ y  # X.T @ X is equivalent to X.T.dot(X)
    return theta

data = normalize_feature(df)  # feature scaling
y = data.values[:, 2]
X = get_X(data)
ax.scatter(X['square'], X['bedrooms'], y, alpha=0.3)
plt.xlabel('square')
plt.ylabel('bedrooms')
ax.set_zlabel(r'$prices$')

epoch = 500
alpha = 0.01
theta = np.zeros(X.shape[1])  # X has three features (1, square, bedrooms), so theta starts as three zeros
final_theta, cost_data = batch_gradient_decent(theta, X, y, epoch, alpha=alpha)
D = final_theta[0]
A = final_theta[1]
B = final_theta[2]
Z = A * X['square'] + B * X['bedrooms'] + D
ax.plot_trisurf(X['square'], X['bedrooms'], Z, linewidth=0, antialiased=False)

predict_square = float(input('square:'))
predict_square = (predict_square - df.square.mean()) / df.square.std()
predict_bedrooms = float(input('bedrooms:'))
predict_bedrooms = (predict_bedrooms - df.bedrooms.mean()) / df.bedrooms.std()
p = A * predict_square + B * predict_bedrooms + D  # predicted price in the normalized scale
ax.scatter(predict_square, predict_bedrooms, p, marker='+', c='red')  # pass p as the z coordinate so the point sits on the fitted plane
p = p * df.price.std() + df.price.mean()  # convert back to the original price scale
print('I predict the price is:')
print(p)
plt.show()
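normalEqn above is defined but never called. Since the normal equation gives the closed-form least-squares solution, it can be used to sanity-check the gradient-descent result; a minimal sketch, assuming the X, y, and final_theta variables from the script above (theta_ne is just an illustrative name):

# theta = (X^T X)^{-1} X^T y, computed on the same normalized data used for gradient descent
theta_ne = normalEqn(X.values, y)
print('gradient descent:', final_theta)
print('normal equation :', theta_ne)

With only 500 epochs of gradient descent the two sets of parameters may not match exactly, but they should be close.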


Input:
square=1635
bedrooms=3
I predict the price is:
292611.913236568
In theory the predicted point should lie on the fitted plane. For that to happen, ax.scatter must be given the (normalized) predicted price as its z coordinate, as in the code above; otherwise the point is drawn at z = 0 and appears to float off the plane. Either way, the predicted value itself is usable.
