Using gradient descent to iteratively update the parameter values
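The function below relies on three helpers defined earlier in this series: cost, partial_cost_theta0, and partial_cost_theta1. For reference, here is a minimal sketch of what they compute for the linear model h(x) = theta0 + theta1 * x, assuming the usual halved mean-squared-error cost; treat these as assumptions, since the exact definitions appear in the earlier installment:

import numpy as np

# Halved mean squared error of the linear model h(x) = theta0 + theta1 * x.
def cost(theta0, theta1, x, y):
    h = theta0 + theta1 * x
    return np.mean((h - y) ** 2) / 2

# Partial derivative of the cost with respect to theta0.
def partial_cost_theta0(theta0, theta1, x, y):
    h = theta0 + theta1 * x
    return np.mean(h - y)

# Partial derivative of the cost with respect to theta1.
def partial_cost_theta1(theta0, theta1, x, y):
    h = theta0 + theta1 * x
    return np.mean((h - y) * x)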
With those helpers in place, the gradient-descent loop itself:

# Gradient descent: iteratively update the parameter values.
# alpha=0.1 is the default learning rate (step size).
import numpy as np
import matplotlib.pyplot as plt

def gradient_descent(x, y, alpha=0.1, theta0=0, theta1=0):
    max_epochs = 1000               # iteration limit: 1000
    counter = 0
    c = cost(theta0, theta1, x, y)  # initial cost
    costs = [c]
    convergence_thres = 0.00001     # convergence threshold on the change in cost
    cprev = c + 10                  # seed the previous cost so the loop runs at least once
    theta0s = [theta0]
    theta1s = [theta1]
    # Keep iterating while the change in cost still exceeds the threshold
    # and the iteration count is below the limit.
    while (np.abs(cprev - c) > convergence_thres) and (counter < max_epochs):
        cprev = c
        update0 = alpha * partial_cost_theta0(theta0, theta1, x, y)  # alpha times the partial derivative w.r.t. theta0
        update1 = alpha * partial_cost_theta1(theta0, theta1, x, y)  # alpha times the partial derivative w.r.t. theta1
        # Gradient-descent step: update the theta values.
        theta0 -= update0
        theta1 -= update1
        # Record the parameter trajectory.
        theta0s.append(theta0)
        theta1s.append(theta1)
        # Compute the new cost.
        c = cost(theta0, theta1, x, y)
        costs.append(c)
        counter += 1
    return {'theta0': theta0, 'theta1': theta1, 'costs': costs}

As the number of iterations grows, the computed cost keeps falling and then levels off, which is how the loop approaches the minimum of the objective. Printing the fitted theta1 and plotting the cost history shows this (pga is the driving-distance/accuracy DataFrame loaded earlier in the series):

print('theta1 =', gradient_descent(pga.distance, pga.accuracy)['theta1'])

descend = gradient_descent(pga.distance, pga.accuracy, alpha=0.01)
plt.scatter(range(len(descend['costs'])), descend['costs'])
plt.xlabel('costs_len')
plt.ylabel('costs')
plt.show()

# Output: theta1 = -0.5984131176478865
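The learning rate alpha controls the step size of each update: a larger alpha reaches the threshold in fewer iterations but can overshoot the minimum, while a smaller alpha converges more slowly but more smoothly. A quick way to see this with the function above (a sketch; the exact numbers depend on the pga data):

# Compare convergence speed for two learning rates.
for alpha in (0.1, 0.01):
    result = gradient_descent(pga.distance, pga.accuracy, alpha=alpha)
    print('alpha =', alpha,
          'iterations =', len(result['costs']) - 1,
          'final cost =', result['costs'][-1])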