Implementing Linear Regression with NumPy

2024-09-01 17:04
Tags: implementation, regression, linear, numpy

This article shows how to implement linear regression, first with scikit-learn's ordinary least squares as a baseline and then from scratch with NumPy using gradient descent, for both the single-variable and the multivariable case.

1 Single-Variable Linear Regression

1.1 sklearn Implementation (Ordinary Least Squares)

import os
import sys
import pandas as pd
import matplotlib.pyplot as plt

current_dir = os.getcwd()
path = current_dir + '\\' + "Salary Data.csv"

def plot_data(path):
    table = pd.read_csv(path)
    experience = table["Experience Years"]
    salary = table["Salary"]
    plt.figure(figsize=(8, 6))
    plt.scatter(experience, salary, color="blue", label="Data points")
    plt.title("experience vs salary")
    plt.xlabel("Experience (Years)")
    plt.ylabel("Salary")
    plt.grid(True)
    plt.legend()
    plt.show()

plot_data(path)

table = pd.read_csv(path)
y = table['Salary']
x = table[['Experience Years']]   # x.shape = (40, 1)
z = table['Experience Years']     # z.shape = (40,)

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, random_state=2529)
# shapes: (28, 1) (28,) (12, 1) (12,)

from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(x_train, y_train)
print(model.intercept_)   # 26596.961311068262
print(model.coef_)        # [9405.61663234]

from sklearn.metrics import mean_squared_error, r2_score
y_pred = model.predict(x_test)
mse = mean_squared_error(y_test, y_pred)
print("mse = ", mse)      # 24141421.671440993
r2 = r2_score(y_test, y_pred)
print("r2 = ", r2)        # 0.960233432146844

# plot the fitted line over the whole dataset
y_whole_pred = model.predict(x)
# x.iloc[:, 0] can also be written as x or x["Experience Years"]
plt.scatter(x.iloc[:, 0], y, color="blue", label="Data points")
plt.plot(x, y_whole_pred, color="red", linewidth=2, label='linear regression')
plt.xlabel("Experience (Years)")
plt.ylabel("Salary")
plt.legend()
plt.show()
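As a quick sanity check, the fitted model is simply ŷ = intercept_ + coef_[0] · x, so a prediction can be reproduced by hand. A minimal sketch (the 5-year input is only an illustrative value, not part of the original script):

years = 5.0   # hypothetical input, for illustration only
manual_pred = model.intercept_ + model.coef_[0] * years
print(manual_pred)
# should match sklearn's own prediction for the same input
print(model.predict(pd.DataFrame({"Experience Years": [years]})))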

1.2 NumPy Implementation (Gradient Descent)
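The class below fits y ≈ X·w + b with batch gradient descent. Each epoch it computes the residual r = X·w + b − y and applies the updates w ← w − lr·(1/m)·Xᵀr and b ← b − lr·(1/m)·Σr, which is gradient descent on the cost (1/(2m))·Σr²; the value logged every 100 epochs is the plain mean squared error (1/m)·Σr².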

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from sklearn.model_selection import train_test_split
import sys

def plot_data(path):
    table = pd.read_csv(path)
    experience = table["Experience Years"]
    salary = table["Salary"]
    plt.figure(figsize=(8, 6))
    plt.scatter(experience, salary, color="blue", label="Data points")
    plt.title("experience vs salary")
    plt.xlabel("Experience (Years)")
    plt.ylabel("Salary")
    plt.grid(True)
    plt.legend()
    plt.show()

class MyLinearReg:
    def __init__(self, lr=0.01, epochs=1000):
        self.lr = lr
        self.epochs = epochs
        self.w = None
        self.b = None
        self.loss_history = []

    def fit(self, X, y):
        m, n = X.shape
        self.w = np.zeros(n)
        self.b = 0
        for epoch in range(self.epochs):
            # X(m,n) @ w(n,): matrix-vector product; NumPy broadcasting adds the bias
            y_pred = X @ self.w + self.b   # y_pred(m,)
            loss = y_pred - y              # residuals, shape (m,)
            dcost_dw = (1 / m) * (X.T @ loss)
            dcost_b = (1 / m) * np.sum(loss)
            self.w -= self.lr * dcost_dw
            self.b -= self.lr * dcost_b
            square_loss = (y_pred - y) ** 2
            mean_loss = np.mean(square_loss)
            self.loss_history.append(mean_loss)
            if epoch % 100 == 99:
                print(f"Epoch {epoch} loss: {mean_loss}")
        print("Training finished.")
        print("Final parameters:", "Slope w=", self.w, " Bias b=", self.b)
        # Final parameters: Slope w= [9853.19132896]  Bias b= 23780.770014707407

    def predict(self, X):
        return X @ self.w + self.b

    def get_params(self):
        return self.w, self.b

# plot_data(path)
current_dir=os.getcwd()
path=current_dir+'\\'+"Salary Data.csv"
table=pd.read_csv(path)
x = table["Experience Years"].values # x(40,)
y = table["Salary"].values           # y(40,)
# shapes after the split: x_train (32,), x_test (8,), y_train (32,), y_test (8,)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
x_train = x_train.reshape(-1, 1)
x_test = x_test.reshape(-1, 1)

model = MyLinearReg()
model.fit(x_train, y_train)

y_pred = model.predict(x_test)

from sklearn.metrics import mean_squared_error, r2_score
mse = mean_squared_error(y_test, y_pred)
print( "mse = ", mse )          # mse =  43053815.910611115
r2 = r2_score(y_test, y_pred)
print( "r2 = ", r2 )           # r2 =  0.9165907194371214X=x.reshape(-1,1)
y_whole_pred=model.predict(X)
# plot the fitted line over the whole dataset (x and y are 1-D NumPy arrays here)
plt.scatter(x,y,color="blue",label="Data points")
plt.plot(x, y_whole_pred, color="red", linewidth=2, label='linear regression')
plt.xlabel("Experience (Years)")
plt.ylabel("Salary")
plt.legend()
plt.show()
Epoch 99 loss: 111815444.20061775
Epoch 199 loss: 81534511.03025383
Epoch 299 loss: 61760636.04682423
Epoch 399 loss: 48848017.74472436
Epoch 499 loss: 40415896.49608463
Epoch 599 loss: 34909602.800390095
Epoch 699 loss: 31313915.621658318
Epoch 799 loss: 28965881.353634194
Epoch 899 loss: 27432581.973080143
Epoch 999 loss: 26431315.92580659
Training finished.
Final parameters: Slope w= [9853.19132896]  Bias b= 23780.770014707407
mse =  43053815.910611115
r2 =  0.9165907194371214
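Since MyLinearReg records the per-epoch loss in loss_history, convergence can also be checked visually. A small sketch, assuming the model from the block above is still in scope:

plt.plot(model.loss_history)
plt.xlabel("Epoch")
plt.ylabel("Mean squared loss")
plt.title("Gradient descent convergence")
plt.grid(True)
plt.show()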

2 Multivariable Linear Regression

2.1 sklearn Implementation (Ordinary Least Squares)

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import sys

def draw_PairwiseScatter(x, y):
    num_features = x.shape[1]
    plt.figure(figsize=(15, 10))
    for i in range(num_features):
        plt.subplot(3, 5, i + 1)   # subplot indices start at 1
        plt.scatter(x[:, i], y, marker='o', color="green", s=15, alpha=0.5)
        plt.xlabel("Feature {}".format(i + 1))
        plt.ylabel("Label")
        plt.title("Feature {} vs Target".format(i + 1))
    plt.tight_layout()
    plt.show()

def draw_real_pred(x, y, model):
    y_pred_whole = model.predict(x)
    num_features = x.shape[1]
    plt.figure(figsize=(15, 10))
    for i in range(num_features):
        plt.subplot(3, 5, i + 1)
        plt.scatter(x[:, i], y, marker='o', color="green", s=15, alpha=0.5)
        plt.scatter(x[:, i], y_pred_whole, marker='o', color="red", s=15, alpha=0.5)
        plt.xlabel("Feature {}".format(i + 1))
        plt.ylabel("Label")
        plt.title("Feature {} vs Target".format(i + 1))
    plt.tight_layout()
    plt.show()

current_dir = os.getcwd()
path = current_dir + '\\' + "Boston.csv"
house = pd.read_csv(path)
y = house['MEDV']                  # (506,)
X = house.drop(['MEDV'], axis=1)   # (506, 13)
X = np.array(X)
y = np.array(y)
draw_PairwiseScatter(X, y)

from sklearn.linear_model import LinearRegression
model = LinearRegression()

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, y, train_size=0.7, random_state=2529)
# shapes: (354, 13) (152, 13) (354,) (152,)

# Ordinary least squares, not gradient descent, so the data does not need to be standardized.
# Closed-form solution (normal equation): theta = (X.T @ X)^(-1) @ X.T @ y
model.fit(x_train, y_train)
print(model.intercept_)
print(model.coef_)

y_pred = model.predict(x_test)
from sklearn.metrics import mean_absolute_error, r2_score
print("mean_absolute_error(y_pred, y_test):", mean_absolute_error(y_pred, y_test))
print(model.score(x_test, y_test))
r2 = r2_score(y_test, y_pred)
print(r2)   # 0.6551914852365524

draw_real_pred(X, y, model)
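The comment above gives the closed-form least-squares solution theta = (XᵀX)⁻¹Xᵀy. As a sketch of how to verify it (reusing x_train and y_train from the block above; not part of the original script), solving the normal equation directly with NumPy should roughly reproduce model.intercept_ and model.coef_:

# design matrix with a leading column of ones for the intercept
Xb = np.hstack([np.ones((x_train.shape[0], 1)), x_train])
# solve (X^T X) theta = X^T y instead of forming the inverse explicitly
theta = np.linalg.solve(Xb.T @ Xb, Xb.T @ y_train)
print(theta[0])    # close to model.intercept_
print(theta[1:])   # close to model.coef_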

2.2 NumPy Implementation (Gradient Descent)

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import sys

def draw_PairwiseScatter(x, y):
    num_features = x.shape[1]
    plt.figure(figsize=(15, 10))
    for i in range(num_features):
        plt.subplot(3, 5, i + 1)
        plt.scatter(x[:, i], y, marker='o', color="green", s=15, alpha=0.5)
        plt.xlabel("Feature {}".format(i + 1))
        plt.ylabel("Label")
        plt.title("Feature {} vs Target".format(i + 1))
    plt.tight_layout()
    plt.show()

def draw_real_pred(x, y, model):
    y_pred_whole = model.predict(x)
    num_features = x.shape[1]
    plt.figure(figsize=(15, 10))
    for i in range(num_features):
        plt.subplot(3, 5, i + 1)
        plt.scatter(x[:, i], y, marker='o', color="green", s=15, alpha=0.5)
        plt.scatter(x[:, i], y_pred_whole, marker='o', color="red", s=15, alpha=0.5)
        plt.xlabel("Feature {}".format(i + 1))
        plt.ylabel("Label")
        plt.title("Feature {} vs Target".format(i + 1))
    plt.tight_layout()
    plt.show()

class MultipleLinear:
    def __init__(self, learning_rate=0.01, epochs=1000):
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.theta = None
        self.cost_history = None

    def fit(self, X, y):
        # prepend a column of ones so theta[0] acts as the bias term
        X = np.hstack((np.ones((X.shape[0], 1)), X))
        m, n = X.shape
        self.theta = np.zeros(n)
        self.cost_history = []
        for epoch in range(self.epochs):
            y_pred = X @ self.theta
            gradient = X.T @ (y_pred - y)
            self.theta -= self.learning_rate * gradient * (1 / m)
            cost = self.compute_cost(X, y)
            self.cost_history.append(cost)
            if epoch % 100 == 99:
                print(f"Epoch {epoch} cost: {cost}")
        print("Training complete")
        print(self.theta)

    def predict(self, X):
        m, n = X.shape
        X = np.hstack((np.ones((m, 1)), X))
        return X @ self.theta

    def compute_cost(self, X, y):
        # X already contains the bias column here
        m = X.shape[0]
        y_pred = X @ self.theta
        sq_errors = (y_pred - y) ** 2
        cost = 1 / (2 * m) * np.sum(sq_errors)
        return cost

current_dir = os.getcwd()
path = current_dir + '\\' + "Boston.csv"
house = pd.read_csv(path)
y = house['MEDV']                  # (506,)
X = house.drop(['MEDV'], axis=1)   # (506, 13)
X = np.array(X)
y = np.array(y)
draw_PairwiseScatter(X, y)

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# shapes: (404, 13) (102, 13) (404,) (102,)

from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(x_train)   # (404, 13)
x_test_scaled = scaler.transform(x_test)         # (102, 13)

model = MultipleLinear()
model.fit(x_train_scaled, y_train)

y_pred = model.predict(x_test_scaled)
from sklearn.metrics import r2_score
r2 = r2_score(y_test, y_pred)
print("r2 = ", r2)   # r2 =  0.6543244875135051

# the model was trained on standardized features, so standardize before plotting predictions
draw_real_pred(scaler.transform(X), y, model)

That wraps up this walkthrough of implementing linear regression with NumPy; hopefully it serves as a useful reference.



