Linear Regression MCA Lab - Jupyter Notebook
In [3]: boston.head()
Out[3]:
Unnamed: 0 crim zn indus chas nox rm age dis rad tax ptratio black lstat medv
0 1 0.00632 18.0 2.31 0 0.538 6.575 65.2 4.0900 1 296 15.3 396.90 4.98 24.0
1 2 0.02731 0.0 7.07 0 0.469 6.421 78.9 4.9671 2 242 17.8 396.90 9.14 21.6
2 3 0.02729 0.0 7.07 0 0.469 7.185 61.1 4.9671 2 242 17.8 392.83 4.03 34.7
3 4 0.03237 0.0 2.18 0 0.458 6.998 45.8 6.0622 3 222 18.7 394.63 2.94 33.4
4 5 0.06905 0.0 2.18 0 0.458 7.147 54.2 6.0622 3 222 18.7 396.90 5.33 36.2
In [5]: y= boston[["medv"]]
In [6]: x = boston[["crim"]]
In [9]: # Before building the model we have to divide our dataset into train and test sets
from sklearn.model_selection import train_test_split
In [10]: # Before building the model we have to divide our dataset into train and test sets
In [12]: lr = LinearRegression()
# lr instantiates the model: store an instance of LinearRegression in lr
Out[13]: LinearRegression()
In [14]: y_pred=lr.predict(x_test)
In [15]: y_test.head()
Out[15]:
medv
216 23.3
293 23.9
51 20.5
2 34.7
151 19.6
In [16]: y_pred[0:5]
# Predicted values differ from the actual y_test values — these differences are the residual errors
Out[16]: array([[23.80451205],
[23.78965233],
[23.80540644],
[23.81185568],
[23.22266879]])
In [19]: mean_squared_error(y_test, y_pred) # pass the actual values (y_test) and the predicted values (y_pred)
Out[19]: 81.57800347209668
In [20]: x = boston[["lstat"]]
Out[23]: LinearRegression()
In [24]: y_pred=lr2.predict(x_test)
In [25]: y_test.head()
Out[25]:
medv
217 28.7
346 17.2
278 29.1
324 25.0
396 12.5
In [26]: y_pred[0:5]
Out[26]: array([[25.58101115],
[22.70333202],
[27.9951715 ],
[29.02843213],
[16.23338229]])
Out[27]: 34.66868885942193
In [ ]: