initial commit
This commit is contained in:
commit
2fa4337ac4
1
.gitignore
vendored
Normal file
1
.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
venv
|
19
README.md
Normal file
19
README.md
Normal file
@ -0,0 +1,19 @@
|
||||
# Lab1
|
||||
|
||||
Ice Cream Sales: https://www.kaggle.com/datasets/raphaelmanayon/temperature-and-ice-cream-sales
|
||||
|
||||
Apple Quality: https://www.kaggle.com/datasets/nelgiriyewithana/apple-quality
|
||||
|
||||
## Informacija apie atsiskaitymą
|
||||
* Laboratorinių darbų atsiskaitymui 1 atlikite Lab1, Lab21, Lab22 ir Lab23 nurodytus uždavinius.
|
||||
* Neuroninių tinklų modelius, skaičiavimus, rezultatus, komentarus ir išvadas pateikite interaktyviose JupyterLab užrašų knygutėse.
|
||||
* Laboratorinio darbo pristatymo metu mokėkite paaiškinti atliekamus veiksmus ir gaunamus rezultatus.
|
||||
* Modelius, kurių apmokymas užtrunka ilgai, išsaugokite.
|
||||
* Atsiskaitymo metu mokėkite pakeisti programos kodą.
|
||||
* Gerai įsigilinkite į teorinius metodų, kuriuos taikote laboratorinių darbų uždavinių sprendimui, aspektus. Atsiskaitymo metu mokėkite juos paaiškinti.
|
||||
|
||||
## Vertinimo kriterijai
|
||||
* Pateiktų uždavinių sprendimas ir rezultatų pateikimas JupyterLab aplinkoje - 30%.
|
||||
* Gautų rezultatų korektiškumas - 30%.
|
||||
* Praktinės užduoties atlikimas - 20%.
|
||||
* Teorinių aspektų supratimas - 20%.
|
366
assets/Ice Cream Sales - temperatures.csv
Normal file
366
assets/Ice Cream Sales - temperatures.csv
Normal file
@ -0,0 +1,366 @@
|
||||
Temperature,Ice Cream Profits
|
||||
39,13.17
|
||||
40,11.88
|
||||
41,18.82
|
||||
42,18.65
|
||||
43,17.02
|
||||
43,15.88
|
||||
44,19.07
|
||||
44,19.57
|
||||
45,21.62
|
||||
45,22.34
|
||||
45,19.23
|
||||
46,21.25
|
||||
46,19.81
|
||||
47,22.12
|
||||
48,24.22
|
||||
48,24.68
|
||||
48,23.78
|
||||
48,26.41
|
||||
48,25.01
|
||||
48,22.29
|
||||
49,27.81
|
||||
49,23.54
|
||||
50,22.89
|
||||
50,25.68
|
||||
50,27.29
|
||||
50,27.64
|
||||
50,27.31
|
||||
51,21.93
|
||||
51,32.18
|
||||
52,30.67
|
||||
52,28.05
|
||||
52,28.82
|
||||
52,27.87
|
||||
52,29.39
|
||||
53,32.60
|
||||
53,31.62
|
||||
53,25.71
|
||||
53,28.48
|
||||
54,30.09
|
||||
54,33.58
|
||||
54,29.75
|
||||
54,31.94
|
||||
54,33.71
|
||||
54,28.37
|
||||
54,27.41
|
||||
54,27.99
|
||||
54,30.37
|
||||
55,27.68
|
||||
55,29.53
|
||||
55,33.91
|
||||
55,34.19
|
||||
56,33.22
|
||||
56,34.47
|
||||
57,30.89
|
||||
57,35.80
|
||||
58,33.44
|
||||
58,36.79
|
||||
58,31.56
|
||||
58,35.13
|
||||
58,36.11
|
||||
58,32.39
|
||||
59,38.18
|
||||
59,29.69
|
||||
59,38.47
|
||||
59,37.74
|
||||
59,36.71
|
||||
59,32.29
|
||||
59,37.50
|
||||
59,35.33
|
||||
60,35.06
|
||||
60,36.25
|
||||
60,40.25
|
||||
60,39.69
|
||||
60,40.95
|
||||
61,37.96
|
||||
61,38.10
|
||||
61,38.21
|
||||
61,37.30
|
||||
61,39.53
|
||||
61,37.42
|
||||
61,39.42
|
||||
61,38.16
|
||||
61,37.66
|
||||
62,39.04
|
||||
62,41.44
|
||||
62,40.19
|
||||
62,37.93
|
||||
63,50.17
|
||||
63,44.15
|
||||
63,41.58
|
||||
63,40.59
|
||||
63,39.17
|
||||
64,40.57
|
||||
64,40.28
|
||||
64,41.21
|
||||
64,44.85
|
||||
64,40.94
|
||||
64,40.14
|
||||
64,38.57
|
||||
64,44.07
|
||||
64,44.10
|
||||
65,47.36
|
||||
65,45.38
|
||||
65,41.09
|
||||
65,43.78
|
||||
65,42.72
|
||||
65,42.10
|
||||
65,43.28
|
||||
65,44.31
|
||||
65,42.71
|
||||
66,43.03
|
||||
66,42.16
|
||||
66,46.74
|
||||
66,47.68
|
||||
66,44.48
|
||||
66,47.52
|
||||
66,44.98
|
||||
66,45.07
|
||||
66,45.42
|
||||
66,47.36
|
||||
66,48.26
|
||||
66,51.75
|
||||
66,45.05
|
||||
66,40.65
|
||||
67,48.65
|
||||
67,45.26
|
||||
67,46.04
|
||||
67,44.85
|
||||
67,42.94
|
||||
67,50.62
|
||||
68,45.65
|
||||
68,49.37
|
||||
68,45.89
|
||||
68,50.74
|
||||
68,47.17
|
||||
68,49.60
|
||||
68,41.68
|
||||
68,46.90
|
||||
68,47.35
|
||||
68,47.73
|
||||
68,43.73
|
||||
68,47.47
|
||||
69,51.38
|
||||
69,41.74
|
||||
69,49.88
|
||||
69,47.78
|
||||
69,42.50
|
||||
69,48.77
|
||||
70,49.46
|
||||
70,50.87
|
||||
70,49.12
|
||||
70,49.95
|
||||
70,50.31
|
||||
70,49.32
|
||||
70,52.67
|
||||
70,52.05
|
||||
70,48.82
|
||||
71,53.33
|
||||
71,54.59
|
||||
71,53.77
|
||||
71,49.60
|
||||
71,52.17
|
||||
71,46.74
|
||||
71,53.04
|
||||
71,49.34
|
||||
71,55.04
|
||||
72,57.18
|
||||
72,51.26
|
||||
72,53.78
|
||||
72,51.55
|
||||
72,50.01
|
||||
72,53.59
|
||||
72,52.47
|
||||
72,48.96
|
||||
72,53.57
|
||||
72,50.79
|
||||
73,52.13
|
||||
73,52.42
|
||||
73,54.67
|
||||
73,51.82
|
||||
73,53.21
|
||||
73,54.40
|
||||
73,55.01
|
||||
73,54.08
|
||||
73,53.97
|
||||
74,55.28
|
||||
74,54.36
|
||||
74,53.62
|
||||
74,50.65
|
||||
74,55.52
|
||||
74,58.61
|
||||
74,50.64
|
||||
74,54.28
|
||||
74,53.95
|
||||
74,53.44
|
||||
74,57.10
|
||||
74,54.26
|
||||
75,55.34
|
||||
75,53.71
|
||||
75,57.84
|
||||
75,55.91
|
||||
75,58.62
|
||||
75,58.85
|
||||
76,52.84
|
||||
76,56.59
|
||||
76,59.43
|
||||
76,59.69
|
||||
76,53.83
|
||||
76,59.41
|
||||
76,53.17
|
||||
76,53.48
|
||||
76,59.94
|
||||
76,60.31
|
||||
76,60.33
|
||||
77,53.82
|
||||
77,53.07
|
||||
77,59.48
|
||||
77,54.10
|
||||
77,56.33
|
||||
77,59.87
|
||||
77,60.75
|
||||
77,56.43
|
||||
77,60.86
|
||||
77,55.07
|
||||
77,58.39
|
||||
77,58.72
|
||||
77,57.52
|
||||
77,56.33
|
||||
77,57.47
|
||||
78,58.13
|
||||
78,60.46
|
||||
78,60.33
|
||||
78,60.89
|
||||
78,62.58
|
||||
78,61.22
|
||||
78,59.62
|
||||
78,58.31
|
||||
78,59.12
|
||||
78,57.93
|
||||
78,57.25
|
||||
78,62.20
|
||||
79,59.70
|
||||
79,64.82
|
||||
79,57.06
|
||||
79,62.52
|
||||
79,59.93
|
||||
79,61.71
|
||||
79,59.49
|
||||
79,67.42
|
||||
79,56.34
|
||||
79,59.69
|
||||
79,57.44
|
||||
79,64.63
|
||||
80,55.47
|
||||
80,61.22
|
||||
80,62.79
|
||||
80,59.91
|
||||
80,61.59
|
||||
80,63.46
|
||||
80,64.45
|
||||
80,65.42
|
||||
80,61.82
|
||||
81,64.36
|
||||
81,58.11
|
||||
81,59.47
|
||||
81,65.86
|
||||
81,61.52
|
||||
81,62.12
|
||||
81,64.23
|
||||
81,62.36
|
||||
81,62.32
|
||||
82,64.97
|
||||
82,66.15
|
||||
82,64.02
|
||||
82,63.41
|
||||
82,61.85
|
||||
82,65.49
|
||||
82,64.39
|
||||
82,66.06
|
||||
82,64.86
|
||||
82,62.85
|
||||
82,66.57
|
||||
83,65.54
|
||||
83,62.58
|
||||
83,63.29
|
||||
83,64.38
|
||||
83,60.78
|
||||
83,65.66
|
||||
84,66.61
|
||||
84,65.12
|
||||
84,63.13
|
||||
84,63.35
|
||||
84,65.40
|
||||
84,65.41
|
||||
84,68.28
|
||||
84,64.10
|
||||
84,66.26
|
||||
84,63.63
|
||||
84,67.58
|
||||
84,68.54
|
||||
84,65.20
|
||||
85,67.93
|
||||
85,67.88
|
||||
85,69.71
|
||||
85,64.22
|
||||
85,61.82
|
||||
85,68.28
|
||||
85,62.99
|
||||
85,64.96
|
||||
85,65.99
|
||||
85,70.30
|
||||
85,64.31
|
||||
86,69.59
|
||||
86,68.35
|
||||
86,69.66
|
||||
86,71.46
|
||||
86,69.90
|
||||
86,69.19
|
||||
86,67.97
|
||||
86,64.85
|
||||
87,70.43
|
||||
87,68.48
|
||||
87,70.29
|
||||
87,65.19
|
||||
87,68.00
|
||||
87,70.64
|
||||
88,69.67
|
||||
88,74.69
|
||||
88,69.78
|
||||
89,73.16
|
||||
89,71.51
|
||||
89,73.32
|
||||
89,74.09
|
||||
90,71.12
|
||||
90,67.58
|
||||
90,77.39
|
||||
90,75.11
|
||||
90,74.80
|
||||
90,73.94
|
||||
90,75.94
|
||||
91,79.31
|
||||
91,81.81
|
||||
91,75.58
|
||||
92,78.20
|
||||
92,75.60
|
||||
92,75.04
|
||||
92,77.41
|
||||
93,79.76
|
||||
93,77.18
|
||||
94,80.94
|
||||
94,75.70
|
||||
95,78.20
|
||||
95,80.75
|
||||
95,80.97
|
||||
95,80.98
|
||||
96,80.02
|
||||
96,82.83
|
||||
96,80.95
|
||||
97,82.50
|
||||
98,84.12
|
||||
99,85.13
|
||||
99,87.08
|
||||
99,89.29
|
||||
101,81.91
|
||||
101,85.02
|
|
4001
assets/apple_quality.csv
Normal file
4001
assets/apple_quality.csv
Normal file
File diff suppressed because it is too large
Load Diff
2411
assets/beers.csv
Normal file
2411
assets/beers.csv
Normal file
File diff suppressed because it is too large
Load Diff
BIN
assets/ktu.png
Normal file
BIN
assets/ktu.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 22 KiB |
660
examples/Lab1.ipynb
Normal file
660
examples/Lab1.ipynb
Normal file
File diff suppressed because one or more lines are too long
624
examples/Lab21.ipynb
Normal file
624
examples/Lab21.ipynb
Normal file
File diff suppressed because one or more lines are too long
260
examples/Lab22.ipynb
Normal file
260
examples/Lab22.ipynb
Normal file
@ -0,0 +1,260 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c123b4de-9116-4150-8de9-8b171666d8b9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Vienasluoksniai tinklai - tiesinės regresijos uždavinys\n",
|
||||
"\n",
|
||||
"Tiesinės lygties pritaikymas duotam duomenų rinkiniui n-matėje erdvėje vadinamas tiesine regresija. Toliau pateiktame paveikslėlyje parodytas tiesinės regresijos pavyzdys. Paprastai tariant, bandoma rasti geriausias $w$ ir $b$ parametrų reikšmes, kurios geriausiai atitiktų duomenų rinkinį. Tuomet, gavę geriausią įmanomą įvertį, galime prognozuoti $y$ reikšmes, turėdami $x$.\n",
|
||||
"\n",
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "4171cf08-31ca-4608-b187-f2ac40a6009b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import pandas as pd\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"from matplotlib.pylab import rcParams\n",
|
||||
"from sklearn.datasets import make_regression\n",
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"from sklearn.metrics import r2_score \n",
|
||||
"\n",
|
||||
"%matplotlib inline"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "73eef68e-fb6c-4129-b87c-c3e268a7de76",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def plot_graph(X, y, pred_line=None, losses=None):\n",
|
||||
" \n",
|
||||
" plots = 2 if losses!=None else 1\n",
|
||||
" \n",
|
||||
" fig = plt.figure(figsize=(8 * plots, 6))\n",
|
||||
" \n",
|
||||
" ax1 = fig.add_subplot(1, plots, 1)\n",
|
||||
" ax1.scatter(X, y, alpha=0.8) # Plot the original set of datapoints\n",
|
||||
" \n",
|
||||
" if(pred_line != None):\n",
|
||||
" x_line, y_line = pred_line['x_line'], pred_line['y_line']\n",
|
||||
" ax1.plot(x_line, y_line, linewidth=2, markersize=12, color='red', alpha=0.8) # Plot the randomly generated line\n",
|
||||
" ax1.set_title('Predicted Line on set of Datapoints')\n",
|
||||
" else:\n",
|
||||
" ax1.set_title('Plot of Datapoints generated')\n",
|
||||
" \n",
|
||||
" ax1.set_xlabel('x')\n",
|
||||
" ax1.set_ylabel('y')\n",
|
||||
" \n",
|
||||
" if(losses!=None):\n",
|
||||
" ax2 = fig.add_subplot(1, plots, 2)\n",
|
||||
" ax2.plot(np.arange(len(losses)), losses, marker='o')\n",
|
||||
" \n",
|
||||
" ax2.set_xlabel('Epoch')\n",
|
||||
" ax2.set_ylabel('Loss')\n",
|
||||
" ax2.set_title('Loss')\n",
|
||||
"\n",
|
||||
" plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "64294a9a-2018-4a37-8dbf-2b7a242fa72c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def plot_pred_line(X, y, w, b,losses=None):\n",
|
||||
" # Generate a set of datapoints on x for creating a line.\n",
|
||||
" # We shall consider the range of X_train for generating the line so that the line superposes the datapoints.\n",
|
||||
" x_line = np.linspace(np.min(X), np.max(X), 10) \n",
|
||||
" \n",
|
||||
" # Calculate the corresponding y with the parameter values of m & b\n",
|
||||
" y_line = w * x_line + b \n",
|
||||
" \n",
|
||||
" plot_graph(X, y, pred_line={'x_line': x_line, 'y_line':y_line}, losses=losses)\n",
|
||||
" \n",
|
||||
" return "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "ac81d49e-6e39-4dd1-97cc-49d850877744",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def forward_prop(X, w, b):\n",
|
||||
" #y_pred = w * X + b\n",
|
||||
" y_pred = np.reshape(np.sum(w*X,1),(X.shape[0],1)) + b\n",
|
||||
" return y_pred"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "3f35616b-0a5c-41be-811b-6f15d436a10c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def compute_loss(y, y_pred):\n",
|
||||
" loss = np.mean((y_pred - y)**2)\n",
|
||||
" \n",
|
||||
" return loss"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "06a40434-da8f-4176-96d5-ab162bc843e5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def grad_desc(w, b, X_train, y_train, y_pred):\n",
|
||||
" dw = np.mean(2*(y_pred - y_train) * X_train)\n",
|
||||
" db = np.mean(2*(y_pred - y_train))\n",
|
||||
" \n",
|
||||
" return dw, db"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "31386096-b418-4041-83f6-d689a87da77d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def back_prop(X_train, y_train, y_pred, w, b, l_r):\n",
|
||||
" dw, db = grad_desc(w, b, X_train, y_train, y_pred)\n",
|
||||
" \n",
|
||||
" w -= l_r * dw\n",
|
||||
" b -= l_r * db\n",
|
||||
" \n",
|
||||
" return w, b"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "8fe4faa0-96c3-40f9-b025-6f81002ba483",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "NameError",
|
||||
"evalue": "name 'make_regression' is not defined",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
|
||||
"Cell \u001b[0;32mIn[1], line 13\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[38;5;66;03m# Number of iterations for updates - Define during explanation\u001b[39;00m\n\u001b[1;32m 11\u001b[0m epochs \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m300\u001b[39m\n\u001b[0;32m---> 13\u001b[0m X, y \u001b[38;5;241m=\u001b[39m \u001b[43mmake_regression\u001b[49m(n_samples\u001b[38;5;241m=\u001b[39mM, n_features\u001b[38;5;241m=\u001b[39mn, n_informative\u001b[38;5;241m=\u001b[39mn, n_targets\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m, random_state\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m42\u001b[39m, noise\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m10\u001b[39m)\n\u001b[1;32m 14\u001b[0m y \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mreshape(y,(y\u001b[38;5;241m.\u001b[39msize, \u001b[38;5;241m1\u001b[39m))\n\u001b[1;32m 16\u001b[0m m \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mrandom\u001b[38;5;241m.\u001b[39mnormal(scale\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m10\u001b[39m)\n",
|
||||
"\u001b[0;31mNameError\u001b[0m: name 'make_regression' is not defined"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Sample size\n",
|
||||
"M = 200\n",
|
||||
"\n",
|
||||
"# No. of input features\n",
|
||||
"n = 1\n",
|
||||
"\n",
|
||||
"# Learning Rate - Define during explanation\n",
|
||||
"l_r = 0.01\n",
|
||||
"\n",
|
||||
"# Number of iterations for updates - Define during explanation\n",
|
||||
"epochs = 300\n",
|
||||
"\n",
|
||||
"X, y = make_regression(n_samples=M, n_features=n, n_informative=n, n_targets=1, random_state=42, noise=10)\n",
|
||||
"y = np.reshape(y,(y.size, 1))\n",
|
||||
"\n",
|
||||
"m = np.random.normal(scale=10)\n",
|
||||
"b = np.random.normal(scale=10)\n",
|
||||
"w = np.random.normal(scale=10, size=(X.shape[1],)) \n",
|
||||
"\n",
|
||||
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n",
|
||||
"\n",
|
||||
"losses = []\n",
|
||||
"\n",
|
||||
"for i in range(epochs):\n",
|
||||
" y_pred = forward_prop(X_train, w, b)\n",
|
||||
" \n",
|
||||
" #print(y_pred)\n",
|
||||
" \n",
|
||||
" loss = compute_loss(y_train, y_pred)\n",
|
||||
" losses.append(loss)\n",
|
||||
"\n",
|
||||
" m, b = back_prop(X_train, y_train, y_pred, w, b, l_r)\n",
|
||||
"\n",
|
||||
" if(i%10==0):\n",
|
||||
" print('Epoch: ', i)\n",
|
||||
" print('Loss = ', loss)\n",
|
||||
" plot_pred_line(X_train, y_train, w, b, losses)\n",
|
||||
"\n",
|
||||
"del losses[:]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "b7966eca-eba4-420d-90e3-4ffdda5b44cb",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Prediction: \n",
|
||||
"Loss = 111.2313597749226\n",
|
||||
"R2 = 0.9746%\n",
|
||||
"\n",
|
||||
"w = [87.50631143]\n",
|
||||
"b = 2.1422612255336815\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print('Prediction: ')\n",
|
||||
"y_pred = forward_prop(X_test, w, b)\n",
|
||||
"loss = compute_loss(y_test, y_pred)\n",
|
||||
"#print(np.hstack([y_test,y_pred]))\n",
|
||||
"print('Loss = ', loss)\n",
|
||||
"r2 = r2_score(y_pred, y_test)\n",
|
||||
"print('R2 = {}%'.format(round(r2, 4)))\n",
|
||||
"\n",
|
||||
"print('\\nw = ', w)\n",
|
||||
"print('b = ', b)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
110615
examples/Lab23.ipynb
Normal file
110615
examples/Lab23.ipynb
Normal file
File diff suppressed because one or more lines are too long
2411
examples/beers.csv
Normal file
2411
examples/beers.csv
Normal file
File diff suppressed because it is too large
Load Diff
BIN
examples/ktu.png
Normal file
BIN
examples/ktu.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 22 KiB |
397
lab1.ipynb
Normal file
397
lab1.ipynb
Normal file
File diff suppressed because one or more lines are too long
101
lab2-tmp.py
Normal file
101
lab2-tmp.py
Normal file
@ -0,0 +1,101 @@
|
||||
from dataclasses import dataclass
|
||||
import numpy as np
|
||||
from typing import Literal
|
||||
|
||||
@dataclass
class NeuralNetworkLayer:
    """Parameters of one fully-connected layer.

    Shapes follow ``init_network``: ``weights`` is (out_size, in_size)
    and ``bias`` is (out_size, 1).
    """

    # Weight matrix W, shape (output_size, input_size).
    # np.ndarray is the correct annotation; np.array is a factory function,
    # not a type, so the original annotation was meaningless to checkers.
    weights: np.ndarray
    # Bias column vector b, shape (output_size, 1).
    bias: np.ndarray
    # Name of the activation applied after the affine transform.
    activation: Literal["relu", "sigmoid"]
|
||||
|
||||
@dataclass
class NeuralNetwork:
    """A feed-forward network: an ordered sequence of dense layers."""

    # Layers in forward-propagation order (input side first).
    layers: list[NeuralNetworkLayer]
|
||||
|
||||
def init_network(architecture, seed=1):
    """Build a NeuralNetwork with randomly initialised parameters.

    ``architecture`` is a list of dicts: entry 0 gives the input width
    ({"size": n}) and every later entry describes a layer
    ({"size": n, "activation": "relu" | "sigmoid"}).  ``seed`` makes
    the initialisation reproducible.
    """
    np.random.seed(seed)

    layers = []
    # Walk consecutive (fan_in, fan_out) pairs of the architecture spec.
    for spec_in, spec_out in zip(architecture, architecture[1:]):
        fan_in = spec_in["size"]
        fan_out = spec_out["size"]

        # Same draw order as before (weights first, then bias) so the
        # seeded initialisation is byte-for-byte reproducible.
        weights = np.random.randn(fan_out, fan_in) * 0.01
        bias = np.random.randn(fan_out, 1) * 0.1

        layers.append(NeuralNetworkLayer(weights, bias, spec_out["activation"]))

    return NeuralNetwork(layers)
|
||||
|
||||
def relu(Z):
|
||||
return np.maximum(0, Z)
|
||||
|
||||
def sigmoid(Z):
    """Logistic activation 1 / (1 + e^-z); maps each element into (0, 1)."""
    denom = 1 + np.exp(-Z)
    return 1 / denom
|
||||
|
||||
def single_layer_forward_propagation(A_prev, W_curr, b_curr, activation="relu"):
    """Run one dense layer forward.

    Computes the pre-activation Z = W @ A_prev + b and applies the named
    activation function.

    Args:
        A_prev: activations of the previous layer, shape (in_size, m).
        W_curr: weight matrix, shape (out_size, in_size).
        b_curr: bias column vector, shape (out_size, 1); broadcasts over m.
        activation: "relu" or "sigmoid".

    Returns:
        (A_curr, Z_curr): activated output and the raw pre-activation
        (kept for the backward pass).

    Raises:
        ValueError: if ``activation`` is not a supported name.
    """
    # Affine part of the layer: Z = W x + b (b broadcasts over columns).
    Z_curr = np.dot(W_curr, A_prev) + b_curr

    activation_funcs = {"relu": relu, "sigmoid": sigmoid}
    try:
        activation_func = activation_funcs[activation]
    except KeyError:
        # ValueError is more precise than the bare Exception raised before,
        # and is still caught by any caller handling Exception.
        raise ValueError(f"Non-supported activation function: '{activation}'") from None

    return activation_func(Z_curr), Z_curr
|
||||
|
||||
def get_cost_value(Y_hat, Y):
    """Binary cross-entropy cost averaged over the examples.

    Args:
        Y_hat: predicted probabilities, shape (1, m).
        Y: ground-truth 0/1 labels, shape (1, m).

    Returns:
        Scalar cost (0-d numpy value).
    """
    # number of examples
    m = Y_hat.shape[1]
    # Clip predictions away from exact 0 and 1 so the logs stay finite
    # even when the sigmoid saturates; in-range values are unchanged.
    eps = 1e-12
    Y_hat = np.clip(Y_hat, eps, 1 - eps)
    # calculation of the cost according to the formula
    cost = -1 / m * (np.dot(Y, np.log(Y_hat).T) + np.dot(1 - Y, np.log(1 - Y_hat).T))
    return np.squeeze(cost)
|
||||
|
||||
def full_forward_propagation(X, network: NeuralNetwork):
    """Propagate input X through every layer of the network.

    Returns:
        (Y_hat, A_values, Z_values): the final activations plus the
        per-layer activations and pre-activations, recorded so a
        backward pass can reuse them.
    """
    A_values = []
    Z_values = []

    activation = X
    for layer in network.layers:
        activation, pre_activation = single_layer_forward_propagation(
            activation, layer.weights, layer.bias, layer.activation
        )
        A_values.append(activation)
        Z_values.append(pre_activation)

    return activation, A_values, Z_values
|
||||
|
||||
def train(X, Y, network: NeuralNetwork, epochs, learning_rate, verbose=False, callback=None):
    """Run the training loop, recording cost and accuracy per epoch.

    NOTE(review): the backward pass / parameter update is not implemented
    yet, so the network's weights stay at their initial values; each epoch
    currently only re-evaluates the static model.  The leftover debug
    prints of the full training data were removed.

    Args:
        X: inputs, shape (n_features, m) — samples are columns.
        Y: 0/1 targets, shape (1, m).
        network: the network to train.
        epochs: number of passes over the data.
        learning_rate: step size for the (future) gradient update.
        verbose: when True, print the cost every 50 epochs.
        callback: reserved hook, currently unused (as in the original).

    Returns:
        (cost_history, accuracy_history) lists, one entry per epoch.
    """
    cost_history = []
    accuracy_history = []

    for i in range(epochs):
        Y_hat, A_values, Z_values = full_forward_propagation(X, network)

        # Record the metrics the function promises to return; the
        # original computed the forward pass and discarded it.
        cost = get_cost_value(Y_hat, Y)
        cost_history.append(cost)

        # Fraction of examples whose thresholded prediction matches Y.
        accuracy = np.mean((Y_hat > 0.5).astype(int) == Y)
        accuracy_history.append(accuracy)

        # TODO: backward propagation + gradient step using learning_rate.

        if verbose and i % 50 == 0:
            print(f"Epoch {i}: cost={cost:.6f} accuracy={accuracy:.4f}")

    return cost_history, accuracy_history
|
||||
|
||||
def main(architecture):
    """Build a network from ``architecture`` and train it on the OR truth table.

    Returns:
        (cost_history, accuracy_history) from ``train`` so callers can
        inspect the run — the original computed and silently discarded
        them.  Backward compatible: previous callers ignored the (None)
        return value.
    """
    network = init_network(architecture)

    # OR-gate truth table; transposed below so samples become columns.
    X_train = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    Y_train = np.array([0, 1, 1, 1])

    cost_history, accuracy_history = train(
        X_train.T,
        np.transpose(Y_train.reshape((Y_train.shape[0], 1))),
        network,
        1000,
        0.1,
    )

    return cost_history, accuracy_history
|
||||
|
||||
|
||||
|
||||
# Guarding the entry point stops the (slow) training run from firing as a
# side effect when this module is merely imported.
if __name__ == "__main__":
    # 2 inputs -> stack of ReLU hidden layers -> single sigmoid output.
    main(architecture=[
        {"size": 2},
        {"size": 25, "activation": "relu"},
        {"size": 50, "activation": "relu"},
        {"size": 50, "activation": "relu"},
        {"size": 25, "activation": "relu"},
        {"size": 1, "activation": "sigmoid"},
    ])
|
2028
lab2.ipynb
Normal file
2028
lab2.ipynb
Normal file
File diff suppressed because one or more lines are too long
BIN
requirements.txt
Normal file
BIN
requirements.txt
Normal file
Binary file not shown.
Loading…
Reference in New Issue
Block a user