1
0

initial commit

This commit is contained in:
Rokas Puzonas 2024-03-07 02:23:42 +02:00
commit 2fa4337ac4
16 changed files with 123894 additions and 0 deletions

1
.gitignore vendored Normal file
View File

@ -0,0 +1 @@
venv

19
README.md Normal file
View File

@ -0,0 +1,19 @@
# Lab1
Ice Cream Sales: https://www.kaggle.com/datasets/raphaelmanayon/temperature-and-ice-cream-sales
Apple Quality: https://www.kaggle.com/datasets/nelgiriyewithana/apple-quality
## Informacija apie atsiskaitymą
* Laboratorinių darbų atsiskaitymui 1 atlikite Lab1, Lab21, Lab22 ir Lab23 nurodytus uždavinius.
* Neuroninių tinklų modelius, skaičiavimus, rezultatus, komentarus ir išvadas pateikite interaktyviose JupyterLab užrašų knygutėse.
* Laboratorinio darbo pristatymo metu mokėkite paaiškinti atliekamus veiksmus ir gaunamus rezultatus.
* Modelius, kurių apmokymas užtrunka ilgai, išsaugokite.
* Atsiskaitymo metu mokėkite pakeisti programos kodą.
* Gerai įsigilinkite į teorinius metodų, kuriuos taikote laboratorinių darbų uždavinių sprendimui, aspektus. Atsiskaitymo metu mokėkite juos paaiškinti.
## Vertinimo kriterijai
* Pateiktų uždavinių sprendimas ir rezultatų pateikimas JupyterLab aplinkoje - 30%.
* Gautų rezultatų korektiškumas - 30%.
* Praktinės užduoties atlikimas - 20%.
* Teorinių aspektų supratimas - 20%.

View File

@ -0,0 +1,366 @@
Temperature,Ice Cream Profits
39,13.17
40,11.88
41,18.82
42,18.65
43,17.02
43,15.88
44,19.07
44,19.57
45,21.62
45,22.34
45,19.23
46,21.25
46,19.81
47,22.12
48,24.22
48,24.68
48,23.78
48,26.41
48,25.01
48,22.29
49,27.81
49,23.54
50,22.89
50,25.68
50,27.29
50,27.64
50,27.31
51,21.93
51,32.18
52,30.67
52,28.05
52,28.82
52,27.87
52,29.39
53,32.60
53,31.62
53,25.71
53,28.48
54,30.09
54,33.58
54,29.75
54,31.94
54,33.71
54,28.37
54,27.41
54,27.99
54,30.37
55,27.68
55,29.53
55,33.91
55,34.19
56,33.22
56,34.47
57,30.89
57,35.80
58,33.44
58,36.79
58,31.56
58,35.13
58,36.11
58,32.39
59,38.18
59,29.69
59,38.47
59,37.74
59,36.71
59,32.29
59,37.50
59,35.33
60,35.06
60,36.25
60,40.25
60,39.69
60,40.95
61,37.96
61,38.10
61,38.21
61,37.30
61,39.53
61,37.42
61,39.42
61,38.16
61,37.66
62,39.04
62,41.44
62,40.19
62,37.93
63,50.17
63,44.15
63,41.58
63,40.59
63,39.17
64,40.57
64,40.28
64,41.21
64,44.85
64,40.94
64,40.14
64,38.57
64,44.07
64,44.10
65,47.36
65,45.38
65,41.09
65,43.78
65,42.72
65,42.10
65,43.28
65,44.31
65,42.71
66,43.03
66,42.16
66,46.74
66,47.68
66,44.48
66,47.52
66,44.98
66,45.07
66,45.42
66,47.36
66,48.26
66,51.75
66,45.05
66,40.65
67,48.65
67,45.26
67,46.04
67,44.85
67,42.94
67,50.62
68,45.65
68,49.37
68,45.89
68,50.74
68,47.17
68,49.60
68,41.68
68,46.90
68,47.35
68,47.73
68,43.73
68,47.47
69,51.38
69,41.74
69,49.88
69,47.78
69,42.50
69,48.77
70,49.46
70,50.87
70,49.12
70,49.95
70,50.31
70,49.32
70,52.67
70,52.05
70,48.82
71,53.33
71,54.59
71,53.77
71,49.60
71,52.17
71,46.74
71,53.04
71,49.34
71,55.04
72,57.18
72,51.26
72,53.78
72,51.55
72,50.01
72,53.59
72,52.47
72,48.96
72,53.57
72,50.79
73,52.13
73,52.42
73,54.67
73,51.82
73,53.21
73,54.40
73,55.01
73,54.08
73,53.97
74,55.28
74,54.36
74,53.62
74,50.65
74,55.52
74,58.61
74,50.64
74,54.28
74,53.95
74,53.44
74,57.10
74,54.26
75,55.34
75,53.71
75,57.84
75,55.91
75,58.62
75,58.85
76,52.84
76,56.59
76,59.43
76,59.69
76,53.83
76,59.41
76,53.17
76,53.48
76,59.94
76,60.31
76,60.33
77,53.82
77,53.07
77,59.48
77,54.10
77,56.33
77,59.87
77,60.75
77,56.43
77,60.86
77,55.07
77,58.39
77,58.72
77,57.52
77,56.33
77,57.47
78,58.13
78,60.46
78,60.33
78,60.89
78,62.58
78,61.22
78,59.62
78,58.31
78,59.12
78,57.93
78,57.25
78,62.20
79,59.70
79,64.82
79,57.06
79,62.52
79,59.93
79,61.71
79,59.49
79,67.42
79,56.34
79,59.69
79,57.44
79,64.63
80,55.47
80,61.22
80,62.79
80,59.91
80,61.59
80,63.46
80,64.45
80,65.42
80,61.82
81,64.36
81,58.11
81,59.47
81,65.86
81,61.52
81,62.12
81,64.23
81,62.36
81,62.32
82,64.97
82,66.15
82,64.02
82,63.41
82,61.85
82,65.49
82,64.39
82,66.06
82,64.86
82,62.85
82,66.57
83,65.54
83,62.58
83,63.29
83,64.38
83,60.78
83,65.66
84,66.61
84,65.12
84,63.13
84,63.35
84,65.40
84,65.41
84,68.28
84,64.10
84,66.26
84,63.63
84,67.58
84,68.54
84,65.20
85,67.93
85,67.88
85,69.71
85,64.22
85,61.82
85,68.28
85,62.99
85,64.96
85,65.99
85,70.30
85,64.31
86,69.59
86,68.35
86,69.66
86,71.46
86,69.90
86,69.19
86,67.97
86,64.85
87,70.43
87,68.48
87,70.29
87,65.19
87,68.00
87,70.64
88,69.67
88,74.69
88,69.78
89,73.16
89,71.51
89,73.32
89,74.09
90,71.12
90,67.58
90,77.39
90,75.11
90,74.80
90,73.94
90,75.94
91,79.31
91,81.81
91,75.58
92,78.20
92,75.60
92,75.04
92,77.41
93,79.76
93,77.18
94,80.94
94,75.70
95,78.20
95,80.75
95,80.97
95,80.98
96,80.02
96,82.83
96,80.95
97,82.50
98,84.12
99,85.13
99,87.08
99,89.29
101,81.91
101,85.02
1 Temperature Ice Cream Profits
2 39 13.17
3 40 11.88
4 41 18.82
5 42 18.65
6 43 17.02
7 43 15.88
8 44 19.07
9 44 19.57
10 45 21.62
11 45 22.34
12 45 19.23
13 46 21.25
14 46 19.81
15 47 22.12
16 48 24.22
17 48 24.68
18 48 23.78
19 48 26.41
20 48 25.01
21 48 22.29
22 49 27.81
23 49 23.54
24 50 22.89
25 50 25.68
26 50 27.29
27 50 27.64
28 50 27.31
29 51 21.93
30 51 32.18
31 52 30.67
32 52 28.05
33 52 28.82
34 52 27.87
35 52 29.39
36 53 32.60
37 53 31.62
38 53 25.71
39 53 28.48
40 54 30.09
41 54 33.58
42 54 29.75
43 54 31.94
44 54 33.71
45 54 28.37
46 54 27.41
47 54 27.99
48 54 30.37
49 55 27.68
50 55 29.53
51 55 33.91
52 55 34.19
53 56 33.22
54 56 34.47
55 57 30.89
56 57 35.80
57 58 33.44
58 58 36.79
59 58 31.56
60 58 35.13
61 58 36.11
62 58 32.39
63 59 38.18
64 59 29.69
65 59 38.47
66 59 37.74
67 59 36.71
68 59 32.29
69 59 37.50
70 59 35.33
71 60 35.06
72 60 36.25
73 60 40.25
74 60 39.69
75 60 40.95
76 61 37.96
77 61 38.10
78 61 38.21
79 61 37.30
80 61 39.53
81 61 37.42
82 61 39.42
83 61 38.16
84 61 37.66
85 62 39.04
86 62 41.44
87 62 40.19
88 62 37.93
89 63 50.17
90 63 44.15
91 63 41.58
92 63 40.59
93 63 39.17
94 64 40.57
95 64 40.28
96 64 41.21
97 64 44.85
98 64 40.94
99 64 40.14
100 64 38.57
101 64 44.07
102 64 44.10
103 65 47.36
104 65 45.38
105 65 41.09
106 65 43.78
107 65 42.72
108 65 42.10
109 65 43.28
110 65 44.31
111 65 42.71
112 66 43.03
113 66 42.16
114 66 46.74
115 66 47.68
116 66 44.48
117 66 47.52
118 66 44.98
119 66 45.07
120 66 45.42
121 66 47.36
122 66 48.26
123 66 51.75
124 66 45.05
125 66 40.65
126 67 48.65
127 67 45.26
128 67 46.04
129 67 44.85
130 67 42.94
131 67 50.62
132 68 45.65
133 68 49.37
134 68 45.89
135 68 50.74
136 68 47.17
137 68 49.60
138 68 41.68
139 68 46.90
140 68 47.35
141 68 47.73
142 68 43.73
143 68 47.47
144 69 51.38
145 69 41.74
146 69 49.88
147 69 47.78
148 69 42.50
149 69 48.77
150 70 49.46
151 70 50.87
152 70 49.12
153 70 49.95
154 70 50.31
155 70 49.32
156 70 52.67
157 70 52.05
158 70 48.82
159 71 53.33
160 71 54.59
161 71 53.77
162 71 49.60
163 71 52.17
164 71 46.74
165 71 53.04
166 71 49.34
167 71 55.04
168 72 57.18
169 72 51.26
170 72 53.78
171 72 51.55
172 72 50.01
173 72 53.59
174 72 52.47
175 72 48.96
176 72 53.57
177 72 50.79
178 73 52.13
179 73 52.42
180 73 54.67
181 73 51.82
182 73 53.21
183 73 54.40
184 73 55.01
185 73 54.08
186 73 53.97
187 74 55.28
188 74 54.36
189 74 53.62
190 74 50.65
191 74 55.52
192 74 58.61
193 74 50.64
194 74 54.28
195 74 53.95
196 74 53.44
197 74 57.10
198 74 54.26
199 75 55.34
200 75 53.71
201 75 57.84
202 75 55.91
203 75 58.62
204 75 58.85
205 76 52.84
206 76 56.59
207 76 59.43
208 76 59.69
209 76 53.83
210 76 59.41
211 76 53.17
212 76 53.48
213 76 59.94
214 76 60.31
215 76 60.33
216 77 53.82
217 77 53.07
218 77 59.48
219 77 54.10
220 77 56.33
221 77 59.87
222 77 60.75
223 77 56.43
224 77 60.86
225 77 55.07
226 77 58.39
227 77 58.72
228 77 57.52
229 77 56.33
230 77 57.47
231 78 58.13
232 78 60.46
233 78 60.33
234 78 60.89
235 78 62.58
236 78 61.22
237 78 59.62
238 78 58.31
239 78 59.12
240 78 57.93
241 78 57.25
242 78 62.20
243 79 59.70
244 79 64.82
245 79 57.06
246 79 62.52
247 79 59.93
248 79 61.71
249 79 59.49
250 79 67.42
251 79 56.34
252 79 59.69
253 79 57.44
254 79 64.63
255 80 55.47
256 80 61.22
257 80 62.79
258 80 59.91
259 80 61.59
260 80 63.46
261 80 64.45
262 80 65.42
263 80 61.82
264 81 64.36
265 81 58.11
266 81 59.47
267 81 65.86
268 81 61.52
269 81 62.12
270 81 64.23
271 81 62.36
272 81 62.32
273 82 64.97
274 82 66.15
275 82 64.02
276 82 63.41
277 82 61.85
278 82 65.49
279 82 64.39
280 82 66.06
281 82 64.86
282 82 62.85
283 82 66.57
284 83 65.54
285 83 62.58
286 83 63.29
287 83 64.38
288 83 60.78
289 83 65.66
290 84 66.61
291 84 65.12
292 84 63.13
293 84 63.35
294 84 65.40
295 84 65.41
296 84 68.28
297 84 64.10
298 84 66.26
299 84 63.63
300 84 67.58
301 84 68.54
302 84 65.20
303 85 67.93
304 85 67.88
305 85 69.71
306 85 64.22
307 85 61.82
308 85 68.28
309 85 62.99
310 85 64.96
311 85 65.99
312 85 70.30
313 85 64.31
314 86 69.59
315 86 68.35
316 86 69.66
317 86 71.46
318 86 69.90
319 86 69.19
320 86 67.97
321 86 64.85
322 87 70.43
323 87 68.48
324 87 70.29
325 87 65.19
326 87 68.00
327 87 70.64
328 88 69.67
329 88 74.69
330 88 69.78
331 89 73.16
332 89 71.51
333 89 73.32
334 89 74.09
335 90 71.12
336 90 67.58
337 90 77.39
338 90 75.11
339 90 74.80
340 90 73.94
341 90 75.94
342 91 79.31
343 91 81.81
344 91 75.58
345 92 78.20
346 92 75.60
347 92 75.04
348 92 77.41
349 93 79.76
350 93 77.18
351 94 80.94
352 94 75.70
353 95 78.20
354 95 80.75
355 95 80.97
356 95 80.98
357 96 80.02
358 96 82.83
359 96 80.95
360 97 82.50
361 98 84.12
362 99 85.13
363 99 87.08
364 99 89.29
365 101 81.91
366 101 85.02

4001
assets/apple_quality.csv Normal file

File diff suppressed because it is too large Load Diff

2411
assets/beers.csv Normal file

File diff suppressed because it is too large Load Diff

BIN
assets/ktu.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 22 KiB

660
examples/Lab1.ipynb Normal file

File diff suppressed because one or more lines are too long

624
examples/Lab21.ipynb Normal file

File diff suppressed because one or more lines are too long

260
examples/Lab22.ipynb Normal file
View File

@ -0,0 +1,260 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "c123b4de-9116-4150-8de9-8b171666d8b9",
"metadata": {},
"source": [
"# Vienasluoksniai tinklai - tiesinės regresijos uždavinys\n",
"\n",
"Tiesinės lygties pritaikymas duotam duomenų rinkiniui n-matėje erdvėje vadinamas tiesine regresija. Toliau pateiktame paveikslėlyje parodytas tiesinės regresijos pavyzdys. Paprastai tariant, bandoma rasti geriausias $w$ ir $b$ parametrų reikšmes, kurios geriausiai atitiktų duomenų rinkinį. Tuomet, gavę geriausią įmanomą įvertį, galime prognozuoti $y$ reikšmes, turėdami $x$.\n",
"\n",
"![Tiesinė regresija](linear-regression.gif)"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "4171cf08-31ca-4608-b187-f2ac40a6009b",
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import pandas as pd\n",
"import matplotlib.pyplot as plt\n",
"from matplotlib.pylab import rcParams\n",
"from sklearn.datasets import make_regression\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.metrics import r2_score \n",
"\n",
"%matplotlib inline"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "73eef68e-fb6c-4129-b87c-c3e268a7de76",
"metadata": {},
"outputs": [],
"source": [
"def plot_graph(X, y, pred_line=None, losses=None):\n",
" \n",
" plots = 2 if losses!=None else 1\n",
" \n",
" fig = plt.figure(figsize=(8 * plots, 6))\n",
" \n",
" ax1 = fig.add_subplot(1, plots, 1)\n",
" ax1.scatter(X, y, alpha=0.8) # Plot the original set of datapoints\n",
" \n",
" if(pred_line != None):\n",
" x_line, y_line = pred_line['x_line'], pred_line['y_line']\n",
" ax1.plot(x_line, y_line, linewidth=2, markersize=12, color='red', alpha=0.8) # Plot the randomly generated line\n",
" ax1.set_title('Predicted Line on set of Datapoints')\n",
" else:\n",
" ax1.set_title('Plot of Datapoints generated')\n",
" \n",
" ax1.set_xlabel('x')\n",
" ax1.set_ylabel('y')\n",
" \n",
" if(losses!=None):\n",
" ax2 = fig.add_subplot(1, plots, 2)\n",
" ax2.plot(np.arange(len(losses)), losses, marker='o')\n",
" \n",
" ax2.set_xlabel('Epoch')\n",
" ax2.set_ylabel('Loss')\n",
" ax2.set_title('Loss')\n",
"\n",
" plt.show()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "64294a9a-2018-4a37-8dbf-2b7a242fa72c",
"metadata": {},
"outputs": [],
"source": [
"def plot_pred_line(X, y, w, b,losses=None):\n",
" # Generate a set of datapoints on x for creating a line.\n",
" # We shall consider the range of X_train for generating the line so that the line superposes the datapoints.\n",
" x_line = np.linspace(np.min(X), np.max(X), 10) \n",
" \n",
" # Calculate the corresponding y with the parameter values of m & b\n",
" y_line = w * x_line + b \n",
" \n",
" plot_graph(X, y, pred_line={'x_line': x_line, 'y_line':y_line}, losses=losses)\n",
" \n",
" return "
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "ac81d49e-6e39-4dd1-97cc-49d850877744",
"metadata": {},
"outputs": [],
"source": [
"def forward_prop(X, w, b):\n",
" #y_pred = w * X + b\n",
" y_pred = np.reshape(np.sum(w*X,1),(X.shape[0],1)) + b\n",
" return y_pred"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "3f35616b-0a5c-41be-811b-6f15d436a10c",
"metadata": {},
"outputs": [],
"source": [
"def compute_loss(y, y_pred):\n",
" loss = np.mean((y_pred - y)**2)\n",
" \n",
" return loss"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "06a40434-da8f-4176-96d5-ab162bc843e5",
"metadata": {},
"outputs": [],
"source": [
"def grad_desc(w, b, X_train, y_train, y_pred):\n",
" dw = np.mean(2*(y_pred - y_train) * X_train)\n",
" db = np.mean(2*(y_pred - y_train))\n",
" \n",
" return dw, db"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "31386096-b418-4041-83f6-d689a87da77d",
"metadata": {},
"outputs": [],
"source": [
"def back_prop(X_train, y_train, y_pred, w, b, l_r):\n",
" dw, db = grad_desc(w, b, X_train, y_train, y_pred)\n",
" \n",
" w -= l_r * dw\n",
" b -= l_r * db\n",
" \n",
" return w, b"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "8fe4faa0-96c3-40f9-b025-6f81002ba483",
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'make_regression' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[1], line 13\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[38;5;66;03m# Number of iterations for updates - Define during explanation\u001b[39;00m\n\u001b[1;32m 11\u001b[0m epochs \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m300\u001b[39m\n\u001b[0;32m---> 13\u001b[0m X, y \u001b[38;5;241m=\u001b[39m \u001b[43mmake_regression\u001b[49m(n_samples\u001b[38;5;241m=\u001b[39mM, n_features\u001b[38;5;241m=\u001b[39mn, n_informative\u001b[38;5;241m=\u001b[39mn, n_targets\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m, random_state\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m42\u001b[39m, noise\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m10\u001b[39m)\n\u001b[1;32m 14\u001b[0m y \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mreshape(y,(y\u001b[38;5;241m.\u001b[39msize, \u001b[38;5;241m1\u001b[39m))\n\u001b[1;32m 16\u001b[0m m \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mrandom\u001b[38;5;241m.\u001b[39mnormal(scale\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m10\u001b[39m)\n",
"\u001b[0;31mNameError\u001b[0m: name 'make_regression' is not defined"
]
}
],
"source": [
"# Sample size\n",
"M = 200\n",
"\n",
"# No. of input features\n",
"n = 1\n",
"\n",
"# Learning Rate - Define during explanation\n",
"l_r = 0.01\n",
"\n",
"# Number of iterations for updates - Define during explanation\n",
"epochs = 300\n",
"\n",
"X, y = make_regression(n_samples=M, n_features=n, n_informative=n, n_targets=1, random_state=42, noise=10)\n",
"y = np.reshape(y,(y.size, 1))\n",
"\n",
"m = np.random.normal(scale=10)\n",
"b = np.random.normal(scale=10)\n",
"w = np.random.normal(scale=10, size=(X.shape[1],)) \n",
"\n",
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n",
"\n",
"losses = []\n",
"\n",
"for i in range(epochs):\n",
" y_pred = forward_prop(X_train, w, b)\n",
" \n",
" #print(y_pred)\n",
" \n",
" loss = compute_loss(y_train, y_pred)\n",
" losses.append(loss)\n",
"\n",
" m, b = back_prop(X_train, y_train, y_pred, w, b, l_r)\n",
"\n",
" if(i%10==0):\n",
" print('Epoch: ', i)\n",
" print('Loss = ', loss)\n",
" plot_pred_line(X_train, y_train, w, b, losses)\n",
"\n",
"del losses[:]"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "b7966eca-eba4-420d-90e3-4ffdda5b44cb",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Prediction: \n",
"Loss = 111.2313597749226\n",
"R2 = 0.9746%\n",
"\n",
"w = [87.50631143]\n",
"b = 2.1422612255336815\n"
]
}
],
"source": [
"print('Prediction: ')\n",
"y_pred = forward_prop(X_test, w, b)\n",
"loss = compute_loss(y_test, y_pred)\n",
"#print(np.hstack([y_test,y_pred]))\n",
"print('Loss = ', loss)\n",
"r2 = r2_score(y_pred, y_test)\n",
"print('R2 = {}%'.format(round(r2, 4)))\n",
"\n",
"print('\\nw = ', w)\n",
"print('b = ', b)\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.16"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

110615
examples/Lab23.ipynb Normal file

File diff suppressed because one or more lines are too long

2411
examples/beers.csv Normal file

File diff suppressed because it is too large Load Diff

BIN
examples/ktu.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 22 KiB

397
lab1.ipynb Normal file

File diff suppressed because one or more lines are too long

101
lab2-tmp.py Normal file
View File

@ -0,0 +1,101 @@
from dataclasses import dataclass
import numpy as np
from typing import Literal
@dataclass
class NeuralNetworkLayer:
    """A single fully-connected layer of the network.

    Holds the parameters and the activation name; the forward pass itself
    is performed by `single_layer_forward_propagation`.
    """

    # Weight matrix; based on how init_network builds it, shape is (n_out, n_in) — TODO confirm
    weights: np.ndarray
    # Bias column vector; built as (n_out, 1) by init_network — TODO confirm
    bias: np.ndarray
    # Name of the activation applied to this layer's pre-activation output
    activation: Literal["relu", "sigmoid"]
@dataclass
class NeuralNetwork:
    """A feed-forward network: an ordered sequence of fully-connected layers.

    Layers are stored input-to-output; `full_forward_propagation` consumes
    them in this order.
    """

    # Ordered list of layers, first hidden layer first
    layers: list[NeuralNetworkLayer]
def init_network(architecture, seed=1):
    """Build a NeuralNetwork with randomly initialised parameters.

    Args:
        architecture: list of dicts, each with a "size" key; every entry
            after the first also carries an "activation" key. Consecutive
            entries define one layer (input size -> output size).
        seed: seed for numpy's global RNG, so initialisation is reproducible.

    Returns:
        A NeuralNetwork whose weights are drawn from N(0, 0.01) and whose
        biases are drawn from N(0, 0.1).
    """
    np.random.seed(seed)
    built = []
    # Pair each spec with its successor: (input spec, output spec) per layer.
    for in_spec, out_spec in zip(architecture[:-1], architecture[1:]):
        n_in = in_spec["size"]
        n_out = out_spec["size"]
        built.append(
            NeuralNetworkLayer(
                weights=np.random.randn(n_out, n_in) * 0.01,
                bias=np.random.randn(n_out, 1) * 0.1,
                activation=out_spec["activation"],
            )
        )
    return NeuralNetwork(built)
def relu(Z):
    """Element-wise rectified linear unit: max(z, 0)."""
    return np.maximum(Z, 0)
def sigmoid(Z):
    """Element-wise logistic sigmoid: 1 / (1 + e^(-z))."""
    return 1 / (1 + np.exp(-Z))
def single_layer_forward_propagation(A_prev, W_curr, b_curr, activation="relu"):
    """Forward-propagate one layer.

    Computes the pre-activation Z = W @ A_prev + b, then applies the named
    activation function.

    Args:
        A_prev: activations of the previous layer (one column per example).
        W_curr: this layer's weight matrix.
        b_curr: this layer's bias column vector (broadcast across examples).
        activation: either "relu" or "sigmoid".

    Returns:
        Tuple (A_curr, Z_curr): post-activation and pre-activation values.

    Raises:
        Exception: if `activation` is not a recognised name.
    """
    Z_curr = np.dot(W_curr, A_prev) + b_curr
    # Dispatch table instead of an if/elif chain.
    dispatch = {"relu": relu, "sigmoid": sigmoid}
    if activation not in dispatch:
        raise Exception(f"Non-supported activation function: '{activation}'")
    return dispatch[activation](Z_curr), Z_curr
def get_cost_value(Y_hat, Y):
    """Binary cross-entropy cost averaged over the examples.

    Args:
        Y_hat: predicted probabilities, shape (1, m) — one column per example.
        Y: ground-truth labels in {0, 1}, same shape as Y_hat.

    Returns:
        Scalar cost (np.squeeze of the 1x1 result).
    """
    n_examples = Y_hat.shape[1]
    # Log-likelihood of the positive and negative classes respectively.
    positive_term = np.dot(Y, np.log(Y_hat).T)
    negative_term = np.dot(1 - Y, np.log(1 - Y_hat).T)
    cost = -1 / n_examples * (positive_term + negative_term)
    return np.squeeze(cost)
def full_forward_propagation(X, network: NeuralNetwork):
    """Propagate input X through every layer of the network.

    Args:
        X: input matrix, one column per example.
        network: the network whose layers are applied in order.

    Returns:
        Tuple (Y_hat, A_values, Z_values):
        - Y_hat: output of the final layer,
        - A_values: post-activation outputs of every layer, in order,
        - Z_values: pre-activation values of every layer, in order.
    """
    A_values = []
    Z_values = []
    A_curr = X
    for layer in network.layers:
        A_curr, Z_curr = single_layer_forward_propagation(
            A_curr, layer.weights, layer.bias, layer.activation
        )
        A_values.append(A_curr)
        Z_values.append(Z_curr)
    return A_curr, A_values, Z_values
def train(X, Y, network: NeuralNetwork, epochs, learning_rate, verbose=False, callback=None):
    """Run `epochs` forward passes over the training data.

    NOTE(review): this file is a work-in-progress ("lab2-tmp") — no weight
    updates are performed, the histories are never appended to, and the
    `learning_rate`, `verbose` and `callback` parameters are unused.
    Backpropagation presumably still needs to be implemented.

    Args:
        X: training inputs, one column per example.
        Y: training labels, one column per example.
        network: the network to (eventually) train.
        epochs: number of iterations to run.
        learning_rate: unused.
        verbose: unused.
        callback: unused.

    Returns:
        Tuple (cost_history, accuracy_history) — currently both empty lists.
    """
    print(X)
    print(Y)
    cost_history = []
    accuracy_history = []
    for _ in range(epochs):
        Y_hat, A_values, Z_values = full_forward_propagation(X, network)
    return cost_history, accuracy_history
def main(architecture):
    """Build a network from `architecture` and train it on a toy OR dataset.

    The training set is the four boolean input pairs with label a OR b.
    Inputs/labels are transposed so that each example is a column, as the
    forward-propagation functions expect.
    """
    network = init_network(architecture)
    X_train = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    Y_train = np.array([0, 1, 1, 1])
    # Reshape labels to (m, 1) and transpose to the (1, m) layout used throughout.
    labels_row = Y_train.reshape((Y_train.shape[0], 1)).T
    cost_history, accuracy_history = train(X_train.T, labels_row, network, 1000, 0.1)
# Guard the script entry point so importing this module does not trigger a
# training run as a side effect.
if __name__ == "__main__":
    main(architecture=[
        {"size": 2},
        {"size": 25, "activation": "relu"},
        {"size": 50, "activation": "relu"},
        {"size": 50, "activation": "relu"},
        {"size": 25, "activation": "relu"},
        {"size": 1, "activation": "sigmoid"},
    ])

2028
lab2.ipynb Normal file

File diff suppressed because one or more lines are too long

BIN
requirements.txt Normal file

Binary file not shown.