Skip to content
Snippets Groups Projects
Commit 599d410b authored by LE Quy thanh's avatar LE Quy thanh
Browse files

init

parent a4981103
No related branches found
No related tags found
No related merge requests found
%% Cell type:code id:544f5ea2-7b56-4b95-9e06-92b95fd0a818 tags:
``` python
import numpy as np

# Number of samples (rows) to generate
n_samples = 10000

# --- Inputs X ---
# Feature 1: a random binary flag (0 or 1)
flag = np.random.choice([0, 1], size=n_samples)
# Feature 2: a random float drawn uniformly from [1, 100)
value = np.random.uniform(1, 100, n_samples)
X = np.column_stack((flag, value))

# --- Targets y ---
# Target 1: logical NOT of the input flag (0 -> 1, 1 -> 0)
# Target 2: the input value shifted by a constant delay
time_delay = 4
y = np.column_stack((1 - flag, value + time_delay))

# Print the first few samples as a sanity check
print("X[:5]:\n", X[:5])
print("\ny[:5]:\n", y[:5])
```
%% Output
X[:5]:
[[ 1. 42.1501505 ]
[ 0. 31.90862547]
[ 1. 53.36449373]
[ 0. 72.54994444]
[ 1. 95.84360064]]
y[:5]:
[[ 0. 46.1501505 ]
[ 1. 35.90862547]
[ 0. 57.36449373]
[ 1. 76.54994444]
[ 0. 99.84360064]]
%% Cell type:code id:4a41a07d-0687-471f-9482-dacadc0a64d3 tags:
``` python
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split
# Neural network with two output heads over the same 2-feature input:
# a sigmoid classifier and a plain linear regressor.
class MultiTaskModel(nn.Module):
    def __init__(self):
        super(MultiTaskModel, self).__init__()
        self.hidden1 = nn.Linear(2, 1)  # classification head: 2 inputs -> 1 output
        self.hidden3 = nn.Linear(2, 1)  # regression head: 2 inputs -> 1 output
        self.activation = nn.Sigmoid()  # squashes the classification logit into (0, 1)

    def forward(self, x):
        # Classification branch: linear layer followed by sigmoid.
        classification = self.activation(self.hidden1(x))
        # Regression branch: linear layer with no activation.
        regression = self.hidden3(x)
        return classification, regression
# Instantiate the model
model = MultiTaskModel()
# Print a summary of the model
print(model)
X_tensor = torch.tensor(X, dtype=torch.float32) # input data
# Targets: column 0 is the class label, column 1 is the regression value
y_tensor = torch.tensor(y, dtype=torch.float32)
# 80/20 train/test split with a fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(X_tensor, y_tensor, test_size=0.2, random_state=42)
# Split the 2-column targets into per-task (n, 1) tensors
y_classification_train = y_train[:,0].unsqueeze(1)
y_regression_train = y_train[:,1].unsqueeze(1)
y_classification_test = y_test[:,0].unsqueeze(1)
y_regression_test = y_test[:,1].unsqueeze(1)
```
%% Output
MultiTaskModel(
(hidden1): Linear(in_features=2, out_features=1, bias=True)
(hidden3): Linear(in_features=2, out_features=1, bias=True)
(activation): Sigmoid()
)
%% Cell type:code id:010ab373-1ac9-457a-80f1-ee6e949a3045 tags:
``` python
# Quick sanity check: dimensions of the full input tensor
X_tensor.shape
```
%% Output
torch.Size([2000, 2])
%% Cell type:code id:a05e23ea-0e50-4a0f-9779-34227ec415e6 tags:
``` python
# Joint training loop: one full-batch optimizer step per epoch, followed by
# an evaluation pass on the held-out test set.
# NOTE(review): classification_loss_fn, regression_loss_fn and optimizer are
# not defined in this cell — they must come from a previously executed cell.
for epoch in range(10000):
    model.train()
    # Forward pass over the entire training set
    pred_cls, pred_reg = model(X_train)
    # Per-task losses
    classification_loss = classification_loss_fn(pred_cls, y_classification_train)
    regression_loss = regression_loss_fn(pred_reg, y_regression_train)
    # Joint objective: unweighted sum of the two task losses
    total_loss = classification_loss + regression_loss
    # Backpropagate and update the parameters
    optimizer.zero_grad()
    total_loss.backward()
    optimizer.step()
    # Switch to eval mode for the test-set metrics
    model.eval()
    with torch.no_grad():
        test_pred_cls, test_pred_reg = model(X_test)
        # Classification accuracy at a 0.5 decision threshold
        test_classification_accuracy = (test_pred_cls >= 0.5).float().eq(y_classification_test).float().mean().item()
        # Regression "accuracy": fraction of predictions within a tight tolerance
        regression_accuracy_threshold = 0.00001
        test_regression_accuracy = (test_pred_reg - y_regression_test).abs().le(regression_accuracy_threshold).float().mean().item()
    # Report every 100 epochs and at the final epoch
    if (epoch + 1) % 100 == 0 or epoch == 9999:
        print(f"Epoch {epoch+1}, Total Loss: {total_loss.item():.4f}, Classification Loss: {classification_loss.item():.4f}, Regression Loss: {regression_loss.item():.4f}, Test Classification Accuracy: {test_classification_accuracy:.4f}, Test Regression Accuracy: {test_regression_accuracy:.4f}")
```
%% Output
Epoch 100, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.4050
Epoch 200, Total Loss: 0.1746, Classification Loss: 0.0000, Regression Loss: 0.1746, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 300, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 400, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
Epoch 500, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9650
Epoch 600, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.2980
Epoch 700, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9505
Epoch 800, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
Epoch 900, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9620
Epoch 1000, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.4050
Epoch 1100, Total Loss: 0.7508, Classification Loss: 0.0000, Regression Loss: 0.7508, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 1200, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 1300, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.4280
Epoch 1400, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9505
Epoch 1500, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
Epoch 1600, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9620
Epoch 1700, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9505
Epoch 1800, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
Epoch 1900, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.4125
Epoch 2000, Total Loss: 4.0875, Classification Loss: 0.0000, Regression Loss: 4.0875, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 2100, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 2200, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.1295
Epoch 2300, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
Epoch 2400, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9620
Epoch 2500, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9505
Epoch 2600, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
Epoch 2700, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9620
Epoch 2800, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.4050
Epoch 2900, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 3000, Total Loss: 0.0001, Classification Loss: 0.0000, Regression Loss: 0.0001, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 3100, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0330
Epoch 3200, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.4125
Epoch 3300, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.4050
Epoch 3400, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.4000
Epoch 3500, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9505
Epoch 3600, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
Epoch 3700, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9620
Epoch 3800, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.4125
Epoch 3900, Total Loss: 0.0007, Classification Loss: 0.0000, Regression Loss: 0.0007, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 4000, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0065
Epoch 4100, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9620
Epoch 4200, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
Epoch 4300, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9620
Epoch 4400, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9505
Epoch 4500, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
Epoch 4600, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9620
Epoch 4700, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.4050
Epoch 4800, Total Loss: 0.0033, Classification Loss: 0.0000, Regression Loss: 0.0033, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 4900, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 5000, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.4125
Epoch 5100, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9505
Epoch 5200, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
Epoch 5300, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9620
Epoch 5400, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9505
Epoch 5500, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
Epoch 5600, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.4050
Epoch 5700, Total Loss: 0.0030, Classification Loss: 0.0000, Regression Loss: 0.0030, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 5800, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 5900, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.2155
Epoch 6000, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9620
Epoch 6100, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9505
Epoch 6200, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
Epoch 6300, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9620
Epoch 6400, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9505
Epoch 6500, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.4125
Epoch 6600, Total Loss: 0.0091, Classification Loss: 0.0000, Regression Loss: 0.0091, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 6700, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 6800, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.2980
Epoch 6900, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9620
Epoch 7000, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9505
Epoch 7100, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
Epoch 7200, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9620
Epoch 7300, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9505
Epoch 7400, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.4125
Epoch 7500, Total Loss: 0.5608, Classification Loss: 0.0000, Regression Loss: 0.5608, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 7600, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 7700, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.2910
Epoch 7800, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
Epoch 7900, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9620
Epoch 8000, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9505
Epoch 8100, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
Epoch 8200, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9620
Epoch 8300, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.4125
Epoch 8400, Total Loss: 1.5102, Classification Loss: 0.0000, Regression Loss: 1.5102, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 8500, Total Loss: 0.0001, Classification Loss: 0.0000, Regression Loss: 0.0001, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 8600, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9650
Epoch 8700, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.2590
Epoch 8800, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9620
Epoch 8900, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9505
Epoch 9000, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
Epoch 9100, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9620
Epoch 9200, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.4125
Epoch 9300, Total Loss: 0.0010, Classification Loss: 0.0000, Regression Loss: 0.0010, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 9400, Total Loss: 0.0003, Classification Loss: 0.0000, Regression Loss: 0.0003, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.0000
Epoch 9500, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.2915
Epoch 9600, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.2910
Epoch 9700, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
Epoch 9800, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9620
Epoch 9900, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 0.9505
Epoch 10000, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
%% Cell type:code id:f093fd96-d353-4582-b54c-162007b5b1b6 tags:
``` python
import torch
import torch.nn as nn
import numpy as np

# Disable scientific notation so small weights print readably
np.set_printoptions(precision=8, suppress=True)

# Dump the name, shape and value of every trainable parameter
for name, param in model.named_parameters():
    if not param.requires_grad:
        continue
    print(f"Name: {name}, Shape: {param.shape}, Values: {param.detach().cpu().numpy()}")
```
%% Output
Name: hidden1.weight, Shape: torch.Size([1, 2]), Values: [[-49.66276 0.0131847]]
Name: hidden1.bias, Shape: torch.Size([1]), Values: [16.926733]
Name: hidden3.weight, Shape: torch.Size([1, 2]), Values: [[0.00000035 1. ]]
Name: hidden3.bias, Shape: torch.Size([1]), Values: [4.]
%% Cell type:code id:a5ba2f19-d89f-4bec-9077-77581b56b664 tags:
``` python
```
%% Cell type:code id:544f5ea2-7b56-4b95-9e06-92b95fd0a818 tags:
``` python
import numpy as np

# Number of samples (rows) to generate
n_samples = 10000

# --- Inputs X ---
# Feature 1: a random binary flag (0 or 1)
flag = np.random.choice([0, 1], size=n_samples)
# Feature 2: a random float drawn uniformly from [1, 100)
value = np.random.uniform(1, 100, n_samples)
X = np.column_stack((flag, value))

# --- Targets y ---
# Target 1: logical NOT of the input flag (0 -> 1, 1 -> 0)
# Target 2: the input value shifted by a constant delay
time_delay = 4
y = np.column_stack((1 - flag, value + time_delay))

# Print the first few samples as a sanity check
print("X[:5]:\n", X[:5])
print("\ny[:5]:\n", y[:5])
```
%% Output
X[:5]:
[[ 0. 13.80509487]
[ 1. 94.50276123]
[ 0. 25.80412185]
[ 1. 71.49234499]
[ 1. 37.56554914]]
y[:5]:
[[ 1. 17.80509487]
[ 0. 98.50276123]
[ 1. 29.80412185]
[ 0. 75.49234499]
[ 0. 41.56554914]]
%% Cell type:code id:4a41a07d-0687-471f-9482-dacadc0a64d3 tags:
``` python
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split
# Neural network with two linear output heads sharing the same 2-feature input.
class MultiTaskModel(nn.Module):
    def __init__(self):
        super(MultiTaskModel, self).__init__()
        self.hidden1 = nn.Linear(2, 1)  # classification head: 2 inputs -> 1 output
        self.hidden3 = nn.Linear(2, 1)  # regression head: 2 inputs -> 1 output
        # NOTE(review): defined but never applied in forward() — the
        # classification head returns a raw linear output in this version.
        self.activation = nn.Sigmoid()

    def forward(self, x):
        # Both heads are plain linear maps over the full input.
        classification = self.hidden1(x)
        regression = self.hidden3(x)
        return classification, regression
# Instantiate the model
model = MultiTaskModel()
# Print a summary of the model
print(model)
X_tensor = torch.tensor(X, dtype=torch.float32) # input data
# Targets: column 0 is the class label, column 1 is the regression value
y_tensor = torch.tensor(y, dtype=torch.float32)
# 80/20 train/test split with a fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(X_tensor, y_tensor, test_size=0.2, random_state=42)
# Split the 2-column targets into per-task (n, 1) tensors
y_classification_train = y_train[:,0].unsqueeze(1)
y_regression_train = y_train[:,1].unsqueeze(1)
y_classification_test = y_test[:,0].unsqueeze(1)
y_regression_test = y_test[:,1].unsqueeze(1)
```
%% Output
MultiTaskModel(
(hidden1): Linear(in_features=2, out_features=1, bias=True)
(hidden3): Linear(in_features=2, out_features=1, bias=True)
(activation): Sigmoid()
)
%% Cell type:code id:010ab373-1ac9-457a-80f1-ee6e949a3045 tags:
``` python
# Quick sanity check: dimensions of the full input tensor
X_tensor.shape
```
%% Output
torch.Size([2000, 2])
%% Cell type:code id:a05e23ea-0e50-4a0f-9779-34227ec415e6 tags:
``` python
# Re-create the model so training starts from fresh weights
model = MultiTaskModel()
# Use a GPU when available, otherwise fall back to the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# BUG FIX: the original `model = model` was a no-op, so `device` was computed
# but never used and the model stayed on the CPU. Actually move it.
model = model.to(device)
# Move the data to the same device as the model so training works on GPU too
X_train = X_train.to(device)
X_test = X_test.to(device)
y_classification_train = y_classification_train.to(device)
y_regression_train = y_regression_train.to(device)
y_classification_test = y_classification_test.to(device)
y_regression_test = y_regression_test.to(device)
# Print a summary of the model
print(model)
# Loss functions and optimizer
classification_loss_fn = nn.MSELoss()  # MSE loss on the 0/1 classification target
regression_loss_fn = nn.MSELoss()      # MSE loss on the regression target
optimizer = optim.Adam(model.parameters(), lr=0.1)
# Train for 10000 full-batch epochs
for epoch in range(10000):
    model.train()
    # Forward pass over the entire training set
    classification_pred, regression_pred = model(X_train)
    # Per-task losses
    classification_loss = classification_loss_fn(classification_pred, y_classification_train)
    regression_loss = regression_loss_fn(regression_pred, y_regression_train)
    # Joint objective: unweighted sum of the two task losses
    total_loss = classification_loss + regression_loss
    # Backpropagate and update the parameters
    optimizer.zero_grad()
    total_loss.backward()
    optimizer.step()
    # Evaluate on the test set without tracking gradients
    model.eval()
    with torch.no_grad():
        test_classification_pred, test_regression_pred = model(X_test)
        # Classification accuracy at a 0.5 decision threshold
        test_classification_accuracy = ((test_classification_pred >= 0.5).float() == y_classification_test).float().mean().item()
        # Regression "accuracy": fraction of predictions within the tolerance
        regression_accuracy_threshold = 0.01
        test_regression_accuracy = (torch.abs(test_regression_pred - y_regression_test) <= regression_accuracy_threshold).float().mean().item()
    if epoch == 9999:
        # Only report the final epoch
        print(f"Epoch {epoch+1}, Total Loss: {total_loss.item():.4f}, Classification Loss: {classification_loss.item():.4f}, Regression Loss: {regression_loss.item():.4f}, Test Classification Accuracy: {test_classification_accuracy:.4f}, Test Regression Accuracy: {test_regression_accuracy:.4f}")
```
%% Output
MultiTaskModel(
(hidden1): Linear(in_features=2, out_features=1, bias=True)
(hidden3): Linear(in_features=2, out_features=1, bias=True)
(activation): Sigmoid()
)
Epoch 10000, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
%% Cell type:code id:f093fd96-d353-4582-b54c-162007b5b1b6 tags:
``` python
import torch
import torch.nn as nn
import numpy as np

# Disable scientific notation so small weights print readably
np.set_printoptions(precision=8, suppress=True)

# Dump the name, shape and value of every trainable parameter
for name, param in model.named_parameters():
    if not param.requires_grad:
        continue
    print(f"Name: {name}, Shape: {param.shape}, Values: {param.detach().cpu().numpy()}")
```
%% Output
Name: hidden1.weight, Shape: torch.Size([1, 2]), Values: [[-1. 0.]]
Name: hidden1.bias, Shape: torch.Size([1]), Values: [1.]
Name: hidden3.weight, Shape: torch.Size([1, 2]), Values: [[0.00000026 0.9999999 ]]
Name: hidden3.bias, Shape: torch.Size([1]), Values: [3.9999998]
%% Cell type:code id:a5ba2f19-d89f-4bec-9077-77581b56b664 tags:
``` python
```
%% Cell type:code id:544f5ea2-7b56-4b95-9e06-92b95fd0a818 tags:
``` python
import numpy as np

# Number of samples (rows) to generate
n_samples = 10000

# --- Inputs X ---
# Feature 1: a random binary flag (0 or 1)
flag = np.random.choice([0, 1], size=n_samples)
# Feature 2: a random float drawn uniformly from [1, 100)
value = np.random.uniform(1, 100, n_samples)
X = np.column_stack((flag, value))

# --- Targets y ---
# Target 1: logical NOT of the input flag (0 -> 1, 1 -> 0)
# Target 2: the input value shifted by a constant delay
time_delay = 4
y = np.column_stack((1 - flag, value + time_delay))

# Print the first few samples as a sanity check
print("Example of X:\n", X[:5])
print("\nExample of y:\n", y[:5])
```
%% Output
Example of X:
[[ 1. 61.06791684]
[ 0. 55.7787045 ]
[ 1. 9.50724453]
[ 1. 46.85113782]
[ 1. 23.11717633]]
Example of y:
[[ 0. 65.06791684]
[ 1. 59.7787045 ]
[ 0. 13.50724453]
[ 0. 50.85113782]
[ 0. 27.11717633]]
%% Cell type:code id:4a41a07d-0687-471f-9482-dacadc0a64d3 tags:
``` python
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split
# Two fully independent single-input heads: column 0 of the input feeds the
# classifier, column 1 feeds the regressor.
class MultiTaskModel(nn.Module):
    def __init__(self):
        super(MultiTaskModel, self).__init__()
        self.hidden1 = nn.Linear(1, 1)  # classification head: x[:, 0] -> 1 output
        self.hidden2 = nn.Linear(1, 1)  # regression head: x[:, 1] -> 1 output

    def forward(self, x):
        # Slice each feature out as an (n, 1) column and feed its own head.
        classification = self.hidden1(x[:, 0].unsqueeze(1))
        regression = self.hidden2(x[:, 1].unsqueeze(1))
        return classification, regression
# Instantiate the model
model = MultiTaskModel()
# Print a summary of the model
print(model)
X_tensor = torch.tensor(X, dtype=torch.float32) # input data
# Targets: column 0 is the class label, column 1 is the regression value
y_tensor = torch.tensor(y, dtype=torch.float32)
# 80/20 train/test split with a fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(X_tensor, y_tensor, test_size=0.2, random_state=42)
# Split the 2-column targets into per-task (n, 1) tensors
y_classification_train = y_train[:,0].unsqueeze(1)
y_regression_train = y_train[:,1].unsqueeze(1)
y_classification_test = y_test[:,0].unsqueeze(1)
y_regression_test = y_test[:,1].unsqueeze(1)
```
%% Output
MultiTaskModel(
(hidden1): Linear(in_features=1, out_features=1, bias=True)
(hidden2): Linear(in_features=1, out_features=1, bias=True)
)
%% Cell type:code id:a05e23ea-0e50-4a0f-9779-34227ec415e6 tags:
``` python
# Re-create the model so training starts from fresh weights
model = MultiTaskModel()
# Use a GPU when available, otherwise fall back to the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# BUG FIX: the original `model = model` was a no-op, so `device` was computed
# but never used and the model stayed on the CPU. Actually move it.
model = model.to(device)
# Move the data to the same device as the model so training works on GPU too
X_train = X_train.to(device)
X_test = X_test.to(device)
y_classification_train = y_classification_train.to(device)
y_regression_train = y_regression_train.to(device)
y_classification_test = y_classification_test.to(device)
y_regression_test = y_regression_test.to(device)
# Print a summary of the model
print(model)
# Loss functions and optimizer
classification_loss_fn = nn.MSELoss()  # MSE loss on the 0/1 classification target
regression_loss_fn = nn.MSELoss()      # MSE loss on the regression target
optimizer = optim.Adam(model.parameters(), lr=0.1)
# Train for 10000 full-batch epochs
for epoch in range(10000):
    model.train()
    # Forward pass over the entire training set
    classification_pred, regression_pred = model(X_train)
    # Per-task losses
    classification_loss = classification_loss_fn(classification_pred, y_classification_train)
    regression_loss = regression_loss_fn(regression_pred, y_regression_train)
    # Joint objective: unweighted sum of the two task losses
    total_loss = classification_loss + regression_loss
    # Backpropagate and update the parameters
    optimizer.zero_grad()
    total_loss.backward()
    optimizer.step()
    # Evaluate on the test set without tracking gradients
    model.eval()
    with torch.no_grad():
        test_classification_pred, test_regression_pred = model(X_test)
        # Classification accuracy at a 0.5 decision threshold
        test_classification_accuracy = ((test_classification_pred >= 0.5).float() == y_classification_test).float().mean().item()
        # Regression "accuracy": fraction of predictions within the tolerance
        regression_accuracy_threshold = 0.01
        test_regression_accuracy = (torch.abs(test_regression_pred - y_regression_test) <= regression_accuracy_threshold).float().mean().item()
    if epoch == 9999:
        # Only report the final epoch
        print(f"Epoch {epoch+1}, Total Loss: {total_loss.item():.4f}, Classification Loss: {classification_loss.item():.4f}, Regression Loss: {regression_loss.item():.4f}, Test Classification Accuracy: {test_classification_accuracy:.4f}, Test Regression Accuracy: {test_regression_accuracy:.4f}")
```
%% Output
MultiTaskModel(
(hidden1): Linear(in_features=1, out_features=1, bias=True)
(hidden2): Linear(in_features=1, out_features=1, bias=True)
)
Epoch 10000, Total Loss: 0.0000, Classification Loss: 0.0000, Regression Loss: 0.0000, Test Classification Accuracy: 1.0000, Test Regression Accuracy: 1.0000
%% Cell type:code id:f093fd96-d353-4582-b54c-162007b5b1b6 tags:
``` python
import torch
import torch.nn as nn
import numpy as np
# Thiết lập numpy để in không theo dạng khoa học
np.set_printoptions(precision=8, suppress=True)
# In ra tên, shape và giá trị của tham số weight
for name, param in model.named_parameters():
if param.requires_grad:
print(f"Name: {name}, Shape: {param.shape}, Values: {param.detach().cpu().numpy()}")
```
%% Output
Name: hidden1.weight, Shape: torch.Size([1, 1]), Values: [[-1.]]
Name: hidden1.bias, Shape: torch.Size([1]), Values: [1.]
Name: hidden2.weight, Shape: torch.Size([1, 1]), Values: [[1.]]
Name: hidden2.bias, Shape: torch.Size([1]), Values: [4.]
%% Cell type:code id:a5ba2f19-d89f-4bec-9077-77581b56b664 tags:
``` python
```
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment