101: Linear Regression and export to ONNX

This example trains a linear regression with scikit-learn and torch, then exports the torch model to ONNX.

Data

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression, SGDRegressor
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
import torch
from onnxruntime import InferenceSession
from onnx_array_api.plotting.text_plot import onnx_simple_text_plot
from onnx_array_api.plotting.graphviz_helper import plot_dot
from experimental_experiment.torch_interpreter import to_onnx


X, y = make_regression(1000, n_features=5, noise=10.0, n_informative=2)
print(X.shape, y.shape)

X_train, X_test, y_train, y_test = train_test_split(X, y)
(1000, 5) (1000,)

scikit-learn: the simple regression

LinearRegression computes the least-squares solution in closed form through the normal equations, where X' denotes the transpose of X:

A^* = (X'X)^{-1} X'Y

clr = LinearRegression()
clr.fit(X_train, y_train)

print(f"coefficients: {clr.coef_}, {clr.intercept_}")
coefficients: [ 8.77645073e-01  7.23508174e+01 -5.21526059e-02  9.31183877e+01
  5.58228290e-01], 0.26571651725229817
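
As a sanity check, the same coefficients can be recovered directly from the normal equations. This is a minimal numpy sketch, not part of the original script; X_train is augmented with a column of ones so that the last coefficient plays the role of the intercept.

# Solve A^* = (X'X)^{-1} X'y with numpy; the column of ones models the intercept.
Xb = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
coef = np.linalg.solve(Xb.T @ Xb, Xb.T @ y_train)
print(f"closed-form coefficients: {coef[:-1]}, {coef[-1]}")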

Evaluation

y_pred = clr.predict(X_test)
l2 = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print(f"LinearRegression: l2={l2}, r2={r2}")
LinearRegression: l2=99.37158225696727, r2=0.9921955426688784

scikit-learn: SGD algorithm

SGD stands for Stochastic Gradient Descent: instead of solving the normal equations, the coefficients are updated iteratively, one sample (or mini-batch) at a time, following the gradient of the loss.
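
For the squared loss, every sample (x_i, y_i) triggers the update w <- w - eta (x_i w + b - y_i) x_i. The sketch below illustrates one epoch of this update with a constant learning rate eta; that constant rate is an assumption, as SGDRegressor uses a decreasing learning-rate schedule by default.

# One epoch of plain SGD on the squared loss, for illustration only.
w = np.zeros(X_train.shape[1])
b = 0.0
eta = 1e-3  # assumed constant learning rate
for xi, yi in zip(X_train, y_train):
    err = xi @ w + b - yi  # prediction error on this sample
    w -= eta * err * xi
    b -= eta * err
print(f"one-epoch SGD coefficients: {w}, {b}")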

clr = SGDRegressor(max_iter=5, verbose=1)
clr.fit(X_train, y_train)

print(f"coefficients: {clr.coef_}, {clr.intercept_}")
-- Epoch 1
Norm: 99.54, NNZs: 5, Bias: -1.083300, T: 750, Avg. loss: 1453.695892
Total training time: 0.00 seconds.
-- Epoch 2
Norm: 112.76, NNZs: 5, Bias: -0.399563, T: 1500, Avg. loss: 105.002287
Total training time: 0.00 seconds.
-- Epoch 3
Norm: 116.17, NNZs: 5, Bias: -0.083427, T: 2250, Avg. loss: 51.117831
Total training time: 0.00 seconds.
-- Epoch 4
Norm: 117.33, NNZs: 5, Bias: 0.205811, T: 3000, Avg. loss: 46.281330
Total training time: 0.00 seconds.
-- Epoch 5
Norm: 117.71, NNZs: 5, Bias: 0.129218, T: 3750, Avg. loss: 45.656226
Total training time: 0.00 seconds.
/home/xadupre/install/scikit-learn/sklearn/linear_model/_stochastic_gradient.py:1575: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit.
  warnings.warn(
coefficients: [ 0.8506411  72.25623951 -0.17585021 92.92246497  0.5644385 ], [0.12921801]

Evaluation

y_pred = clr.predict(X_test)
sl2 = mean_squared_error(y_test, y_pred)
sr2 = r2_score(y_test, y_pred)
print(f"SGDRegressor: sl2={sl2}, sr2={sr2}")
SGDRegressor: sl2=99.41981237130881, sr2=0.9921917547663321

torch

class TorchLinearRegression(torch.nn.Module):
    def __init__(self, n_dims: int, n_targets: int):
        super(TorchLinearRegression, self).__init__()
        self.linear = torch.nn.Linear(n_dims, n_targets)

    def forward(self, x):
        return self.linear(x)

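A quick sanity check of the module, assuming a batch of 3 samples with 5 features: the output has one column per target.

# Illustrative only: 3 samples with 5 features map to 3 predictions of size 1.
m = TorchLinearRegression(5, 1)
print(m(torch.randn(3, 5)).shape)  # torch.Size([3, 1])
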

def train_loop(dataloader, model, loss_fn, optimizer):
    total_loss = 0.0

    # Set the model to training mode - important for batch normalization and dropout layers
    # Unnecessary in this situation but added for best practices
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        # Compute prediction and loss
        pred = model(X)
        loss = loss_fn(pred.ravel(), y)

        # Backpropagation
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        # accumulate the training loss as a plain Python float
        total_loss += loss.item()

    return total_loss


model = TorchLinearRegression(X_train.shape[1], 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
loss_fn = torch.nn.MSELoss()

device = "cpu"
model = model.to(device)
dataset = torch.utils.data.TensorDataset(
    torch.Tensor(X_train).to(device), torch.Tensor(y_train).to(device)
)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1)
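
With batch_size=1, every optimizer step uses a single sample, which matches the stochastic update performed by SGDRegressor above.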


for i in range(5):
    loss = train_loop(dataloader, model, loss_fn, optimizer)
    print(f"iteration {i}, loss={loss}")
iteration 0, loss=3388378.75
iteration 1, loss=246132.15625
iteration 2, loss=78806.203125
iteration 3, loss=69241.828125
iteration 4, loss=68663.84375

Let’s check the error.

y_pred = model(torch.Tensor(X_test)).detach().numpy()
tl2 = mean_squared_error(y_test, y_pred)
tr2 = r2_score(y_test, y_pred)
print(f"TorchLinearRegression: tl2={tl2}, tr2={tr2}")
TorchLinearRegression: tl2=99.65278124005958, tr2=0.9921734578291762

And the coefficients.

print("coefficients:")
for p in model.parameters():
    print(p)
coefficients:
Parameter containing:
tensor([[ 1.0995e+00,  7.2252e+01, -8.1620e-02,  9.3018e+01,  5.0411e-01]],
       requires_grad=True)
Parameter containing:
tensor([0.2408], requires_grad=True)

Conversion to ONNX

Let’s convert it to ONNX.

onx = to_onnx(model, (torch.Tensor(X_test[:2]),), input_names=["x"])
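
For reference, PyTorch’s built-in exporter can produce a similar model. This sketch is not part of the original script, "linreg.onnx" is an arbitrary file name, and the resulting graph may differ slightly from the one built by to_onnx.

# Alternative export path (illustrative); the file name is hypothetical.
torch.onnx.export(
    model,
    (torch.Tensor(X_test[:2]),),
    "linreg.onnx",
    input_names=["x"],
    output_names=["y"],
)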

Let’s check the model produced by to_onnx works.

sess = InferenceSession(onx.SerializeToString(), providers=["CPUExecutionProvider"])
res = sess.run(None, {"x": X_test.astype(np.float32)[:2]})
print(res)
[array([[-192.17436],
       [ -48.7338 ]], dtype=float32)]
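
As a consistency check (a small addition to the script), the ONNX Runtime output can be compared to the torch model on the same batch:

# The ONNX model should reproduce the torch predictions up to float32 rounding.
expected = model(torch.Tensor(X_test[:2])).detach().numpy()
np.testing.assert_allclose(expected, res[0], rtol=1e-4, atol=1e-4)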

And the model.

[figure: plot torch linreg 101 — the exported ONNX graph]
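
The figure was presumably produced with plot_dot, imported at the top of the script; a sketch:

# Render the ONNX graph with graphviz (assumed to be how the figure was made).
plot_dot(onx)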

With dynamic shapes
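
The first export was traced with a batch of two rows. Declaring the first dimension as a torch.export.Dim lets the exported model accept any batch size.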

onx = to_onnx(
    model,
    (torch.Tensor(X_test[:2]),),
    input_names=["x"],
    dynamic_shapes={"x": {0: torch.export.Dim("batch")}},
)

print(onnx_simple_text_plot(onx))
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='arg0_1' type=dtype('float32') shape=(1, 5)
init: name='arg1_1' type=dtype('float32') shape=(1,) -- array([0.24076203], dtype=float32)
Gemm(x, arg0_1, arg1_1, transA=0, transB=1, alpha=1.00, beta=1.00) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
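
To verify the batch dimension is truly dynamic, a new session can score batches of any size (a short check added here):

# Any batch size is now accepted by the exported model.
sess = InferenceSession(onx.SerializeToString(), providers=["CPUExecutionProvider"])
for n in (1, 7, len(X_test)):
    print(n, sess.run(None, {"x": X_test.astype(np.float32)[:n]})[0].shape)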

Total running time of the script: (0 minutes 3.163 seconds)
