Examples
Compute a distance between two graphs.
See Distance between two graphs.
<<<
from mlstatpy.graph import GraphDistance

# We define two graphs as lists of edges.
graph1 = [
    ("a", "b"),
    ("b", "c"),
    ("b", "X"),
    ("X", "c"),
    ("c", "d"),
    ("d", "e"),
    ("0", "b"),
]
graph2 = [
    ("a", "b"),
    ("b", "c"),
    ("b", "X"),
    ("X", "c"),
    ("c", "t"),
    ("t", "d"),
    ("d", "e"),
    ("d", "g"),
]

# We convert them into GraphDistance objects.
graph1 = GraphDistance(graph1)
graph2 = GraphDistance(graph2)

distance, graph = graph1.distance_matching_graphs_paths(graph2, use_min=False)
print("distance", distance)
print("common paths:", graph)
>>>
distance 0.3318250377073907
common paths: 0
X
a
b
c
d
e
00
11
g
t
a -> b []
b -> c []
b -> X []
X -> c []
c -> d []
d -> e []
0 -> b []
00 -> a []
00 -> 0 []
e -> 11 []
c -> 2a.t []
2a.t -> d []
d -> 2a.g []
2a.g -> 11 []
(original entry: graph_distance.py:docstring of mlstatpy.graph.graph_distance.GraphDistance, line 3)
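As a quick sanity check, here is a minimal sketch reusing the same API as the example above (it is not part of the original docstring): the path-matching distance between two identical graphs should be zero. The edge list `edges` is an arbitrary illustrative choice.

<<<
from mlstatpy.graph import GraphDistance

# The same edge list on both sides: the two graphs match perfectly.
edges = [("a", "b"), ("b", "c"), ("c", "d")]
g1 = GraphDistance(edges)
g2 = GraphDistance(edges)

distance, _ = g1.distance_matching_graphs_paths(g2, use_min=False)
# A perfect match is expected to yield a (near) zero distance.
assert distance <= 1e-10
>>>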
Stochastic Gradient Descent applied to linear regression
The following example shows how to optimize a simple linear regression.
<<<
import numpy
from mlstatpy.optim import SGDOptimizer


def fct_loss(c, X, y):
    return numpy.linalg.norm(X @ c - y) ** 2


def fct_grad(c, x, y, i=0):
    return x * (x @ c - y) * 0.1


coef = numpy.array([0.5, 0.6, -0.7])
X = numpy.random.randn(10, 3)
y = X @ coef

sgd = SGDOptimizer(numpy.random.randn(3))
sgd.train(X, y, fct_loss, fct_grad, max_iter=15, verbose=True)
print("optimized coefficients:", sgd.coef)
>>>
0/15: loss: 8.115 lr=0.1 max(coef): 1.3 l1=0/2.8 l2=0/2.9
1/15: loss: 2.603 lr=0.0302 max(coef): 1.1 l1=0.079/2.2 l2=0.0021/1.8
2/15: loss: 0.9904 lr=0.0218 max(coef): 0.87 l1=0.0089/1.9 l2=5e-05/1.2
3/15: loss: 0.593 lr=0.018 max(coef): 0.77 l1=0.0025/1.7 l2=3.7e-06/1.1
4/15: loss: 0.3241 lr=0.0156 max(coef): 0.69 l1=0.094/1.7 l2=0.0034/1
5/15: loss: 0.1603 lr=0.014 max(coef): 0.66 l1=0.041/1.7 l2=0.00059/1
6/15: loss: 0.07627 lr=0.0128 max(coef): 0.65 l1=0.0077/1.7 l2=3.8e-05/1
7/15: loss: 0.03915 lr=0.0119 max(coef): 0.67 l1=0.0059/1.8 l2=2.2e-05/1.1
8/15: loss: 0.02361 lr=0.0111 max(coef): 0.68 l1=0.023/1.8 l2=0.0002/1.1
9/15: loss: 0.0149 lr=0.0105 max(coef): 0.69 l1=0.00051/1.8 l2=8.8e-08/1.1
10/15: loss: 0.01025 lr=0.00995 max(coef): 0.69 l1=0.0082/1.8 l2=3.9e-05/1.1
11/15: loss: 0.007828 lr=0.00949 max(coef): 0.69 l1=0.0059/1.8 l2=1.2e-05/1.1
12/15: loss: 0.005864 lr=0.00909 max(coef): 0.7 l1=0.016/1.8 l2=8.7e-05/1.1
13/15: loss: 0.004568 lr=0.00874 max(coef): 0.7 l1=0.0069/1.8 l2=2.7e-05/1.1
14/15: loss: 0.003654 lr=0.00842 max(coef): 0.7 l1=0.00033/1.8 l2=6.2e-08/1.1
15/15: loss: 0.003042 lr=0.00814 max(coef): 0.7 l1=0.0061/1.8 l2=1.3e-05/1.1
optimized coefficients: [ 0.485 0.617 -0.702]
(original entry: sgd.py:docstring of mlstatpy.optim.sgd.SGDOptimizer, line 34)
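The example above does not seed numpy's random generator, so each run starts from different data and initial coefficients and prints different losses. The sketch below (not part of the original docstring; it assumes only numpy) fixes the seed and checks the target the optimizer should approach against the closed-form least-squares solution from numpy.linalg.lstsq.

<<<
import numpy

# Fix the seed so the data, and therefore the fit, is reproducible.
rng = numpy.random.RandomState(0)
coef = numpy.array([0.5, 0.6, -0.7])
X = rng.randn(10, 3)
y = X @ coef  # noiseless targets, so the exact solution is coef itself

# Closed-form least squares gives the coefficients SGD should converge to.
exact, *_ = numpy.linalg.lstsq(X, y, rcond=None)
assert numpy.allclose(exact, coef)
>>>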