Benchmark of TreeEnsemble implementation#

The following example compares the inference time of onnxruntime and sklearn.ensemble.RandomForestRegressor for different numbers of estimators, maximum depths, and parallelization settings. The number of rows and features is fixed.

Import and registration of necessary converters#

import pickle
import os
import time
from itertools import product

import matplotlib.pyplot as plt
import numpy
import pandas
from lightgbm import LGBMRegressor
from onnxruntime import InferenceSession, SessionOptions
from psutil import cpu_count
from sphinx_runpython.runpython import run_cmd
from skl2onnx import to_onnx, update_registered_converter
from skl2onnx.common.shape_calculator import calculate_linear_regressor_output_shapes
from sklearn import set_config
from sklearn.ensemble import RandomForestRegressor
from tqdm import tqdm
from xgboost import XGBRegressor
from onnxmltools.convert.xgboost.operator_converters.XGBoost import convert_xgboost


def skl2onnx_convert_lightgbm(scope, operator, container):
    from onnxmltools.convert.lightgbm.operator_converters.LightGbm import (
        convert_lightgbm,
    )

    options = scope.get_options(operator.raw_operator)
    if "split" in options:
        operator.split = options["split"]
    else:
        operator.split = None
    convert_lightgbm(scope, operator, container)


update_registered_converter(
    LGBMRegressor,
    "LightGbmLGBMRegressor",
    calculate_linear_regressor_output_shapes,
    skl2onnx_convert_lightgbm,
    options={"split": None},
)
update_registered_converter(
    XGBRegressor,
    "XGBoostXGBRegressor",
    calculate_linear_regressor_output_shapes,
    convert_xgboost,
)

# The following instruction reduces the time spent by scikit-learn
# to validate the data.
set_config(assume_finite=True)
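
To make sure the registration works, a tiny XGBRegressor can be converted and executed with onnxruntime. This check is only a sketch added for illustration; it reuses the imports above, and the check_* variables are hypothetical, not part of the benchmark.

check_X = numpy.random.randn(100, 4).astype(numpy.float32)
check_y = check_X.sum(axis=1)
check_model = XGBRegressor(n_estimators=5, max_depth=3)
check_model.fit(check_X, check_y)
# to_onnx only accepts XGBRegressor because the converter was registered above
check_onx = to_onnx(check_model, check_X[:1])
check_sess = InferenceSession(
    check_onx.SerializeToString(), providers=["CPUExecutionProvider"]
)
print(check_sess.run(None, {"X": check_X})[0].shape)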

Machine details#

print(f"Number of cores: {cpu_count()}")
Number of cores: 8

But this information is not usually enough. Let’s extract the cache information.

try:
    out, err = run_cmd("lscpu")
    print(out)
except Exception as e:
    print(f"lscpu not available: {e}")
<Popen: returncode: None args: ['lscpu']>

Or with the following command.

out, err = run_cmd("cat /proc/cpuinfo")
print(out)
<Popen: returncode: None args: ['cat', '/proc/cpuinfo']>
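
Both commands exist only on Linux. A rough cross-platform fallback queries os.sysconf; this is a sketch, and the SC_LEVEL* keys below are only defined on some platforms.

for key in ("SC_LEVEL1_DCACHE_SIZE", "SC_LEVEL2_CACHE_SIZE", "SC_LEVEL3_CACHE_SIZE"):
    try:
        # size in bytes of the corresponding cache level, when exposed
        print(key, os.sysconf(key))
    except (ValueError, OSError):
        print(key, "not available")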

Function to measure inference time#

def measure_inference(fct, X, repeat, max_time=5, quantile=1):
    """
    Runs the same function on data *X* up to *repeat* times.

    :param fct: function to run
    :param X: data
    :param repeat: number of times to run
    :param max_time: maximum time budget; after the first three runs, the loop
        stops once the cumulated time exceeds this value
    :param quantile: number of extreme values removed on both ends
        before computing the average
    :return: number of runs, sum of the times, trimmed average, median
    """
    times = []
    for n in range(repeat):
        perf = time.perf_counter()
        fct(X)
        delta = time.perf_counter() - perf
        times.append(delta)
        if len(times) < 3:
            continue
        if max_time is not None and sum(times) >= max_time:
            break
    times.sort()
    quantile = 0 if (len(times) - quantile * 2) < 3 else quantile
    if quantile == 0:
        tt = times
    else:
        tt = times[quantile:-quantile]
    return (len(times), sum(times), sum(tt) / len(tt), times[len(times) // 2])
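
As a quick illustration of the returned tuple, the function can be run on a trivial operation. The lambda and X_demo below are hypothetical, used only for the demonstration.

X_demo = numpy.random.randn(1000, 10).astype(numpy.float32)
n_runs, total, avg, med = measure_inference(
    lambda x: x.sum(axis=1), X_demo, repeat=10, max_time=1, quantile=1
)
print(f"runs={n_runs} total={total:.6f}s avg={avg:.6f}s med={med:.6f}s")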

Benchmark#

The following script benchmarks the inference of the same random forest, with scikit-learn on one side and onnxruntime on the other once the model is converted into ONNX, for the following configurations.

small = cpu_count() < 12
if small:
    N = 1000
    n_features = 10
    n_jobs = [1, cpu_count() // 2, cpu_count()]
    n_ests = [10, 20, 30]
    depth = [4, 6, 8, 10]
    Regressor = RandomForestRegressor
else:
    N = 100000
    n_features = 50
    n_jobs = [cpu_count(), cpu_count() // 2, 1]
    n_ests = [100, 200, 400]
    depth = [6, 8, 10, 12, 14]
    Regressor = RandomForestRegressor

legend = f"parallel-nf-{n_features}-"

# avoid duplicates on machines with 1 or 2 cores.
n_jobs = list(sorted(set(n_jobs), reverse=True))

Benchmark parameters

repeat = 7  # repeat n times the same inference
quantile = 1  # exclude extreme times
max_time = 5  # maximum number of seconds to spend on one configuration

Data

X = numpy.random.randn(N, n_features).astype(numpy.float32)
noise = (numpy.random.randn(X.shape[0]) / (n_features // 5)).astype(numpy.float32)
y = X.mean(axis=1) + noise
n_train = min(N, N // 3)
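
The target is the mean of the features plus some noise. A quick look at the generated arrays (an illustrative check, not part of the measures):

print(f"X: {X.shape} {X.dtype}, y: {y.shape} {y.dtype}, n_train={n_train}")
print(f"std(signal)={X.mean(axis=1).std():.3f}, std(noise)={noise.std():.3f}")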


data = []
couples = list(product(n_jobs, depth, n_ests))
bar = tqdm(couples)
cache_dir = "_cache"
if not os.path.exists(cache_dir):
    os.mkdir(cache_dir)

for n_j, max_depth, n_estimators in bar:
    if n_j == 1 and n_estimators > n_ests[0]:
        # skip the larger forests when a single thread is used
        continue

    # train the model or load it from the cache
    cache_name = os.path.join(
        cache_dir, f"nf-{X.shape[1]}-rf-J-{n_j}-E-{n_estimators}-D-{max_depth}.pkl"
    )
    if os.path.exists(cache_name):
        with open(cache_name, "rb") as f:
            rf = pickle.load(f)
    else:
        bar.set_description(f"J={n_j} E={n_estimators} D={max_depth} train rf")
        if n_j == 1 and issubclass(Regressor, RandomForestRegressor):
            # train with all cores to save time, then force single-thread
            # prediction for the benchmark
            rf = Regressor(max_depth=max_depth, n_estimators=n_estimators, n_jobs=-1)
            rf.fit(X[:n_train], y[:n_train])
            rf.n_jobs = 1
        else:
            rf = Regressor(max_depth=max_depth, n_estimators=n_estimators, n_jobs=n_j)
            rf.fit(X[:n_train], y[:n_train])
        with open(cache_name, "wb") as f:
            pickle.dump(rf, f)

    bar.set_description(f"J={n_j} E={n_estimators} D={max_depth} ISession")
    so = SessionOptions()
    so.intra_op_num_threads = n_j
    cache_name = os.path.join(
        cache_dir, f"nf-{X.shape[1]}-rf-J-{n_j}-E-{n_estimators}-D-{max_depth}.onnx"
    )
    if os.path.exists(cache_name):
        sess = InferenceSession(cache_name, so, providers=["CPUExecutionProvider"])
    else:
        bar.set_description(f"J={n_j} E={n_estimators} D={max_depth} cvt onnx")
        onx = to_onnx(rf, X[:1])
        with open(cache_name, "wb") as f:
            f.write(onx.SerializeToString())
        sess = InferenceSession(cache_name, so, providers=["CPUExecutionProvider"])
    onx_size = os.stat(cache_name).st_size

    # run once to avoid counting the first run
    bar.set_description(f"J={n_j} E={n_estimators} D={max_depth} predict1")
    rf.predict(X)
    sess.run(None, {"X": X})

    # fixed data
    obs = dict(
        n_jobs=n_j,
        max_depth=max_depth,
        n_estimators=n_estimators,
        repeat=repeat,
        max_time=max_time,
        name=rf.__class__.__name__,
        n_rows=X.shape[0],
        n_features=X.shape[1],
        onnx_size=onx_size,
    )

    # baseline
    bar.set_description(f"J={n_j} E={n_estimators} D={max_depth} predictB")
    r, t, mean, med = measure_inference(rf.predict, X, repeat=repeat, max_time=max_time)
    o1 = obs.copy()
    o1.update(dict(avg=mean, med=med, n_runs=r, ttime=t, name="base"))
    data.append(o1)

    # onnxruntime
    bar.set_description(f"J={n_j} E={n_estimators} D={max_depth} predictO")
    r, t, mean, med = measure_inference(
        lambda x: sess.run(None, {"X": x}), X, repeat=repeat, max_time=max_time
    )
    o2 = obs.copy()
    o2.update(dict(avg=mean, med=med, n_runs=r, ttime=t, name="ort_"))
    data.append(o2)
  0%|          | 0/36 [00:00<?, ?it/s]
J=8 E=10 D=4 ISession:   0%|          | 0/36 [00:00<?, ?it/s]
[... progress bar output trimmed ...]
J=1 E=10 D=10 predictO: 100%|██████████| 36/36 [00:02<00:00, 13.39it/s]

Saving data#

name = os.path.join(cache_dir, "plot_benchmark_rf")
print(f"Saving data into {name!r}")

df = pandas.DataFrame(data)
df2 = df.copy()
df2["legend"] = legend
df2.to_csv(f"{name}-{legend}.csv", index=False)
Saving data into '_cache/plot_benchmark_rf'

Printing the data

n_jobs max_depth n_estimators repeat max_time name n_rows n_features onnx_size avg med n_runs ttime
0 8 4 10 7 5 base 1000 10 11016 0.008059 0.008305 7 0.064896
1 8 4 10 7 5 ort_ 1000 10 11016 0.000122 0.000119 7 0.001116
2 8 4 20 7 5 base 1000 10 21920 0.010397 0.009973 7 0.073086
3 8 4 20 7 5 ort_ 1000 10 21920 0.000197 0.000193 7 0.018644
4 8 4 30 7 5 base 1000 10 31581 0.014009 0.013726 7 0.117249
5 8 4 30 7 5 ort_ 1000 10 31581 0.000234 0.000227 7 0.018801
6 8 6 10 7 5 base 1000 10 32334 0.005623 0.005928 7 0.040000
7 8 6 10 7 5 ort_ 1000 10 32334 0.000137 0.000135 7 0.015484
8 8 6 20 7 5 base 1000 10 63384 0.009197 0.009705 7 0.063185
9 8 6 20 7 5 ort_ 1000 10 63384 0.000201 0.000200 7 0.008112
10 8 6 30 7 5 base 1000 10 93267 0.015458 0.013936 7 0.111482
11 8 6 30 7 5 ort_ 1000 10 93267 0.000984 0.001171 7 0.008292
12 8 8 10 7 5 base 1000 10 70250 0.007109 0.007691 7 0.050002
13 8 8 10 7 5 ort_ 1000 10 70250 0.000188 0.000176 7 0.001608
14 8 8 20 7 5 base 1000 10 136595 0.010403 0.010489 7 0.073281
15 8 8 20 7 5 ort_ 1000 10 136595 0.000235 0.000218 7 0.001886
16 8 8 30 7 5 base 1000 10 200526 0.013901 0.013794 7 0.095357
17 8 8 30 7 5 ort_ 1000 10 200526 0.000318 0.000329 7 0.002452
18 8 10 10 7 5 base 1000 10 110896 0.005046 0.004331 7 0.036955
19 8 10 10 7 5 ort_ 1000 10 110896 0.000195 0.000193 7 0.001651
20 8 10 20 7 5 base 1000 10 205168 0.009787 0.009735 7 0.070975
21 8 10 20 7 5 ort_ 1000 10 205168 0.000341 0.000329 7 0.002545
22 8 10 30 7 5 base 1000 10 337667 0.015894 0.016135 7 0.110542
23 8 10 30 7 5 ort_ 1000 10 337667 0.000423 0.000450 7 0.003040
24 4 4 10 7 5 base 1000 10 10651 0.006571 0.006656 7 0.046083
25 4 4 10 7 5 ort_ 1000 10 10651 0.000100 0.000096 7 0.000862
26 4 4 20 7 5 base 1000 10 22066 0.009687 0.009409 7 0.068101
27 4 4 20 7 5 ort_ 1000 10 22066 0.000187 0.000189 7 0.001471
28 4 4 30 7 5 base 1000 10 32019 0.013959 0.013730 7 0.100615
29 4 4 30 7 5 ort_ 1000 10 32019 0.000363 0.000350 7 0.002796
30 4 6 10 7 5 base 1000 10 29122 0.010356 0.010596 7 0.071284
31 4 6 10 7 5 ort_ 1000 10 29122 0.000175 0.000156 7 0.001587
32 4 6 20 7 5 base 1000 10 60902 0.009367 0.009151 7 0.067881
33 4 6 20 7 5 ort_ 1000 10 60902 0.000242 0.000239 7 0.001878
34 4 6 30 7 5 base 1000 10 90785 0.016756 0.016786 7 0.118466
35 4 6 30 7 5 ort_ 1000 10 90785 0.000333 0.000328 7 0.002602
36 4 8 10 7 5 base 1000 10 63147 0.007990 0.008152 7 0.056213
37 4 8 10 7 5 ort_ 1000 10 63147 0.000143 0.000133 7 0.001182
38 4 8 20 7 5 base 1000 10 129927 0.009747 0.009624 7 0.069242
39 4 8 20 7 5 ort_ 1000 10 129927 0.000320 0.000318 7 0.002462
40 4 8 30 7 5 base 1000 10 199897 0.014127 0.013040 7 0.102263
41 4 8 30 7 5 ort_ 1000 10 199897 0.000430 0.000432 7 0.003159
42 4 10 10 7 5 base 1000 10 115188 0.006567 0.006794 7 0.046063
43 4 10 10 7 5 ort_ 1000 10 115188 0.000174 0.000175 7 0.001444
44 4 10 20 7 5 base 1000 10 206338 0.009754 0.009427 7 0.069542
45 4 10 20 7 5 ort_ 1000 10 206338 0.000309 0.000298 7 0.002365
46 4 10 30 7 5 base 1000 10 321591 0.013553 0.013157 7 0.095630
47 4 10 30 7 5 ort_ 1000 10 321591 0.000645 0.000649 7 0.004497
48 1 4 10 7 5 base 1000 10 11527 0.001613 0.001609 7 0.011519
49 1 4 10 7 5 ort_ 1000 10 11527 0.000301 0.000295 7 0.002174
50 1 6 10 7 5 base 1000 10 35181 0.001426 0.001380 7 0.010524
51 1 6 10 7 5 ort_ 1000 10 35181 0.000322 0.000316 7 0.002378
52 1 8 10 7 5 base 1000 10 69863 0.001542 0.001527 7 0.010840
53 1 8 10 7 5 ort_ 1000 10 69863 0.000422 0.000418 7 0.003022
54 1 10 10 7 5 base 1000 10 110504 0.001771 0.001779 7 0.013240
55 1 10 10 7 5 ort_ 1000 10 110504 0.000507 0.000500 7 0.003619
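
One way to summarize this table is the median speedup of onnxruntime over scikit-learn for every (n_jobs, n_estimators) pair. The following snippet is a convenience sketch reusing the DataFrame df built above.

piv = df.pivot_table(
    index=["n_jobs", "n_estimators", "max_depth"], columns="name", values="med"
)
piv["speedup"] = piv["base"] / piv["ort_"]
print(piv["speedup"].groupby(level=["n_jobs", "n_estimators"]).median())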


Plot#

n_rows = len(n_jobs)
n_cols = len(n_ests)


fig, axes = plt.subplots(n_rows, n_cols, figsize=(4 * n_cols, 4 * n_rows))
fig.suptitle(f"{rf.__class__.__name__}\nX.shape={X.shape}")

for n_j, n_estimators in tqdm(product(n_jobs, n_ests)):
    i = n_jobs.index(n_j)
    j = n_ests.index(n_estimators)
    ax = axes[i, j]

    subdf = df[(df.n_estimators == n_estimators) & (df.n_jobs == n_j)]
    if subdf.shape[0] == 0:
        continue
    piv = subdf.pivot(index="max_depth", columns="name", values=["avg", "med"])
    piv.plot(ax=ax, title=f"jobs={n_j}, trees={n_estimators}")
    ax.set_ylabel(f"n_jobs={n_j}", fontsize="small")
    ax.set_xlabel("max_depth", fontsize="small")

    # speedup ratio on a secondary axis
    ax2 = ax.twinx()
    piv1 = subdf.pivot(index="max_depth", columns="name", values="avg")
    piv1["speedup"] = piv1.base / piv1.ort_
    ax2.plot(piv1.index, piv1.speedup, "b--", label="speedup avg")

    piv1 = subdf.pivot(index="max_depth", columns="name", values="med")
    piv1["speedup"] = piv1.base / piv1.ort_
    ax2.plot(piv1.index, piv1.speedup, "y--", label="speedup med")

    # horizontal reference line at speedup = 1, plotted before the legend
    # is created so that its label is included
    ax2.plot(piv1.index, [1 for _ in piv1.index], "k--", label="no speedup")
    ax2.legend(fontsize="x-small")

for i in range(axes.shape[0]):
    for j in range(axes.shape[1]):
        axes[i, j].legend(fontsize="small")

fig.tight_layout()
fig.savefig(f"{name}-{legend}.png")
# plt.show()
[figure: RandomForestRegressor, X.shape=(1000, 10); one subplot per (n_jobs, n_estimators) pair: jobs=8 trees=10, jobs=8 trees=20, jobs=8 trees=30, jobs=4 trees=10, jobs=4 trees=20, jobs=4 trees=30, jobs=1 trees=10]
9it [00:00, 16.06it/s]
2024-04-18 15:37:51,091 matplotlib.legend [WARNING] - No artists with labels found to put in legend.  Note that artists whose label start with an underscore are ignored when legend() is called with no argument.
2024-04-18 15:37:51,092 matplotlib.legend [WARNING] - No artists with labels found to put in legend.  Note that artists whose label start with an underscore are ignored when legend() is called with no argument.

Total running time of the script: (0 minutes 12.944 seconds)

Gallery generated by Sphinx-Gallery