From 2b3027729f9fbcd0e693171f15fd19061a8c5e39 Mon Sep 17 00:00:00 2001
From: Yifan Zhao <yifanz16@illinois.edu>
Date: Thu, 28 Jan 2021 20:32:17 -0600
Subject: [PATCH] Added ILP tuning and sample

---
 env.yaml                  |   2 +
 predtuner_exp/__init__.py |   0
 predtuner_exp/ilp.py      | 137 ++++++++++++++++++++++++++++++++++++++
 predtuner_exp/main.py     |  33 +++++++++
 4 files changed, 172 insertions(+)
 create mode 100644 predtuner_exp/__init__.py
 create mode 100644 predtuner_exp/ilp.py
 create mode 100644 predtuner_exp/main.py

diff --git a/env.yaml b/env.yaml
index 735790b..842652d 100644
--- a/env.yaml
+++ b/env.yaml
@@ -1,6 +1,7 @@
 name: predtuner
 channels:
   - pytorch
+  - gurobi
   - defaults
 dependencies:
   - matplotlib=3.3.2
@@ -13,6 +14,7 @@ dependencies:
   - pip=20.2.4
   - wheel=0.35.1
   - jsonpickle=1.5
+  - gurobi==9.1
   - pip:
       - argparse
       - opentuner==0.8.3  # Must be 0.8.3, they fixed an important bug
diff --git a/predtuner_exp/__init__.py b/predtuner_exp/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/predtuner_exp/ilp.py b/predtuner_exp/ilp.py
new file mode 100644
index 0000000..1b54706
--- /dev/null
+++ b/predtuner_exp/ilp.py
@@ -0,0 +1,137 @@
+import logging
+from typing import Dict, List, Tuple, cast
+
+import gurobipy as gp
+import numpy as np
+import pandas as pd
+from gurobipy import GRB
+from predtuner import TorchApp
+from predtuner.approxapp import Config
+from predtuner.modeledapp import ApproxModeledTuner, LinearPerfModel, QoSModelP2
+
+msg_logger = logging.getLogger(__name__)
+
+
+class TorchILPTuningApp(TorchApp):
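+    """TorchApp variant whose get_tuner() returns the ILP-based tuner defined below."""
+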
+    def get_tuner(self) -> "ILPTuner":
+        return ILPTuner(self)
+
+
+class ILPTuner(ApproxModeledTuner):
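+    """Tuner that picks knob settings by solving an integer linear program.
+
+    Rather than running an empirical search, it minimizes the total cost
+    predicted by the linear performance model, subject to a bound on the
+    total QoS drop predicted by the P2 QoS model.
+    """
+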
+    def __init__(self, app: TorchILPTuningApp) -> None:
+        super().__init__(app)
+        self.baseline_qos, _ = self.app.empirical_measure_qos_perf({}, False)  # QoS with no approximations applied
+        models = self.app._name_to_model
+        try:
+            self.perf_model = cast(LinearPerfModel, models["perf_linear"])
+            self.perf_model._init()
+            self.cost_df = self.perf_model.cost_df
+            self.baseline_cost = self.perf_model.measure_perf({})
+        except (TypeError, KeyError) as e:
+            raise ValueError(
+                f'App "{app.name}" does not define linear performance model properly. Error: {e}'
+            )
+        try:
+            self.p2_model = cast(QoSModelP2, models["qos_p2"])
+            self.p2_model._init()
+            self.qos_drop_df = self.baseline_qos - self.p2_model.qos_df
+        except (TypeError, KeyError) as e:
+            raise ValueError(
+                f'App "{app.name}" does not define qos model p2 properly. Error: {e}'
+            )
+
+    def tune(
+        self,
+        qos_tuner_threshold: float,
+        n_solutions: int,
+        test_configs: bool = True,
+        **kwargs,
+    ) -> List[Config]:
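+        """Sweep `n_solutions` QoS-drop thresholds evenly spaced over
+        [0, qos_tuner_threshold], solve one ILP per threshold, and collect
+        the resulting configurations."""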
+        for threshold in np.linspace(0.0, qos_tuner_threshold, n_solutions):
+            msg_logger.info(f"=> Optimizing for QoS = {threshold:.2f}")
+            config, is_optimal = self.tune_threshold(threshold)
+            if is_optimal:
+                msg_logger.info(f"  => Optimal solution!")
+            conf_speedup = self.baseline_cost / config.perf
+            msg_logger.info(
+                f"=> Speedup = {conf_speedup:.4f}, QoS drop = {config.qos:.3f}"
+            )
+            msg_logger.info(f"=> Knobs: {config.knobs}")
+            self.all_configs.append(config)
+        self.best_configs = self.kept_configs = self.all_configs
+        msg_logger.info(
+            "Tuning finished with %d configs in total.",
+            len(self.all_configs),
+        )
+        if test_configs:
+            msg_logger.info("Checking configurations on test inputs")
+            self.test_configs_(self.best_configs)
+        self._tuned = True
+        return self.best_configs
+
+    def tune_threshold(self, threshold: float) -> Tuple[Config, bool]:
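+        """Solve a single ILP instance for the given QoS-drop budget.
+
+        One binary variable is created per (operator, knob) pair. The
+        objective minimizes the summed predicted cost of the selected knobs,
+        subject to the total predicted QoS drop staying within `threshold`
+        and exactly one knob being selected per operator. Returns the
+        resulting Config and whether Gurobi reports the solution as optimal.
+        """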
+        # Create a new model
+        model = gp.Model("")
+        model.Params.OutputFlag = 0
+        # Create variables
+        knob_vars = {}
+        for op_name, knobs in self.app.op_knobs.items():
+            for knob in knobs:
+                knob_name = knob.name
+                if "_" in op_name or "_" in knob_name:
+                    raise ValueError(
+                        "Underscore in operator name or knob name is unsupported"
+                    )
+                knob_vars[op_name, knob_name] = model.addVar(
+                    name=f"C_{op_name}_{knob_name}", vtype=GRB.BINARY
+                )
+        # Arrange the variables into a DataFrame; missing (op, knob) cells are filled with 0
+        knob_vars = _pair_key_dict_to_df(knob_vars, 0.0)
+        model.update()  # Update so we can print expr of these variables
+        # The _m suffix marks a symbolic expression (one that contains Gurobi variables)
+        total_cost_m = (self.cost_df * knob_vars).sum().sum()
+        # Set objective
+        model.setObjective(total_cost_m, GRB.MINIMIZE)
+        # Add QoS constraint
+        total_qos_drop = (self.qos_drop_df * knob_vars).sum().sum()
+        model.addConstr(total_qos_drop <= threshold, "qos")
+        # Add single-knob constraint
+        for op, layer_vars in knob_vars.iterrows():
+            model.addConstr(layer_vars.sum() == 1, f"layer{op}")
+        # Optimize model
+        model.optimize()
+        # Get results
+        _, config = _get_knobs_from_vars(model.getVars())
+        cost = model.PoolObjVal  # objective value of the retrieved solution
+        qos_drop = self.baseline_qos - self.p2_model.measure_qos(config)
+        return Config(qos_drop, cost, config), model.Status == GRB.OPTIMAL
+
+
+def _pair_key_dict_to_df(value: Dict[tuple, object], null_default=None):
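+    """Turn a dict keyed by (row, col) pairs into a DataFrame; cells that
+    have no entry are filled with `null_default`."""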
+    from collections import defaultdict
+
+    nested_dict = defaultdict(dict)
+    for (k1, k2), v in value.items():
+        nested_dict[k1][k2] = v
+    df = pd.DataFrame(nested_dict).T
+    df[df.isnull()] = null_default
+    return df
+
+
+def _get_knobs_from_vars(variables):
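+    """From the solved binary variables, recover for each layer the single
+    knob whose variable was set to 1."""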
+    def unique_one(series):
+        nonzeros = series.index[series == 1]
+        assert len(nonzeros) == 1
+        return nonzeros[0]
+
+    def parse_var_name(var_name: str) -> Tuple[str, str]:
+        import re
+
+        match = re.match(r"C_([^_]+)_([^_]+)", var_name)
+        layer, knob = match.groups()
+        return layer, knob
+
+    var_values = {parse_var_name(v.varName): v.x for v in variables}
+    var_values = _pair_key_dict_to_df(var_values)
+    knobs = {layer: unique_one(var_values.loc[layer]) for layer in var_values.index}
+    return var_values, knobs
diff --git a/predtuner_exp/main.py b/predtuner_exp/main.py
new file mode 100644
index 0000000..e298303
--- /dev/null
+++ b/predtuner_exp/main.py
@@ -0,0 +1,33 @@
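+"""Sample driver: tune ResNet18 on CIFAR-10 with the ILP tuner from ilp.py."""
+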
+import site
+from pathlib import Path
+
+import torch
+from torch.utils.data.dataloader import DataLoader
+from torch.utils.data.dataset import Subset
+
+site.addsitedir(Path(__file__).absolute().parent.parent.as_posix())
+from predtuner import accuracy, config_pylogger, get_knobs_from_file
+from predtuner.model_zoo import CIFAR, ResNet18
+
+from predtuner_exp.ilp import TorchILPTuningApp
+
+msg_logger = config_pylogger(output_dir="tuner_results/logs", verbose=True)
+tune_set = CIFAR.from_file(
+    "model_params/resnet18_cifar10/tune_input.bin",
+    "model_params/resnet18_cifar10/tune_labels.bin",
+)
+tune_loader = DataLoader(tune_set, batch_size=500)
+module = ResNet18()
+module.load_state_dict(torch.load("model_params/resnet18_cifar10.pth.tar"))
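+# The tuning loader is passed twice: this sample uses the same data for tuning and testing.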
+app = TorchILPTuningApp(
+    "TestTorchApp",
+    module,
+    tune_loader,
+    tune_loader,
+    get_knobs_from_file(),
+    accuracy,
+    model_storage_folder="tuner_results/resnet18_cifar10",
+)
+tuner = app.get_tuner()
+tuner.tune(qos_tuner_threshold=2.1, n_solutions=30)
+tuner.dump_configs("tuner_results/test/configs.json")
-- 
GitLab