diff --git a/.gitignore b/.gitignore index 5c26e73956b30a42dda16d719956d32ba4b9011b..280453d5fb32f342629f1888edfb18a308929d7a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ _build +.mypy* .Rhistory tmp .build diff --git a/.gitlab-ci.dhall b/.gitlab-ci.dhall index f3cdd684a0ce25a35cbecb80573d2975bf72ac99..0d77f6f6965b1de18d06d9b56c6edf79bad54c7b 100644 --- a/.gitlab-ci.dhall +++ b/.gitlab-ci.dhall @@ -29,7 +29,6 @@ in { stages = [ "source", "build", "test", "deploy" ] , nix/pynrm = mkNixB "pythonPackages.pynrm" , nix/libnrm = mkNixB "libnrm" , nix/stream = mkNixB "stream" - , notebooks = mkT "notebooks" , tests/kvm = mkT "tests-kvm" , tests/apps = mkT "app-tests" , tests/rapl = mkT "tests-rapl" ⫽ { tags = [ "chimera" ] } diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4173e2feeafac6a1f4ef00b214ef11677a9f22fa..3d07f28bbce345cecfa462e751ecbbe8961e0535 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -89,12 +89,6 @@ nixfmt: tags: - nix - kvm -notebooks: - script: "nix-shell -p gnumake --run 'make notebooks'" - stage: test - tags: - - nix - - kvm pynrm/black: script: "nix-shell -p gnumake --run 'make pynrm/black'" stage: source diff --git a/Makefile b/Makefile index 5959334132a5bdaa44b6460addd71f64b21e05fb..34c1651003cdc28a4370a44884008b95f892d8c9 100644 --- a/Makefile +++ b/Makefile @@ -38,16 +38,6 @@ pre-commit: hsnrm/pre-commit\ resource-propagation\ .gitlab-ci.yml -.PHONY: notebooks -notebooks: - @nix-shell --pure --run <<< bash ' - notebooks/batchnb.py notebooks/configuration.ipynb - jupyter nbconvert doc/notebooks/notebooks/configuration.ipynb --output-dir=doc/notebooks/notebooks - rm doc/notebooks/notebooks/configuration.ipynb - jupyter nbconvert notebooks/tutorial.ipynb --output-dir=doc/notebooks/notebooks - jupyter nbconvert notebooks/internal-control.ipynb --output-dir=doc/notebooks/notebooks - ' - app-tests: @nix-shell --pure -p \ 'with (import {}); stream.override { nrmSupport = true; }' \ diff --git a/doc/notebooks/notebooks/configuration.html b/doc/notebooks/notebooks/configuration.html deleted file mode 100644 index 476067ec8596d70bf981d66e99fd573386e5e29d..0000000000000000000000000000000000000000 --- a/doc/notebooks/notebooks/configuration.html +++ /dev/null @@ -1,13598 +0,0 @@ - - - - -configuration - - - - - - - - - - - - - - - - - - - - - - -

NRM Configuration/Manifest guide

This notebook documents NRM's configuration and manifest format. The next two cells are for setup purposes.

In [1]:
%%capture
-cd ..
-
In [2]:
import json
-import pprint
-
-pp = pprint.PrettyPrinter(indent=4)
-

Daemon configuration


nrmd's configuration can be written in JSON, YAML, or Dhall. Admissible values are defined in the file resources/configurationSchema.json, and are alternatively available as a Dhall type in resources/types/Cfg.dhall. Schema files get large, so the next cell shows the Dhall Configuration type as a more readable alternative.
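As a quick sanity check, a configuration candidate can be validated against that schema before handing it to nrmd. This is a minimal sketch, not part of the original notebook: it assumes the jsonschema Python package is available and that the schema lives at ./hsnrm/resources/configurationSchema.json, by analogy with the other paths used below. Whether a partial configuration passes depends on which fields the schema marks as required.

import json
import jsonschema  # assumed to be available in this environment

with open("./hsnrm/resources/configurationSchema.json") as f:
    schema = json.load(f)

# A small candidate configuration; only overrides are specified here.
candidate = {"verbose": "Info", "logfile": "/tmp/nrm.log"}

# Raises jsonschema.exceptions.ValidationError if the candidate does not match the schema.
jsonschema.validate(instance=candidate, schema=schema)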

In [3]:
%%script dhall resolve
-./hsnrm/resources/defaults/Cfg.dhall
-
{ activeSensorFrequency =
-    { fromHz = 1.0 }
-, argo_nodeos_config =
-    "argo_nodeos_config"
-, argo_perf_wrapper =
-    "nrm-perfwrapper"
-, controlCfg =
-    < ControlCfg :
-        { hint :
-            < Full
-            | Only :
-                { only :
-                    List (List { actuatorID : Text, actuatorValue : Double })
-                }
-            >
-        , learnCfg :
-            < Contextual :
-                { contextual : { horizon : Integer } }
-            | Lagrange :
-                { lagrange : Double }
-            | Random :
-                { random : Optional Integer }
-            >
-        , minimumControlInterval :
-            { fromuS : Double }
-        , referenceMeasurementRoundInterval :
-            Integer
-        , speedThreshold :
-            Double
-        , staticPower :
-            { fromuW : Double }
-        }
-    | FixedCommand :
-        { fixedPower : { fromuW : Double } }
-    >.FixedCommand
-    { fixedPower = { fromuW = 2.5e8 } }
-, downstreamCfg =
-    { downstreamBindAddress = "ipc:///tmp/nrm-downstream-event" }
-, dummy =
-    True
-, hwloc =
-    "hwloc"
-, hwmonCfg =
-    { hwmonEnabled = True, hwmonPath = "/sys/class/hwmon" }
-, libnrmPath =
-    None Text
-, logfile =
-    "/tmp/nrm.log"
-, nodeos =
-    False
-, perf =
-    "perf"
-, pmpi_lib =
-    "pmpi_lib"
-, raplCfg =
-    Some
-    { raplActions =
-        [ { fromuW = 1.0e8 }, { fromuW = 2.0e8 } ]
-    , raplPath =
-        "/sys/devices/virtual/powercap/intel-rapl"
-    , referencePower =
-        { fromuW = 2.5e8 }
-    }
-, singularity =
-    False
-, slice_runtime =
-    < Dummy | Nodeos | Singularity >.Dummy
-, upstreamCfg =
-    { pubPort = +2345, rpcPort = +3456, upstreamBindAddress = "*" }
-, verbose =
-    < Debug | Error | Info >.Error
-}

Optional values are filled using defaults that can be found in resources/defaults/Cfg.json (also available in the Dhall format):

In [4]:
%%bash
-cat ./hsnrm/resources/defaults/Cfg.json | jq
-
{
-  "pmpi_lib": "pmpi_lib",
-  "verbose": "Error",
-  "logfile": "/tmp/nrm.log",
-  "singularity": false,
-  "argo_nodeos_config": "argo_nodeos_config",
-  "controlCfg": {
-    "fixedPower": {
-      "fromuW": 250000000
-    }
-  },
-  "upstreamCfg": {
-    "upstreamBindAddress": "*",
-    "rpcPort": 3456,
-    "pubPort": 2345
-  },
-  "libnrmPath": null,
-  "activeSensorFrequency": {
-    "fromHz": 1
-  },
-  "perf": "perf",
-  "argo_perf_wrapper": "nrm-perfwrapper",
-  "downstreamCfg": {
-    "downstreamBindAddress": "ipc:///tmp/nrm-downstream-event"
-  },
-  "nodeos": false,
-  "hwloc": "hwloc",
-  "raplCfg": {
-    "referencePower": {
-      "fromuW": 250000000
-    },
-    "raplActions": [
-      {
-        "fromuW": 100000000
-      },
-      {
-        "fromuW": 200000000
-      }
-    ],
-    "raplPath": "/sys/devices/virtual/powercap/intel-rapl"
-  },
-  "dummy": true,
-  "slice_runtime": "Dummy",
-  "hwmonCfg": {
-    "hwmonPath": "/sys/class/hwmon",
-    "hwmonEnabled": true
-  }
-}
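nrmd fills in these defaults internally. Purely as an illustration of that behavior (this sketch is not part of the original notebook, and the override values are made up), a partial configuration can be merged over the defaults shown above with a recursive dictionary merge:

import json

def merge_defaults(defaults, overrides):
    """Return `defaults` with `overrides` applied, recursing into nested objects."""
    merged = dict(defaults)
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_defaults(merged[key], value)
        else:
            merged[key] = value
    return merged

with open("./hsnrm/resources/defaults/Cfg.json") as f:
    defaults = json.load(f)

# Hypothetical overrides: raise verbosity and point RAPL at a different sysfs path.
partial = {"verbose": "Info", "raplCfg": {"raplPath": "/sys/devices/virtual/powercap/intel-rapl:0"}}
cfg = merge_defaults(defaults, partial)
print(cfg["verbose"], cfg["raplCfg"]["raplPath"], cfg["raplCfg"]["referencePower"])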

Manifest configuration


Example manifest files are in resources/examples, in JSON/YAML/Dhall format. For instance, the manifest file resources/examples/perfwrap.json enables performance monitoring:

In [5]:
%%bash
-cat ./hsnrm/resources/examples/perfwrap.json | jq
-
{
-  "image": null,
-  "hwbind": false,
-  "app": {
-    "scheduler": "FIFO",
-    "instrumentation": null,
-    "power": {
-      "slowdown": 1,
-      "profile": false,
-      "policy": "NoPowerPolicy"
-    },
-    "perfwrapper": {
-      "perfLimit": {
-        "fromOps": 100000
-      },
-      "perfFreq": {
-        "fromHz": 1
-      }
-    },
-    "slice": {
-      "cpus": 1,
-      "mems": 1
-    }
-  },
-  "name": "default"
-}
-

Manifest options are documented in schema file resources/manifestSchema.json. The next cell shows the corresponding Dhall type.

In [6]:
%%script dhall resolve
-./hsnrm/resources/types/Manifest.dhall
-
{ app :
-    { instrumentation :
-        Optional { ratelimit : { fromHz : Double } }
-    , perfwrapper :
-        < Perfwrapper :
-            { perfFreq :
-                { fromHz : Double }
-            , perfLimit :
-                { fromOps : Integer }
-            }
-        | PerfwrapperDisabled
-        >
-    , power :
-        { policy :
-            < Combined | DDCM | DVFS | NoPowerPolicy >
-        , profile :
-            Bool
-        , slowdown :
-            Integer
-        }
-    , scheduler :
-        < FIFO | HPC | Other : { _1 : Integer } >
-    , slice :
-        { cpus : Integer, mems : Integer }
-    }
-, hwbind :
-    Bool
-, image :
-    Optional
-    { binds : Optional (List Text), imagetype : < Docker | Sif >, path : Text }
-, name :
-    Text
-}

Under-specified manifests, like the ones used for our workloads (with optional fields from the schema left out), have their missing values filled with defaults, which are located in the file resources/defaults/Manifest.json:

In [7]:
%%bash
-cat hsnrm/resources/defaults/Manifest.json | jq
-
{
-  "image": null,
-  "hwbind": false,
-  "app": {
-    "scheduler": "FIFO",
-    "instrumentation": null,
-    "power": {
-      "slowdown": 1,
-      "profile": false,
-      "policy": "NoPowerPolicy"
-    },
-    "perfwrapper": "PerfwrapperDisabled",
-    "slice": {
-      "cpus": 1,
-      "mems": 1
-    }
-  },
-  "name": "default"
-}
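In other words, a manifest only needs to list the fields it overrides. A small hypothetical example (the name and limits below are illustrative, not taken from the repository):

partial_manifest = {
    "name": "my-monitored-app",  # illustrative name
    "app": {
        "perfwrapper": {
            "perfLimit": {"fromOps": 100000},
            "perfFreq": {"fromHz": 1},
        }
    },
}
# Every field left out above (image, hwbind, scheduler, power, slice, ...)
# is taken from resources/defaults/Manifest.json when the manifest is loaded.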

The dhall and dhall-to-json utilities are available as a convenience in this environment, should you need them. Dhall is useful as a configuration language in its own right:

In [8]:
%%script bash
-dhall-to-json <<< 'let Manifest = ./hsnrm/resources/types/Manifest.dhall 
-    let appendName = \(m: Manifest) -> m // {name = m.name ++ "-appended" }
-    in appendName ./hsnrm/resources/defaults/Manifest.dhall
-' | jq
-
{
-  "image": null,
-  "hwbind": false,
-  "app": {
-    "scheduler": "FIFO",
-    "instrumentation": null,
-    "power": {
-      "slowdown": 1,
-      "profile": false,
-      "policy": "NoPowerPolicy"
-    },
-    "perfwrapper": "PerfwrapperDisabled",
-    "slice": {
-      "cpus": 1,
-      "mems": 1
-    }
-  },
-  "name": "default-appended"
-}

Remember that any JSON document is one small step away from being a Python dictionary:
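A minimal sketch of that idea (illustrative only):

import json
import pprint

with open("./hsnrm/resources/defaults/Manifest.json") as f:
    manifest = json.load(f)          # a plain Python dict from here on

manifest["name"] = "default-tweaked"  # edit it like any other dictionary
pprint.PrettyPrinter(indent=4).pprint(manifest)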

diff --git a/doc/notebooks/notebooks/internal-control.html b/doc/notebooks/notebooks/internal-control.html
deleted file mode 100644
index 16fbe9c3c170f6650071385ceb44b3ca4dd3daa9..0000000000000000000000000000000000000000
--- a/doc/notebooks/notebooks/internal-control.html
+++ /dev/null
@@ -1,14257 +0,0 @@

Internal control experiments

This notebook runs NRM with its internal bandit control enabled.


For the application of the NRM model to the resource management of a single computational job, the global resource optimization problem is the following:

$$
\begin{array}{l}
  \min \quad e_{\text{total}} \\
  \text{s.t.} \quad t > \tau t_{\text{ref}}
\end{array}
$$

Where $e_{\text{total}}$ denotes the total energy spent by the system during the lifetime of the job, whose duration is denoted by $t^T$. We denote by $t_{\text{ref}}$ a reference measurement of the runtime of the job on an unmanaged system. $\tau < 1$ is a parameter controlling the amount of runtime degradation allowed for the job.

The value of this global objective can easily be measured a posteriori for a computational job using power instrumentation techniques. Assuming both workload and platform behavior to be deterministic, this objective is measured using two runs of the system: a first run without resource management to acquire $t_{\text{ref}}$, and one run with NRM enabled. However, for NRM's round-based control strategy to address this problem, we need an online loss value. This loss is obtained using the following loose assumptions:

• The passive power consumption of the node is fixed and known. [1]
• The total power consumption in a given time period can be estimated as the sum of the static node consumption over that period and the RAPL power measurement over that period. [2]
• The impact of a choice of power cap on the job's runtime can be interpolated linearly from its impact on CPU counters. [3]

Denoting as in the previous section the round counter by $0 < r < T$, the known passive static power consumption by $p_{\text{static}}$, the starting time of the job by $t^0$ and the end time of round $r$ by $t^r$, we can write the total energy expenditure of the job, based on RAPL power measurements $p^r$ and using assumptions 1 and 2, as:

$$
e_{\text{total}} = \sum_{r=1}^{r=T} (p^r + p_{\text{static}}) (t^{r-1} - t^{r})
$$

Using assumption 3, we can reasonably estimate the change in job runtime incurred by the choice of power cap in round $r$ by evaluating $\frac{s^r_{\text{ref}}}{s^r}$. We use this as part of our proxy cost in two ways: first, this quantity is used to evaluate breaching of the constraint on $t$; second, it is used to adjust for an expected increase in the number of rounds due to the impact on job runtime. This gives rise to the following value for the loss at round $r$:

$$
\ell^r = \mathbb{1}_{\left( \frac{s^r}{s^r_{\text{ref}}} > \tau \right)} \left( \frac{s^r_{\text{ref}}}{s^r} \left( p^r + p_{\text{static}} \right) \right)
$$
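A small numerical sketch of this loss follows. It is not part of the original notebook; the values are made up, and p_static corresponds to the staticPower configuration field seen earlier.

def round_loss(s, s_ref, p, p_static, tau):
    """Proxy loss for one control round, exactly as defined above."""
    indicator = 1.0 if (s / s_ref) > tau else 0.0
    return indicator * (s_ref / s) * (p + p_static)

# Made-up numbers: 90 W RAPL reading, 200 W static power, 5% slowdown
# with respect to the reference speed, and tau = 0.9.
print(round_loss(s=0.95, s_ref=1.0, p=90.0, p_static=200.0, tau=0.9))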
In [1]:
cd ..
-
/home/fre/workspace/hnrm

In [2]:
%%capture
-%%bash
-./shake.sh build # for the daemon
-./shake.sh client # for the upstream client
-./shake.sh pyclient # for the shared client library
In [3]:
import json
-
-daemonCfgs = {}
-
-
-for cap in [100, 150, 200]:
-    daemonCfgs["pcap" + str(cap)] = {
-        "controlCfg": {"fixedPower": {"fromuW": cap * 1000000}}
-    }
-daemonCfgs["controlOn"] = {
-    "controlCfg": {
-        "staticPower": {"fromuW": 200000000},
-        "referenceMeasurementRoundInterval": 10,
-        "learnCfg": {"lagrangeConstraint": 1},
-        "speedThreshold": 0.9,
-        "minimumControlInterval": {"fromuS": 1000000},
-    },
-    "verbose": "Debug",
-}
-
-
-def perfwrapped(cmd, args):
-    return [
-        {
-            "cmd": cmd,
-            "args": args,
-            "sliceID": "toto",
-            "manifest": {
-                "app": {
-                    "slice": {"cpus": 1, "mems": 1},
-                    "perfwrapper": {
-                        "perfLimit": {"fromOps": 100000},
-                        "perfFreq": {"fromHz": 1},
-                    },
-                },
-                "name": "perfwrap",
-            },
-        }
-    ]
-
-
-stream = perfwrapped("stream_c", [])
-lammps = perfwrapped(
-    "mpiexec",
-    ["-n", "24", "amg", "-problem", "2", "-n", "90", "90", "90", "-P", "2", "12", "1"],
-)
-
In [4]:
import nrm.tooling as nrm
-
-host = nrm.Local()
-
In [5]:
host.start_daemon(daemonCfgs["pcap100"])
-# assert host.check_daemon()
-print(host.get_cpd())
-
connecting
-connected to tcp://localhost:2345
-Problem 
-    { sensors = Map 
-        [ 
-            ( SensorID { sensorID = "RaplKey (PackageID 0)" }
-            , Sensor 
-                { range = 0.0 ... 300.0
-                , maxFrequency = 3.0
-                } 
-            ) 
-        ]
-    , actuators = Map 
-        [ 
-            ( ActuatorID { actuatorID = "RaplKey (PackageID 0)" }
-            , Actuator 
-                { actions = 
-                    [ DiscreteDouble 100.0
-                    , DiscreteDouble 200.0
-                    ] 
-                }
-            ) 
-        ]
-    , objectives = []
-    , constraints = []
-    } 

The next cell just stops the daemon cleanly.

In [6]:
host.stop_daemon()
-assert host.check_daemon() == False
-

Helpers

For performing experiments:

In [7]:
import time
-from collections import defaultdict
-
-
-def do_workload(host, daemonCfg, workload):
-    host.start_daemon(daemonCfg)
-    print("Starting the workload")
-    host.run_workload(workload)
-    history = defaultdict(list)
-    # print(host.get_state())
-    getCPD = True
-    try:
-        while host.check_daemon() and not host.workload_finished():
-            measurement_message = host.workload_recv()
-            msg = json.loads(measurement_message)
-            if "pubMeasurements" in msg:
-                if getCPD:
-                    getCPD = False
-                    time.sleep(3)
-                    cpd = host.get_cpd()
-                    print(cpd)
-                    cpd = dict(cpd)
-                    print("Sensor identifier list:")
-                    for sensorID in [sensor[0] for sensor in cpd["sensors"]]:
-                        print("- %s" % sensorID)
-                    print("Actuator identifier list:")
-                    for sensorID in [sensor[0] for sensor in cpd["actuators"]]:
-                        print("- %s" % sensorID)
-                content = msg["pubMeasurements"][1][0]
-                t = content["time"]
-                sensorID = content["sensorID"]
-                x = content["sensorValue"]
-                print(
-                    ".",
-                    end=""
-                    # "Measurement: originating at time %s for sensor %s of value %s"
-                    #% (content["time"], content["sensorID"], content["sensorValue"])
-                )
-                history["sensor-" + sensorID].append((t, x))
-            if "pubCPD" in msg:
-                print("R")
-            if "pubAction" in msg:
-                # print(host.get_state())
-                # print(msg)
-                t, contents, meta, controller = msg["pubAction"]
-                if "bandit" in controller.keys():
-                    for key in meta.keys():
-                        history["actionType"].append((t, key))
-                    if "referenceMeasurementDecision" in meta.keys():
-                        print("(ref)", end="")
-                    elif "initialDecision" in meta.keys():
-                        print("(init)", end="")
-                    elif "innerDecision" in meta.keys():
-                        print("(inner)", end="")
-                        counter = 0
-                        for value in meta["innerDecision"]["constraints"]:
-                            history["constraint-" + str(counter)].append(
-                                (t, value["fromConstraintValue"])
-                            )
-                            counter = counter + 1
-                        counter = 0
-                        for value in meta["innerDecision"]["objectives"]:
-                            history["objective-" + str(counter)].append(
-                                (t, value["fromObjectiveValue"])
-                            )
-                            counter = counter + 1
-                        history["loss"].append((t, meta["innerDecision"]["loss"]))
-                for (arm, (visits, stat)) in controller["armstats"]:
-                    history["armstat-" + str(arm)].append((t, stat))
-                    history["visits-" + str(arm)].append((t, visits))
-                for content in contents:
-                    actuatorID = content["actuatorID"] + "(action)"
-                    x = content["actuatorValue"]
-                    history[actuatorID].append((t, x))
-                    for arm in controller["bandit"]["lagrange"]["lagrangeConstraint"][
-                        "weights"
-                    ]:
-                        value = arm["action"][0]["actuatorValue"]
-                        history[str(value / 1000000) + "-probability"].append(
-                            (t, arm["probability"]["getProbability"])
-                        )
-                        history[str(value / 1000000) + "-cumulativeLoss"].append(
-                            (t, arm["cumulativeLoss"]["getCumulativeLoss"])
-                        )
-                # print(
-                # "Action: originating at time %s for actuator %s of value %s"
-                #% (t,actuatorID,x)
-                # )
-            host.check_daemon()
-    except:
-        return history
-    host.stop_daemon()
-    return history
-

Run the experiments:

In [8]:
results = {}
-for key, cfg in daemonCfgs.items():
-    results[key] = do_workload(host, cfg, stream)
-
connecting
-connected to tcp://localhost:2345
-Starting the workload
-Problem 
-    { sensors = Map 
-        [ 
-            ( SensorID { sensorID = "DownstreamCmdKey (DownstreamCmdID 706df7b0-bce1-4384-8274-e62b5a6937c3)" }
-            , Sensor 
-                { range = 0.0 ... 8.614535308e9
-                , maxFrequency = 1.0
-                } 
-            ) 
-        , 
-            ( SensorID { sensorID = "RaplKey (PackageID 0)" }
-            , Sensor 
-                { range = 0.0 ... 300.0
-                , maxFrequency = 3.0
-                } 
-            ) 
-        ] 
-    , actuators = Map 
-        [ 
-            ( ActuatorID { actuatorID = "RaplKey (PackageID 0)" }
-            , Actuator 
-                { actions = 
-                    [ DiscreteDouble 100.0
-                    , DiscreteDouble 200.0
-                    ] 
-                }
-            ) 
-        ]
-    , objectives = []
-    , constraints = []
-    } 
-Sensor identifier list:
-- DownstreamCmdKey (DownstreamCmdID 706df7b0-bce1-4384-8274-e62b5a6937c3)
-- RaplKey (PackageID 0)
-Actuator identifier list:
-- RaplKey (PackageID 0)
-...................................................................................................................................................................................................................................................................................................................................................................................connecting
-connected to tcp://localhost:2345
-Starting the workload
-Problem 
-    { sensors = Map 
-        [ 
-            ( SensorID { sensorID = "DownstreamCmdKey (DownstreamCmdID 8ae8153d-532b-42a0-9c53-185ca78697f8)" }
-            , Sensor 
-                { range = 0.0 ... 1.0153094158e10
-                , maxFrequency = 1.0
-                } 
-            ) 
-        , 
-            ( SensorID { sensorID = "RaplKey (PackageID 0)" }
-            , Sensor 
-                { range = 0.0 ... 300.0
-                , maxFrequency = 3.0
-                } 
-            ) 
-        ] 
-    , actuators = Map 
-        [ 
-            ( ActuatorID { actuatorID = "RaplKey (PackageID 0)" }
-            , Actuator 
-                { actions = 
-                    [ DiscreteDouble 100.0
-                    , DiscreteDouble 200.0
-                    ] 
-                }
-            ) 
-        ]
-    , objectives = []
-    , constraints = []
-    } 
-Sensor identifier list:
-- DownstreamCmdKey (DownstreamCmdID 8ae8153d-532b-42a0-9c53-185ca78697f8)
-- RaplKey (PackageID 0)
-Actuator identifier list:
-- RaplKey (PackageID 0)
-...........................................connecting
-connected to tcp://localhost:2345
-Starting the workload
-Problem 
-    { sensors = Map 
-        [ 
-            ( SensorID { sensorID = "DownstreamCmdKey (DownstreamCmdID b4deb461-ff70-4db1-8302-d02facbd9a4d)" }
-            , Sensor 
-                { range = 0.0 ... 8.73804945e9
-                , maxFrequency = 1.0
-                } 
-            ) 
-        , 
-            ( SensorID { sensorID = "RaplKey (PackageID 0)" }
-            , Sensor 
-                { range = 0.0 ... 300.0
-                , maxFrequency = 3.0
-                } 
-            ) 
-        ] 
-    , actuators = Map 
-        [ 
-            ( ActuatorID { actuatorID = "RaplKey (PackageID 0)" }
-            , Actuator 
-                { actions = 
-                    [ DiscreteDouble 100.0
-                    , DiscreteDouble 200.0
-                    ] 
-                }
-            ) 
-        ]
-    , objectives = []
-    , constraints = []
-    } 
-Sensor identifier list:
-- DownstreamCmdKey (DownstreamCmdID b4deb461-ff70-4db1-8302-d02facbd9a4d)
-- RaplKey (PackageID 0)
-Actuator identifier list:
-- RaplKey (PackageID 0)
-.....................................................................................................................connecting
-connected to tcp://localhost:2345
-Starting the workload
-Problem 
-    { sensors = Map 
-        [ 
-            ( SensorID { sensorID = "DownstreamCmdKey (DownstreamCmdID aade1bd4-3402-4c1e-a5b3-3a9935302a27)" }
-            , Sensor 
-                { range = 0.0 ... 100000.0
-                , maxFrequency = 1.0
-                } 
-            ) 
-        , 
-            ( SensorID { sensorID = "RaplKey (PackageID 0)" }
-            , Sensor 
-                { range = 0.0 ... 300.0
-                , maxFrequency = 3.0
-                } 
-            ) 
-        ] 
-    , actuators = Map 
-        [ 
-            ( ActuatorID { actuatorID = "RaplKey (PackageID 0)" }
-            , Actuator 
-                { actions = 
-                    [ DiscreteDouble 100.0
-                    , DiscreteDouble 200.0
-                    ] 
-                }
-            ) 
-        ]
-    , objectives = []
-    , constraints = []
-    } 
-Sensor identifier list:
-- DownstreamCmdKey (DownstreamCmdID aade1bd4-3402-4c1e-a5b3-3a9935302a27)
-- RaplKey (PackageID 0)
-Actuator identifier list:
-- RaplKey (PackageID 0)
-........................................
In [9]:
import pandas as pd
-import matplotlib.pyplot as plt
-import seaborn as sns
-import numpy as np
-import scipy.integrate as integrate
-from functools import reduce
-
-
-def history_to_dataframe(key, history):
-    name = key
-
-    def mkdf(columnName, measurements):
-        dataframe = pd.DataFrame(
-            data=[(pd.Timestamp(t, unit="us"), m) for t, m in measurements]
-        )
-        dataframe.columns = ["time", columnName]
-        return dataframe
-
-    data_frames = [
-        mkdf(columnName, measurements) for (columnName, measurements) in history.items()
-    ]
-    merged = reduce(
-        lambda left, right: pd.merge(left, right, on=["time"], how="outer"), data_frames
-    )
-    return merged.melt(id_vars=["time"]).assign(name=name)
-
-
-result_df = pd.concat(
-    [history_to_dataframe(key, history) for key, history in results.items()]
-)
-

Let's display the evolution of the proxy objective and the performance of these strategies on the final objective.
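The plots below show the recorded time series. As a rough sketch of the final objective itself (not part of the original notebook), the total energy can be estimated from the recorded RAPL power history. The sensor key name follows the identifiers printed above, and the sketch assumes the sensor reports watts (as its 0–300 range suggests), timestamps in microseconds, and a 200 W static power as in the controlOn configuration.

def total_energy(history, key="sensor-RaplKey (PackageID 0)", p_static=200.0):
    """Left-rectangle integration of (power + static power) over time, in joules."""
    samples = sorted(history.get(key, []))
    joules = 0.0
    for (t0, p), (t1, _) in zip(samples, samples[1:]):
        joules += (p + p_static) * (t1 - t0) * 1e-6  # microseconds -> seconds
    return joules

for key, history in results.items():
    print(key, "%.1f J" % total_energy(history))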

In [10]:
import pandas as pd
-import matplotlib.pyplot as plt
-import seaborn as sns
-
-
-def plot_history(history):
-    nplots = len(history.keys())
-    fig = plt.figure()
-    fig, (axes) = plt.subplots(nplots, 1, sharex=True)
-    fig.subplots_adjust(wspace=0.1)
-    fig.set_size_inches(17, 25 * nplots / 10, forward=True)
-
-    minTime = min(
-        [
-            min([pd.Timestamp(m[0], unit="us") for m in measurements])
-            for cname, measurements in history.items()
-        ]
-    )
-    maxTime = max(
-        [
-            max([pd.Timestamp(m[0], unit="us") for m in measurements])
-            for cname, measurements in history.items()
-        ]
-    )
-
-    plt.xlim(minTime, maxTime)
-
-    for ((columnName, measurements), ax) in zip(history.items(), axes):
-        ax.set_title(columnName)
-        dataframe = pd.DataFrame(
-            data=[(pd.Timestamp(t, unit="us"), m) for t, m in measurements]
-        )
-        dataframe.columns = ["time", "value"]
-        if dataframe.dtypes["value"] == "object":
-            sns.catplot(ax=ax, x="time", y="value", kind="swarm", data=dataframe)
-        else:
-            sns.lineplot(ax=ax, x="time", y="value",data=dataframe)
-    return (fig,minTime, maxTime)
-
In [11]:
for k, h in results.items():
-    plot_history(h)
-
/nix/store/ws7algif7c1inwk7s6hvmml5rhsfca4w-python3.7-pandas-0.24.2/lib/python3.7/site-packages/pandas/plotting/_converter.py:129: FutureWarning: Using an implicitly registered datetime converter for a matplotlib plotting method. The converter was registered by pandas on import. Future versions of pandas will require you to explicitly register matplotlib converters.
-
-To register the converters:
-	>>> from pandas.plotting import register_matplotlib_converters
-	>>> register_matplotlib_converters()
-  warnings.warn(msg, FutureWarning)
-
[Figure output: one stack of time-series subplots per daemon configuration; the images themselves are not preserved in this export.]
diff --git a/doc/notebooks/notebooks/tutorial.html b/doc/notebooks/notebooks/tutorial.html
deleted file mode 100644
index 9f100f060b03f4658fc9524795e9dce08b09418d..0000000000000000000000000000000000000000
--- a/doc/notebooks/notebooks/tutorial.html
+++ /dev/null
@@ -1,14460 +0,0 @@