import unittest
import sys
sys.path.insert(1, '.')
sys.path.insert(1, '..')
import cconfigspace as ccs
from math import sin

class TestTuner(unittest.TestCase):
  def create_tuning_problem(self):
    """Build the shared tuning problem for these tests.

    Returns a tuple ``(configuration_space, objective_space)`` where the
    configuration space holds three numerical hyperparameters on [-5, 5]
    and the objective space holds two unbounded numerical objectives.
    """
    config_space = ccs.ConfigurationSpace(name = "cspace")
    inputs = [ccs.NumericalHyperparameter(lower = -5.0, upper = 5.0) for _ in range(3)]
    config_space.add_hyperparameters(inputs)
    objective_space = ccs.ObjectiveSpace(name = "ospace")
    # Objectives are unbounded numerical values.
    outputs = [ccs.NumericalHyperparameter(lower = float('-inf'), upper = float('inf')) for _ in range(2)]
    objective_space.add_hyperparameters(outputs)
    objective_space.add_objectives([ccs.Variable(hyperparameter = v) for v in outputs])
    return (config_space, objective_space)

  def test_create_random(self):
    """Exercise the RandomTuner: repeated ask/tell cycles grow the history
    and the reported optimums form a Pareto front.
    """
    (cs, os) = self.create_tuning_problem()
    t = ccs.RandomTuner(name = "tuner", configuration_space = cs, objective_space = os)
    self.assertEqual("tuner", t.name)
    self.assertEqual(ccs.TUNER_RANDOM, t.type.value)
    # Two objectives evaluated on each sampled configuration (x, y, z).
    func = lambda x, y, z: [(x-2)*(x-2), sin(z+y)]
    evals = [ccs.Evaluation(objective_space = os, configuration = c, values = func(*(c.values))) for c in t.ask(100)]
    t.tell(evals)
    hist = t.history
    self.assertEqual(100, len(hist))
    # A second ask/tell round must append to (not replace) the history.
    evals = [ccs.Evaluation(objective_space = os, configuration = c, values = func(*(c.values))) for c in t.ask(100)]
    t.tell(evals)
    hist = t.history
    self.assertEqual(200, len(hist))
    optims = t.optimums
    objs = [x.objective_values for x in optims]
    objs.sort(key = lambda x: x[0])
    # assert pareto front: once sorted by the first objective, the second
    # objective must be non-increasing, otherwise a point is dominated.
    self.assertTrue(all(objs[i][1] >= objs[i+1][1] for i in range(len(objs)-1)))

  def test_user_defined(self):
    """Exercise UserDefinedTuner: plug in Python callbacks implementing a
    random-sampling tuner with an explicit Pareto-front optimum tracker,
    then verify history bookkeeping and the resulting front.
    """
    # Module-level globals (not closure variables) so the nested callbacks
    # can rebind them via `global`.
    global history
    history = []
    global optimums
    optimums = []

    def delete(data):
      # Tuner teardown callback; nothing to release here.
      return None

    def ask(data, count):
      # When count is None the framework queries the callback; return
      # (None, 1) in that case — presumably "no configs, capacity 1"
      # (TODO confirm against the CCS user-defined tuner contract).
      if count is None:
        return (None, 1)
      else:
        # Rebuild a ConfigurationSpace wrapper from the raw handle stored
        # in the tuner's common data, then sample `count` configurations.
        cs = ccs.ConfigurationSpace.from_handle(ccs.ccs_configuration_space(data.common_data.configuration_space))
        return (cs.samples(count), count)

    def tell(data, evaluations):
      # Record evaluations and maintain the set of non-dominated
      # evaluations (the Pareto front) in `optimums`.
      global history
      global optimums
      history += evaluations
      for e in evaluations:
        discard = False
        new_optimums = []
        for o in optimums:
          if discard:
            # e is already known to be dominated: keep remaining optimums.
            new_optimums.append(o)
          else:
            c = e.cmp(o).value
            if c == ccs.EQUIVALENT or c == ccs.WORSE:
              # e is tied with or dominated by o: keep o, drop e.
              discard = True
              new_optimums.append(o)
            elif c == ccs.NOT_COMPARABLE:
              # Neither dominates the other: both stay on the front.
              new_optimums.append(o)
            # else (e BETTER than o): o is dominated and dropped.
        if not discard:
          new_optimums.append(e)
        optimums = new_optimums
      return None

    def get_history(data):
      global history
      return history

    def get_optimums(data):
      global optimums
      return optimums

    (cs, os) = self.create_tuning_problem()
    t = ccs.UserDefinedTuner(name = "tuner", configuration_space = cs, objective_space = os, delete = delete, ask = ask, tell = tell, get_optimums = get_optimums, get_history = get_history)
    self.assertEqual("tuner", t.name)
    self.assertEqual(ccs.TUNER_USER_DEFINED, t.type.value)
    # The tuner must wrap the exact same underlying CCS handles it was given.
    self.assertEqual(cs.handle.value, t.configuration_space.handle.value)
    self.assertEqual(os.handle.value, t.objective_space.handle.value)
    func = lambda x, y, z: [(x-2)*(x-2), sin(z+y)]
    evals = [ccs.Evaluation(objective_space = os, configuration = c, values = func(*(c.values))) for c in t.ask(100)]
    t.tell(evals)
    hist = t.history
    self.assertEqual(100, len(hist))
    evals = [ccs.Evaluation(objective_space = os, configuration = c, values = func(*(c.values))) for c in t.ask(100)]
    t.tell(evals)
    hist = t.history
    self.assertEqual(200, len(hist))
    optims = t.optimums
    objs = [x.objective_values for x in optims]
    objs.sort(key = lambda x: x[0])
    # assert pareto front: sorted by the first objective, the second
    # objective must be non-increasing.
    self.assertTrue(all(objs[i][1] >= objs[i+1][1] for i in range(len(objs)-1)))


if __name__ == '__main__':
    # Discover and run all TestTuner test cases in this module.
    unittest.main()