@@ -222,6 +222,23 @@
         self.assertEqual(t_assigned, list(range(5)))
         self.assertEqual(h_used, list(range(5)))
 
+    def test_stop_at_capacity(self):
+        # just one host
+        host = self.mkhost(id=1, capacity=5.0)
+        self.sched._get_hosts.return_value = [host]
+        self.sched.get_hosts()
+        # and more tasks than will fit
+        self.sched.free_tasks = [self.mktask(task_id=n, weight=1.0) for n in range(10)]
+
+        self.sched.do_schedule()
+
+        # 5 tasks with weight=1.0 should fill up capacity
+        # (overcommit only applies for a single task going over)
+        self.assertEqual(len(self.assigns), 5)
+        t_assigned = [t['task_id'] for t, h in self.assigns]
+        t_assigned.sort()
+        self.assertEqual(t_assigned, list(range(5)))
+
     def test_active_tasks(self):
         self.context.opts['CapacityOvercommit'] = 1.0
         hosts = [self.mkhost(id=n, capacity=2.0) for n in range(5)]
| |
The scheduler was incorrectly calculating min_avail (the amount of capacity a task needs) in two different ways.
In the first task loop, using min instead of max is badly wrong and causes the scheduler to miscalculate demand. This won't necessarily lead to obvious problems, but it largely defeats the code's attempt to rank hosts by demand.
In the second task loop, the calculation is less wrong, but it can go negative for tasks whose weight is less than the overcommit setting. That treats overcommit as blanket "extra capacity" rather than as intended: allowing a single task to overshoot the capacity.
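For illustration, a minimal sketch of the intended calculation (the helper name and signature here are hypothetical, not the scheduler's actual code): the needed capacity should be clamped at zero rather than computed with min or left unclamped.

```python
# Hypothetical sketch, not koji's actual code: the free capacity a
# host must have before it can take a task.
def min_avail(weight, overcommit):
    # Clamp at zero: overcommit may let a single task overshoot a
    # host's capacity, but a negative value here would instead grant
    # every small task blanket extra capacity.
    return max(0.0, weight - overcommit)
```

For example, with overcommit=1.0 a task of weight 0.5 gives -0.5 in the unclamped form, so the task would "fit" on a host with no remaining capacity, which is how more tasks than a host's capacity allows can end up assigned.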
The added unit test demonstrates the problem in the second loop and fails without this fix.
Fixes https://pagure.io/koji/issue/4359