test/py/cmdlib/group_unittest.py @ 916c0e6f

#!/usr/bin/python
#

# Copyright (C) 2008, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Tests for LUGroup*

"""

import itertools

from ganeti import constants
from ganeti import opcodes
from ganeti import query

from testsupport import *

import testutils


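# Tests for LUGroupAdd: creating node groups, rejecting duplicate names and
# validating the supplied disk parameters and instance policy.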
class TestLUGroupAdd(CmdlibTestCase):
  def testAddExistingGroup(self):
    self.cfg.AddNewNodeGroup(name="existing_group")

    op = opcodes.OpGroupAdd(group_name="existing_group")
    self.ExecOpCodeExpectOpPrereqError(
      op, "Desired group name 'existing_group' already exists")

  def testAddNewGroup(self):
    op = opcodes.OpGroupAdd(group_name="new_group")

    self.ExecOpCode(op)

    self.mcpu.assertLogIsEmpty()

  def testAddNewGroupParams(self):
    ndparams = {constants.ND_EXCLUSIVE_STORAGE: True}
    hv_state = {constants.HT_FAKE: {constants.HVST_CPU_TOTAL: 8}}
    disk_state = {
      constants.DT_PLAIN: {
        "mock_vg": {constants.DS_DISK_TOTAL: 10}
      }
    }
    diskparams = {constants.DT_RBD: {constants.RBD_POOL: "mock_pool"}}
    ipolicy = constants.IPOLICY_DEFAULTS
    op = opcodes.OpGroupAdd(group_name="new_group",
                            ndparams=ndparams,
                            hv_state=hv_state,
                            disk_state=disk_state,
                            diskparams=diskparams,
                            ipolicy=ipolicy)

    self.ExecOpCode(op)

    self.mcpu.assertLogIsEmpty()

  def testAddNewGroupInvalidDiskparams(self):
    diskparams = {constants.DT_RBD: {constants.LV_STRIPES: 1}}
    op = opcodes.OpGroupAdd(group_name="new_group",
                            diskparams=diskparams)

    self.ExecOpCodeExpectOpPrereqError(
      op, "Provided option keys not supported")

  def testAddNewGroupInvalidIPolicy(self):
    ipolicy = {"invalid_key": "value"}
    op = opcodes.OpGroupAdd(group_name="new_group",
                            ipolicy=ipolicy)

    self.ExecOpCodeExpectOpPrereqError(op, "Invalid keys in ipolicy")


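# Tests for LUGroupAssignNodes: moving nodes between groups, including the
# handling of DRBD instances that would end up split across groups.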
class TestLUGroupAssignNodes(CmdlibTestCase):
  def __init__(self, methodName='runTest'):
    super(TestLUGroupAssignNodes, self).__init__(methodName)

    self.op = opcodes.OpGroupAssignNodes(group_name="default",
                                         nodes=[])

  def testAssignSingleNode(self):
    node = self.cfg.AddNewNode()
    op = self.CopyOpCode(self.op, nodes=[node.name])

    self.ExecOpCode(op)

    self.mcpu.assertLogIsEmpty()

  def _BuildSplitInstanceSituation(self):
    node = self.cfg.AddNewNode()
    self.cfg.AddNewInstance(disk_template=constants.DT_DRBD8,
                            primary_node=self.master,
                            secondary_node=node)
    group = self.cfg.AddNewNodeGroup()

    return (node, group)

  def testSplitInstanceNoForce(self):
    (node, group) = self._BuildSplitInstanceSituation()
    op = opcodes.OpGroupAssignNodes(group_name=group.name,
                                    nodes=[node.name])

    self.ExecOpCodeExpectOpExecError(
      op, "instances get split by this change and --force was not given")

  def testSplitInstanceForce(self):
    (node, group) = self._BuildSplitInstanceSituation()

    node2 = self.cfg.AddNewNode(group=group)
    self.cfg.AddNewInstance(disk_template=constants.DT_DRBD8,
                            primary_node=self.master,
                            secondary_node=node2)

    op = opcodes.OpGroupAssignNodes(group_name=group.name,
                                    nodes=[node.name],
                                    force=True)

    self.ExecOpCode(op)

    self.mcpu.assertLogContainsRegex("will split the following instances")
    self.mcpu.assertLogContainsRegex(
      "instances continue to be split across groups")


  @withLockedLU
  def testCheckAssignmentForSplitInstances(self, lu):
    g1 = self.cfg.AddNewNodeGroup()
    g2 = self.cfg.AddNewNodeGroup()
    g3 = self.cfg.AddNewNodeGroup()

    for (n, g) in [("n1a", g1), ("n1b", g1), ("n2a", g2), ("n2b", g2),
                   ("n3a", g3), ("n3b", g3), ("n3c", g3)]:
      self.cfg.AddNewNode(uuid=n, group=g.uuid)

    for uuid, pnode, snode in [("inst1a", "n1a", "n1b"),
                               ("inst1b", "n1b", "n1a"),
                               ("inst2a", "n2a", "n2b"),
                               ("inst3a", "n3a", None),
                               ("inst3b", "n3b", "n1b"),
                               ("inst3c", "n3b", "n2b")]:
      dt = constants.DT_DISKLESS if snode is None else constants.DT_DRBD8
      self.cfg.AddNewInstance(uuid=uuid,
                              disk_template=dt,
                              primary_node=pnode,
                              secondary_node=snode)

    # Test first with the existing state.
    (new, prev) = lu.CheckAssignmentForSplitInstances(
      [], self.cfg.GetAllNodesInfo(), self.cfg.GetAllInstancesInfo())

    self.assertEqual([], new)
    self.assertEqual(set(["inst3b", "inst3c"]), set(prev))

    # And now some changes.
    (new, prev) = lu.CheckAssignmentForSplitInstances(
      [("n1b", g3.uuid)],
      self.cfg.GetAllNodesInfo(),
      self.cfg.GetAllInstancesInfo())

    self.assertEqual(set(["inst1a", "inst1b"]), set(new))
    self.assertEqual(set(["inst3c"]), set(prev))


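# Tests for LUGroupQuery: querying node groups by name and by UUID.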
class TestLUGroupQuery(CmdlibTestCase):
  def setUp(self):
    super(TestLUGroupQuery, self).setUp()
    self.fields = query._BuildGroupFields().keys()

  def testInvalidGroupName(self):
    op = opcodes.OpGroupQuery(names=["does_not_exist"],
                              output_fields=self.fields)

    self.ExecOpCodeExpectOpPrereqError(op, "Some groups do not exist")

  def testQueryAllGroups(self):
    op = opcodes.OpGroupQuery(output_fields=self.fields)

    self.ExecOpCode(op)

    self.mcpu.assertLogIsEmpty()

  def testQueryGroupsByNameAndUuid(self):
    group1 = self.cfg.AddNewNodeGroup()
    group2 = self.cfg.AddNewNodeGroup()

    node1 = self.cfg.AddNewNode(group=group1)
    node2 = self.cfg.AddNewNode(group=group1)
    self.cfg.AddNewInstance(disk_template=constants.DT_DRBD8,
                            primary_node=node1,
                            secondary_node=node2)
    self.cfg.AddNewInstance(primary_node=node2)

    op = opcodes.OpGroupQuery(names=[group1.name, group2.uuid],
                              output_fields=self.fields)

    self.ExecOpCode(op)

    self.mcpu.assertLogIsEmpty()


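# Tests for LUGroupSetParams: modifying group parameters and reporting
# instances that violate a newly set instance policy.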
class TestLUGroupSetParams(CmdlibTestCase):
  def testNoModifications(self):
    op = opcodes.OpGroupSetParams(group_name=self.group.name)

    self.ExecOpCodeExpectOpPrereqError(op,
                                       "Please pass at least one modification")

  def testModifyingAll(self):
    ndparams = {constants.ND_EXCLUSIVE_STORAGE: True}
    hv_state = {constants.HT_FAKE: {constants.HVST_CPU_TOTAL: 8}}
    disk_state = {
      constants.DT_PLAIN: {
        "mock_vg": {constants.DS_DISK_TOTAL: 10}
      }
    }
    diskparams = {constants.DT_RBD: {constants.RBD_POOL: "mock_pool"}}
    ipolicy = {constants.IPOLICY_DTS: [constants.DT_DRBD8]}
    op = opcodes.OpGroupSetParams(group_name=self.group.name,
                                  ndparams=ndparams,
                                  hv_state=hv_state,
                                  disk_state=disk_state,
                                  diskparams=diskparams,
                                  ipolicy=ipolicy)

    self.ExecOpCode(op)

    self.mcpu.assertLogIsEmpty()

  def testInvalidDiskparams(self):
    diskparams = {constants.DT_RBD: {constants.LV_STRIPES: 1}}
    op = opcodes.OpGroupSetParams(group_name=self.group.name,
                                  diskparams=diskparams)

    self.ExecOpCodeExpectOpPrereqError(
      op, "Provided option keys not supported")

  def testIPolicyNewViolations(self):
    self.cfg.AddNewInstance(beparams={constants.BE_VCPUS: 8})

    min_max = dict(constants.ISPECS_MINMAX_DEFAULTS)
    min_max[constants.ISPECS_MAX].update({constants.ISPEC_CPU_COUNT: 2})
    ipolicy = {constants.ISPECS_MINMAX: [min_max]}
    op = opcodes.OpGroupSetParams(group_name=self.group.name,
                                  ipolicy=ipolicy)

    self.ExecOpCode(op)

    self.assertLogContainsRegex(
      "After the ipolicy change the following instances violate them")


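# Tests for LUGroupRemove: a group can only be removed if it is empty and
# not the last remaining group.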
class TestLUGroupRemove(CmdlibTestCase):
  def testNonEmptyGroup(self):
    group = self.cfg.AddNewNodeGroup()
    self.cfg.AddNewNode(group=group)
    op = opcodes.OpGroupRemove(group_name=group.name)

    self.ExecOpCodeExpectOpPrereqError(op, "Group .* not empty")

  def testRemoveLastGroup(self):
    self.master.group = "invalid_group"
    op = opcodes.OpGroupRemove(group_name=self.group.name)

    self.ExecOpCodeExpectOpPrereqError(
      op, "Group .* is the only group, cannot be removed")

  def testRemoveGroup(self):
    group = self.cfg.AddNewNodeGroup()
    op = opcodes.OpGroupRemove(group_name=group.name)

    self.ExecOpCode(op)

    self.mcpu.assertLogIsEmpty()


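# Tests for LUGroupRename: renaming a group and rejecting names that clash
# with existing groups.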
class TestLUGroupRename(CmdlibTestCase):
  def testRenameToExistingName(self):
    group = self.cfg.AddNewNodeGroup()
    op = opcodes.OpGroupRename(group_name=group.name,
                               new_name=self.group.name)

    self.ExecOpCodeExpectOpPrereqError(
      op, "Desired new name .* clashes with existing node group")

  def testRename(self):
    group = self.cfg.AddNewNodeGroup()
    op = opcodes.OpGroupRename(group_name=group.name,
                               new_name="new_group_name")

    self.ExecOpCode(op)

    self.mcpu.assertLogIsEmpty()


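# Tests for LUGroupEvacuate: evacuating a group via the (mocked) iallocator,
# with and without explicitly named target groups.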
class TestLUGroupEvacuate(CmdlibTestCase):
  def testEvacuateEmptyGroup(self):
    group = self.cfg.AddNewNodeGroup()
    op = opcodes.OpGroupEvacuate(group_name=group.name)

    self.iallocator_cls.return_value.result = ([], [], [])

    self.ExecOpCode(op)

  def testEvacuateOnlyGroup(self):
    op = opcodes.OpGroupEvacuate(group_name=self.group.name)

    self.ExecOpCodeExpectOpPrereqError(
      op, "There are no possible target groups")

  def testEvacuateWithTargetGroups(self):
    group = self.cfg.AddNewNodeGroup()
    self.cfg.AddNewNode(group=group)
    self.cfg.AddNewNode(group=group)

    target_group1 = self.cfg.AddNewNodeGroup()
    target_group2 = self.cfg.AddNewNodeGroup()
    op = opcodes.OpGroupEvacuate(group_name=group.name,
                                 target_groups=[target_group1.name,
                                                target_group2.name])

    self.iallocator_cls.return_value.result = ([], [], [])

    self.ExecOpCode(op)

  def testFailingIAllocator(self):
    group = self.cfg.AddNewNodeGroup()
    op = opcodes.OpGroupEvacuate(group_name=group.name)

    self.iallocator_cls.return_value.success = False

    self.ExecOpCodeExpectOpPrereqError(
      op, "Can't compute group evacuation using iallocator")


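# Tests for LUGroupVerifyDisks: verifying LV and DRBD disk status through the
# mocked lv_list and drbd_needs_activation RPC results.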
class TestLUGroupVerifyDisks(CmdlibTestCase):
  def testNoInstances(self):
    op = opcodes.OpGroupVerifyDisks(group_name=self.group.name)

    self.ExecOpCode(op)

    self.mcpu.assertLogIsEmpty()

  def testOfflineAndFailingNode(self):
    node = self.cfg.AddNewNode(offline=True)
    self.cfg.AddNewInstance(primary_node=node,
                            admin_state=constants.ADMINST_UP)
    self.cfg.AddNewInstance(admin_state=constants.ADMINST_UP)
    self.rpc.call_lv_list.return_value = \
      self.RpcResultsBuilder() \
        .AddFailedNode(self.master) \
        .AddOfflineNode(node) \
        .Build()

    op = opcodes.OpGroupVerifyDisks(group_name=self.group.name)

    (nerrors, offline, missing) = self.ExecOpCode(op)

    self.assertEqual(1, len(nerrors))
    self.assertEqual(0, len(offline))
    self.assertEqual(2, len(missing))

  def testValidNodeResult(self):
    self.cfg.AddNewInstance(
      disks=[self.cfg.CreateDisk(dev_type=constants.DT_PLAIN),
             self.cfg.CreateDisk(dev_type=constants.DT_PLAIN)
             ],
      admin_state=constants.ADMINST_UP)
    self.rpc.call_lv_list.return_value = \
      self.RpcResultsBuilder() \
        .AddSuccessfulNode(self.master, {
          "mockvg/mock_disk_1": (None, None, True),
          "mockvg/mock_disk_2": (None, None, False)
        }) \
        .Build()

    op = opcodes.OpGroupVerifyDisks(group_name=self.group.name)

    (nerrors, offline, missing) = self.ExecOpCode(op)

    self.assertEqual(0, len(nerrors))
    self.assertEqual(1, len(offline))
    self.assertEqual(0, len(missing))

  def testDrbdDisk(self):
    node1 = self.cfg.AddNewNode()
    node2 = self.cfg.AddNewNode()
    node3 = self.cfg.AddNewNode()
    node4 = self.cfg.AddNewNode()

    valid_disk = self.cfg.CreateDisk(dev_type=constants.DT_DRBD8,
                                     primary_node=node1,
                                     secondary_node=node2)
    broken_disk = self.cfg.CreateDisk(dev_type=constants.DT_DRBD8,
                                      primary_node=node1,
                                      secondary_node=node2)
    failing_node_disk = self.cfg.CreateDisk(dev_type=constants.DT_DRBD8,
                                            primary_node=node3,
                                            secondary_node=node4)

    self.cfg.AddNewInstance(disks=[valid_disk, broken_disk],
                            primary_node=node1,
                            admin_state=constants.ADMINST_UP)
    self.cfg.AddNewInstance(disks=[failing_node_disk],
                            primary_node=node3,
                            admin_state=constants.ADMINST_UP)

    lv_list_result = dict(("/".join(disk.logical_id), (None, None, True))
                          for disk in itertools.chain(valid_disk.children,
                                                      broken_disk.children))
    self.rpc.call_lv_list.return_value = \
      self.RpcResultsBuilder() \
        .AddSuccessfulNode(node1, lv_list_result) \
        .AddSuccessfulNode(node2, lv_list_result) \
        .AddFailedNode(node3) \
        .AddFailedNode(node4) \
        .Build()

    def GetDrbdNeedsActivationResult(node_uuid, *_):
      if node_uuid == node1.uuid:
        return self.RpcResultsBuilder() \
                 .CreateSuccessfulNodeResult(node1, [])
      elif node_uuid == node2.uuid:
        return self.RpcResultsBuilder() \
                 .CreateSuccessfulNodeResult(node2, [broken_disk.uuid])
      elif node_uuid == node3.uuid or node_uuid == node4.uuid:
        return self.RpcResultsBuilder() \
                 .CreateFailedNodeResult(node_uuid)

    self.rpc.call_drbd_needs_activation.side_effect = \
      GetDrbdNeedsActivationResult

    op = opcodes.OpGroupVerifyDisks(group_name=self.group.name)

    (nerrors, offline, missing) = self.ExecOpCode(op)

    self.assertEqual(2, len(nerrors))
    self.assertEqual(1, len(offline))
    self.assertEqual(1, len(missing))


if __name__ == "__main__":
  testutils.GanetiTestProgram()