root / lib / objects.py @ 332a83ca

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable=E0203,W0201,R0902

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

# R0902: Allow instances of these objects to have more than 20 attributes

import ConfigParser
import re
import copy
import logging
import time
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants
from ganeti import netutils
from ganeti import outils
from ganeti import utils

from socket import AF_INET


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]


def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
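
# Illustrative usage (not part of the original module): custom values
# override the defaults and any skip_keys are dropped from the result.
#
#   >>> FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"])
#   {'b': 3}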


def FillIPolicy(default_ipolicy, custom_ipolicy, skip_keys=None):
  """Fills an instance policy with defaults.

  """
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
  ret_dict = {}
  for key in constants.IPOLICY_ISPECS:
    ret_dict[key] = FillDict(default_ipolicy[key],
                             custom_ipolicy.get(key, {}),
                             skip_keys=skip_keys)
  # list items
  for key in [constants.IPOLICY_DTS]:
    ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key]))
  # other items which we know we can directly copy (immutables)
  for key in constants.IPOLICY_PARAMETERS:
    ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key])

  return ret_dict


def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
  """Fills the disk parameter defaults.

  @see: L{FillDict} for parameters and return value

  """
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES

  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
                            skip_keys=skip_keys))
              for dt in constants.DISK_TEMPLATES)


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target


def UpgradeBeParams(target):
  """Update the be parameters dict to the new format.

  @type target: dict
  @param target: "be" parameters dict

  """
  if constants.BE_MEMORY in target:
    memory = target[constants.BE_MEMORY]
    target[constants.BE_MAXMEM] = memory
    target[constants.BE_MINMEM] = memory
    del target[constants.BE_MEMORY]
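
# Illustrative sketch (not part of the original module): UpgradeBeParams
# rewrites the legacy single "memory" backend parameter into the newer
# maxmem/minmem pair, in place. Assuming the usual constant values
# ("memory", "maxmem", "minmem"):
#
#   >>> be = {constants.BE_MEMORY: 128}
#   >>> UpgradeBeParams(be)
#   >>> sorted(be.items())
#   [('maxmem', 128), ('minmem', 128)]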


def UpgradeDiskParams(diskparams):
  """Upgrade the disk parameters.

  @type diskparams: dict
  @param diskparams: disk parameters to upgrade
  @rtype: dict
  @return: the upgraded disk parameters dict

  """
  if not diskparams:
    result = {}
  else:
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)

  return result


def UpgradeNDParams(ndparams):
  """Upgrade ndparams structure.

  @type ndparams: dict
  @param ndparams: node parameters to upgrade
  @rtype: dict
  @return: the upgraded node parameters dict

  """
  if ndparams is None:
    ndparams = {}

  if (constants.ND_OOB_PROGRAM in ndparams and
      ndparams[constants.ND_OOB_PROGRAM] is None):
    # will be reset by the line below
    del ndparams[constants.ND_OOB_PROGRAM]
  return FillDict(constants.NDC_DEFAULTS, ndparams)


def MakeEmptyIPolicy():
  """Create empty IPolicy dictionary.

  """
  return dict([
    (constants.ISPECS_MIN, {}),
    (constants.ISPECS_MAX, {}),
    (constants.ISPECS_STD, {}),
    ])


class ConfigObject(outils.ValidatedSlots):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __getattr__(self, name):
    if name not in self.GetAllSlots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    slots = self.GetAllSlots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  def Validate(self):
    """Validates the slots.

    """

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self.GetAllSlots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable=W0142
    return obj

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
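
# Illustrative sketch (not part of the original module): a hypothetical
# ConfigObject subclass round-trips through ToDict()/FromDict(), and slots
# that were never set read back as None instead of raising AttributeError.
# This assumes, as FromDict itself does, that the ValidatedSlots constructor
# accepts slot values as keyword arguments.
#
#   class _Example(ConfigObject):
#     __slots__ = ["alpha", "beta"]
#
#   obj = _Example.FromDict({"alpha": 1})
#   assert obj.ToDict() == {"alpha": 1}
#   assert obj.Copy().alpha == 1 and obj.beta is None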


class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags set.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
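
# Illustrative sketch (not part of the original module): tags must be
# non-empty strings, at most MAX_TAG_LEN characters long and matching
# VALID_TAG_RE; AddTag and RemoveTag run ValidateTag first.
#
#   TaggableObject.ValidateTag("web:frontend")   # passes silently
#   TaggableObject.ValidateTag("bad tag")        # raises errors.TagError,
#                                                # spaces are not allowed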


class MasterNetworkParameters(ConfigObject):
  """Network configuration parameters for the master

  @ivar name: master name
  @ivar ip: master IP
  @ivar netmask: master netmask
  @ivar netdev: master network device
  @ivar ip_family: master IP family

  """
  __slots__ = [
    "name",
    "ip",
    "netmask",
    "netdev",
    "ip_family",
    ]


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = [
    "version",
    "cluster",
    "nodes",
    "nodegroups",
    "instances",
    "networks",
    "serial_no",
    ] + _TIMESTAMPS

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances", "nodegroups", "networks":
      mydict[key] = outils.ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data.

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = \
      outils.ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = \
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if the configuration has at least one disk of the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set a helper let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
    if self.networks is None:
      self.networks = {}
    for network in self.networks.values():
      network.UpgradeConfig()


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "network", "nicparams", "netinfo"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/values
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    mode = nicparams[constants.NIC_MODE]
    if (mode not in constants.NIC_VALID_MODES and
        mode != constants.VALUE_AUTO):
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)

    if (mode == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      raise errors.ConfigurationError("Missing bridged NIC link")


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode", "params"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    elif self.dev_type == constants.LD_BLOCKDEV:
      return self.logical_id[1]
    elif self.dev_type == constants.LD_RBD:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
                         constants.LD_BLOCKDEV, constants.LD_RBD,
                         constants.LD_EXT]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result
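
  # Illustrative sketch (not part of the original module): for a DRBD8 disk
  # replicated between "node1" and "node2" whose LV children live on those
  # same nodes, ComputeNodeTree("node1") collapses the tree to one entry per
  # node for the top-level device only:
  #
  #   [("node1", <DRBD8 disk>), ("node2", <DRBD8 disk>)]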

  def ComputeGrowth(self, amount):
    """Compute the per-VG growth requirements.

    This only works for VG-based disks.

    @type amount: integer
    @param amount: the desired increase in (user-visible) disk space
    @rtype: dict
    @return: a dictionary of volume-groups and the required size

    """
    if self.dev_type == constants.LD_LV:
      return {self.logical_id[0]: amount}
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        return self.children[0].ComputeGrowth(amount)
      else:
        return {}
    else:
      # Other disk types do not require VG space
      return {}

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
                         constants.LD_RBD, constants.LD_EXT):
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def Update(self, size=None, mode=None):
    """Apply changes to size and mode.

    """
    if self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].Update(size=size, mode=mode)
    else:
      assert not self.children

    if size is not None:
      self.size = size
    if mode is not None:
      self.mode = mode

  def UnsetSize(self):
    """Recursively sets the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine also descends to and updates its children, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = outils.ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks.

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()

    # FIXME: Make this configurable in Ganeti 2.7
    self.params = {}
    # add here config upgrade for this disk

  @staticmethod
  def ComputeLDParams(disk_template, disk_params):
    """Computes Logical Disk parameters from Disk Template parameters.

    @type disk_template: string
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
    @type disk_params: dict
    @param disk_params: disk template parameters;
                        dict(template_name -> parameters)
    @rtype: list(dict)
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
      contains the LD parameters of the node. The tree is flattened in-order.

    """
    if disk_template not in constants.DISK_TEMPLATES:
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)

    assert disk_template in disk_params

    result = list()
    dt_params = disk_params[disk_template]
    if disk_template == constants.DT_DRBD8:
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], {
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
        }))

      # data LV
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
        }))

      # metadata LV
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
        }))

    elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])

    elif disk_template == constants.DT_PLAIN:
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
        }))

    elif disk_template == constants.DT_BLOCK:
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])

    elif disk_template == constants.DT_RBD:
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], {
        constants.LDP_POOL: dt_params[constants.RBD_POOL],
        }))

    elif disk_template == constants.DT_EXT:
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT])

    return result
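
# Illustrative sketch (not part of the original module): for the DRBD8
# template, ComputeLDParams returns three parameter dicts flattened
# in-order (the DRBD8 device, the data LV, the metadata LV), while e.g.
# the plain template yields a single LV dict. Assuming that
# constants.DISK_DT_DEFAULTS carries complete per-template defaults, as
# UpgradeDiskParams above relies on:
#
#   ld_params = Disk.ComputeLDParams(constants.DT_DRBD8,
#                                    constants.DISK_DT_DEFAULTS)
#   assert len(ld_params) == 3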


class InstancePolicy(ConfigObject):
  """Config object representing the instance policy limits dictionary.

  Note that this object is not actually used in the config, it's just
  used as a placeholder for a few functions.

  """
  @classmethod
  def CheckParameterSyntax(cls, ipolicy, check_std):
    """Check the instance policy for validity.

    """
    for param in constants.ISPECS_PARAMETERS:
      InstancePolicy.CheckISpecSyntax(ipolicy, param, check_std)
    if constants.IPOLICY_DTS in ipolicy:
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
    for key in constants.IPOLICY_PARAMETERS:
      if key in ipolicy:
        InstancePolicy.CheckParameter(key, ipolicy[key])
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
    if wrong_keys:
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
                                      utils.CommaJoin(wrong_keys))

  @classmethod
  def CheckISpecSyntax(cls, ipolicy, name, check_std):
    """Check the instance policy for validity on a given key.

    We check if the instance policy makes sense for a given key, that is
    if ipolicy[min][name] <= ipolicy[std][name] <= ipolicy[max][name].

    @type ipolicy: dict
    @param ipolicy: dictionary with min, max, std specs
    @type name: string
    @param name: the parameter whose limits are being checked
    @type check_std: bool
    @param check_std: Whether to check std value or just assume compliance
    @raise errors.ConfigurationError: when specs for given name are not valid

    """
    min_v = ipolicy[constants.ISPECS_MIN].get(name, 0)

    if check_std:
      std_v = ipolicy[constants.ISPECS_STD].get(name, min_v)
      std_msg = std_v
    else:
      std_v = min_v
      std_msg = "-"

    max_v = ipolicy[constants.ISPECS_MAX].get(name, std_v)
    err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
           (name,
            ipolicy[constants.ISPECS_MIN].get(name, "-"),
            ipolicy[constants.ISPECS_MAX].get(name, "-"),
            std_msg))
    if min_v > std_v or std_v > max_v:
      raise errors.ConfigurationError(err)

  @classmethod
  def CheckDiskTemplates(cls, disk_templates):
    """Checks the disk templates for validity.

    """
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
    if wrong:
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
                                      utils.CommaJoin(wrong))

  @classmethod
  def CheckParameter(cls, key, value):
    """Checks a parameter.

    Currently we expect all parameters to be float values.

    """
    try:
      float(value)
    except (TypeError, ValueError), err:
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
                                      " '%s', error: %s" % (key, value, err))


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "osparams",
    "admin_state",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of names of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of names of all the nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary of
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
        volumeN is of the form "vg_name/lv_name", compatible with
        GetVolumeList()

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = {
        node: [],
        }
      ret = lvmap
    else:
      if not node in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret

  def FindDisk(self, idx):
    """Find a disk given its index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except (TypeError, ValueError), err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
                                 errors.ECODE_INVAL)

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = outils.ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    if "admin_state" not in val:
      if val.get("admin_up", False):
        val["admin_state"] = constants.ADMINST_UP
      else:
        val["admin_state"] = constants.ADMINST_DOWN
    if "admin_up" in val:
      del val["admin_up"]
    obj = super(Instance, cls).FromDict(val)
    obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    if self.hvparams:
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass
    if self.osparams is None:
      self.osparams = {}
    UpgradeBeParams(self.beparams)


class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the parameters supported by this OS

  @type VARIANT_DELIM: string
  @cvar VARIANT_DELIM: the variant delimiter

  """
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]

  VARIANT_DELIM = "+"

  @classmethod
  def SplitNameVariant(cls, name):
    """Splits the name into the proper name and variant.

    @param name: the OS (unprocessed) name
    @rtype: list
    @return: a list of two elements; if the original name didn't
        contain a variant, it's returned as an empty string

    """
    nv = name.split(cls.VARIANT_DELIM, 1)
    if len(nv) == 1:
      nv.append("")
    return nv

  @classmethod
  def GetName(cls, name):
    """Returns the proper name of the os (without the variant).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[0]

  @classmethod
  def GetVariant(cls, name):
    """Returns the variant of the OS (without the base name).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[1]
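
# Illustrative usage (not part of the original module): an OS name may carry
# a variant after the "+" delimiter, which SplitNameVariant separates:
#
#   >>> OS.SplitNameVariant("debian+squeeze")
#   ['debian', 'squeeze']
#   >>> OS.SplitNameVariant("debian")
#   ['debian', '']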


class ExtStorage(ConfigObject):
  """Config object representing an External Storage Provider.

  """
  __slots__ = [
    "name",
    "path",
    "create_script",
    "remove_script",
    "grow_script",
    "attach_script",
    "detach_script",
    "setinfo_script",
    "verify_script",
    "supported_parameters",
    ]


class NodeHvState(ConfigObject):
  """Hypervisor state on a node.

  @ivar mem_total: Total amount of memory
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
    available)
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
    rounding
  @ivar mem_inst: Memory used by instances living on node
  @ivar cpu_total: Total node CPU core count
  @ivar cpu_node: Number of CPU cores reserved for the node itself

  """
  __slots__ = [
    "mem_total",
    "mem_node",
    "mem_hv",
    "mem_inst",
    "cpu_total",
    "cpu_node",
    ] + _TIMESTAMPS


class NodeDiskState(ConfigObject):
  """Disk state on a node.

  """
  __slots__ = [
    "total",
    "reserved",
    "overhead",
    ] + _TIMESTAMPS


class Node(TaggableObject):
  """Config object representing a node.

  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
  @ivar hv_state_static: Hypervisor state overridden by user
  @ivar disk_state: Disk state (e.g. free space)
  @ivar disk_state_static: Disk state overridden by user

  """
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    "group",
    "master_capable",
    "vm_capable",
    "ndparams",
    "powered",
    "hv_state",
    "hv_state_static",
    "disk_state",
    "disk_state_static",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.master_capable is None:
      self.master_capable = True

    if self.vm_capable is None:
      self.vm_capable = True

    if self.ndparams is None:
      self.ndparams = {}
    # And remove any global parameter
    for key in constants.NDC_GLOBALS:
      if key in self.ndparams:
        logging.warning("Ignoring %s node parameter for node %s",
                        key, self.name)
        del self.ndparams[key]

    if self.powered is None:
      self.powered = True

  def ToDict(self):
    """Custom function for serializing.

    """
    data = super(Node, self).ToDict()

    hv_state = data.get("hv_state", None)
    if hv_state is not None:
      data["hv_state"] = outils.ContainerToDicts(hv_state)

    disk_state = data.get("disk_state", None)
    if disk_state is not None:
      data["disk_state"] = \
        dict((key, outils.ContainerToDicts(value))
             for (key, value) in disk_state.items())

    return data

  @classmethod
  def FromDict(cls, val):
    """Custom function for deserializing.

    """
    obj = super(Node, cls).FromDict(val)

    if obj.hv_state is not None:
      obj.hv_state = \
        outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)

    if obj.disk_state is not None:
      obj.disk_state = \
        dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
             for (key, value) in obj.disk_state.items())

    return obj


class NodeGroup(TaggableObject):
  """Config object representing a node group."""
  __slots__ = [
    "name",
    "members",
    "ndparams",
    "diskparams",
    "ipolicy",
    "serial_no",
    "hv_state_static",
    "disk_state_static",
    "alloc_policy",
    "networks",
    ] + _TIMESTAMPS + _UUID

  def ToDict(self):
    """Custom function for nodegroup.

    This discards the members object, which gets recalculated and is only kept
    in memory.

    """
    mydict = super(NodeGroup, self).ToDict()
    del mydict["members"]
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for nodegroup.

    The members slot is initialized to an empty list, upon deserialization.

    """
    obj = super(NodeGroup, cls).FromDict(val)
    obj.members = []
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.ndparams is None:
      self.ndparams = {}

    if self.serial_no is None:
      self.serial_no = 1

    if self.alloc_policy is None:
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED

    # We only update mtime, and not ctime, since we would not be able
    # to provide a correct value for creation time.
    if self.mtime is None:
      self.mtime = time.time()

    if self.diskparams is None:
      self.diskparams = {}
    if self.ipolicy is None:
      self.ipolicy = MakeEmptyIPolicy()

    if self.networks is None:
      self.networks = {}

  def FillND(self, node):
    """Return filled out ndparams for L{objects.Node}.

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(node.ndparams)

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the node group defaults

    """
    return FillDict(self.ndparams, ndparams)


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "master_netmask",
    "use_external_mip_script",
    "cluster_name",
    "file_storage_dir",
    "shared_file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "ipolicy",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "ndparams",
    "diskparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    "hv_state_static",
    "disk_state_static",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.ndparams = UpgradeNDParams(self.ndparams)

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    if self.diskparams:
      self.diskparams = UpgradeDiskParams(self.diskparams)
    else:
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)

  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()

    if self.tcpudp_port_pool is None:
      tcpudp_port_pool = []
    else:
      tcpudp_port_pool = list(self.tcpudp_port_pool)

    mydict["tcpudp_port_pool"] = tcpudp_port_pool

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)

    if obj.tcpudp_port_pool is None:
      obj.tcpudp_port_pool = set()
    elif not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)

    return obj

  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: The diskparams
    @return: The defaults dict

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict
1652

    
1653
  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1654
    """Fill a given hvparams dict with cluster defaults.
1655

1656
    @type hv_name: string
1657
    @param hv_name: the hypervisor to use
1658
    @type os_name: string
1659
    @param os_name: the OS to use for overriding the hypervisor defaults
1660
    @type skip_globals: boolean
1661
    @param skip_globals: if True, the global hypervisor parameters will
1662
        not be filled
1663
    @rtype: dict
1664
    @return: a copy of the given hvparams with missing keys filled from
1665
        the cluster defaults
1666

1667
    """
1668
    if skip_globals:
1669
      skip_keys = constants.HVC_GLOBALS
1670
    else:
1671
      skip_keys = []
1672

    
1673
    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1674
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1675

    
1676
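  # Illustrative sketch with hypothetical parameter values: hvparams are
  # layered cluster-wide defaults -> per-OS overrides -> the passed-in dict,
  # with later layers winning, e.g.
  #
  #   cluster.hvparams = {"xen-pvm": {"kernel_path": "/vmlinuz",
  #                                   "root_path": "/dev/xvda1"}}
  #   cluster.os_hvp = {"debian": {"xen-pvm": {"root_path": "/dev/xvda2"}}}
  #   cluster.SimpleFillHV("xen-pvm", "debian",
  #                        {"kernel_path": "/boot/vmlinuz"})
  #   # -> {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/xvda2"}
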
  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)

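  # Illustrative sketch with hypothetical values: for a variant such as
  # "debian+secure", defaults come first from the base OS ("debian"), then
  # from the full variant name, and the explicitly passed os_params win, e.g.
  #
  #   cluster.osparams = {"debian": {"mirror": "a", "arch": "amd64"},
  #                       "debian+secure": {"mirror": "b"}}
  #   cluster.SimpleFillOS("debian+secure", {"arch": "i386"})
  #   # -> {"mirror": "b", "arch": "i386"}
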
  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

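  # Note (illustrative): node-level ndparams override group-level ones, which
  # in turn override the cluster-wide defaults, so
  #
  #   cluster.FillND(node, nodegroup)
  #
  # is equivalent to filling nodegroup.FillND(node) with cluster.ndparams;
  # the most specific setting always wins.
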
  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = \
      outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True

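# Illustrative sketch with hypothetical values: the Validate() asserts above
# encode which fields each console kind must carry; an SSH console, for
# example, needs host, user and command but no port or display:
#
#   console = InstanceConsole(instance="inst1.example.com",
#                             kind=constants.CONS_SSH, host="node1",
#                             user="root", command=["xm", "console", "inst1"])
#   console.Validate()

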
class Network(TaggableObject):
  """Object representing a network definition for ganeti.

  """
  __slots__ = [
    "name",
    "serial_no",
    "mac_prefix",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "reservations",
    "ext_reservations",
    ] + _TIMESTAMPS + _UUID

  def HooksDict(self, prefix=""):
    """Export a dictionary used by hooks with a network's information.

    @type prefix: String
    @param prefix: Prefix to prepend to the dict entries

    """
    result = {
      "%sNETWORK_NAME" % prefix: self.name,
      "%sNETWORK_UUID" % prefix: self.uuid,
      "%sNETWORK_TAGS" % prefix: " ".join(self.tags),
    }
    if self.network:
      result["%sNETWORK_SUBNET" % prefix] = self.network
    if self.gateway:
      result["%sNETWORK_GATEWAY" % prefix] = self.gateway
    if self.network6:
      result["%sNETWORK_SUBNET6" % prefix] = self.network6
    if self.gateway6:
      result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
    if self.mac_prefix:
      result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix

    return result

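  # Illustrative sketch with hypothetical values: for a network named "net1"
  # with network "10.0.0.0/24" and gateway "10.0.0.1", HooksDict("NEW_")
  # would contain "NEW_NETWORK_NAME", "NEW_NETWORK_SUBNET" and
  # "NEW_NETWORK_GATEWAY"; attributes that are unset are simply left out.
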
  @classmethod
  def FromDict(cls, val):
    """Custom function for networks.

    Remove deprecated network_type and family.

    """
    if "network_type" in val:
      del val["network_type"]
    if "family" in val:
      del val["family"]
    obj = super(Network, cls).FromDict(val)
    return obj


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp

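# Illustrative sketch: Dumps() and Loads() let the parser contents round-trip
# through a plain string (section and option names below are hypothetical):
#
#   cfp = SerializableConfigParser()
#   cfp.add_section("instance")
#   cfp.set("instance", "name", "inst1.example.com")
#   text = cfp.Dumps()
#   restored = SerializableConfigParser.Loads(text)
#   assert restored.get("instance", "name") == "inst1.example.com"

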
class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  @type lv_list: list of strings
  @ivar lv_list: names of the LVs hosted on the PV
  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    "lv_list"
    ]

  def IsEmpty(self):
    """Is this PV empty?

    """
    return self.size <= (self.free + 1)

  def IsAllocatable(self):
    """Is this PV allocatable?

    """
    return ("a" in self.attributes)
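
# Note (illustrative, hypothetical numbers): IsEmpty() treats a PV as empty
# when its free space is within 1 MiB of its total size, tolerating metadata
# overhead, so size=10240.0 and free=10239.5 still count as empty, while
# IsAllocatable() checks for the allocatable ("a") flag in the PV attribute
# string.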