#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable=E0203,W0201,R0902

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

# R0902: Allow instances of these objects to have more than 20 attributes

import ConfigParser
import re
import copy
import time
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants
from ganeti import netutils
from ganeti import utils

from socket import AF_INET


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]


def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict

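# Illustrative sketch (not part of the original module; values are made up):
#   FillDict({"a": 1, "b": 2}, {"b": 5})                  -> {"a": 1, "b": 5}
#   FillDict({"a": 1, "b": 2}, {"b": 5}, skip_keys=["a"]) -> {"b": 5}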

    
def FillIPolicy(default_ipolicy, custom_ipolicy, skip_keys=None):
  """Fills an instance policy with defaults.

  """
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
  ret_dict = {}
  for key in constants.IPOLICY_ISPECS:
    ret_dict[key] = FillDict(default_ipolicy[key],
                             custom_ipolicy.get(key, {}),
                             skip_keys=skip_keys)
  # list items
  for key in [constants.IPOLICY_DTS]:
    ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key]))
  # other items which we know we can directly copy (immutables)
  for key in constants.IPOLICY_PARAMETERS:
    ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key])

  return ret_dict


def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
  """Fills the disk parameter defaults.

  @see: L{FillDict} for parameters and return value

  """
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES

  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
                             skip_keys=skip_keys))
              for dt in constants.DISK_TEMPLATES)


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target


def UpgradeBeParams(target):
  """Update the be parameters dict to the new format.

  @type target: dict
  @param target: "be" parameters dict

  """
  if constants.BE_MEMORY in target:
    memory = target[constants.BE_MEMORY]
    target[constants.BE_MAXMEM] = memory
    target[constants.BE_MINMEM] = memory
    del target[constants.BE_MEMORY]

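# Illustrative upgrade (not from the original source), assuming the usual
# "memory"/"maxmem"/"minmem" values behind BE_MEMORY/BE_MAXMEM/BE_MINMEM:
#   {"memory": 128}  ->  {"maxmem": 128, "minmem": 128}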

    
def UpgradeDiskParams(diskparams):
  """Upgrade the disk parameters.

  @type diskparams: dict
  @param diskparams: disk parameters to upgrade
  @rtype: dict
  @return: the upgraded disk parameters dict

  """
  if not diskparams:
    result = {}
  else:
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)

  return result


def UpgradeNDParams(ndparams):
  """Upgrade ndparams structure.

  @type ndparams: dict
  @param ndparams: node parameters to upgrade
  @rtype: dict
  @return: the upgraded node parameters dict

  """
  if ndparams is None:
    ndparams = {}

  if (constants.ND_OOB_PROGRAM in ndparams and
      ndparams[constants.ND_OOB_PROGRAM] is None):
    # will be reset by the line below
    del ndparams[constants.ND_OOB_PROGRAM]
  return FillDict(constants.NDC_DEFAULTS, ndparams)


def MakeEmptyIPolicy():
  """Create empty IPolicy dictionary.

  """
  return dict([
    (constants.ISPECS_MIN, {}),
    (constants.ISPECS_MAX, {}),
    (constants.ISPECS_STD, {}),
    ])


194
class ConfigObject(object):
195
  """A generic config object.
196

197
  It has the following properties:
198

199
    - provides somewhat safe recursive unpickling and pickling for its classes
200
    - unset attributes which are defined in slots are always returned
201
      as None instead of raising an error
202

203
  Classes derived from this must always declare __slots__ (we use many
204
  config objects and the memory reduction is useful)
205

206
  """
207
  __slots__ = []
208

    
209
  def __init__(self, **kwargs):
210
    for k, v in kwargs.iteritems():
211
      setattr(self, k, v)
212

    
213
  def __getattr__(self, name):
214
    if name not in self._all_slots():
215
      raise AttributeError("Invalid object attribute %s.%s" %
216
                           (type(self).__name__, name))
217
    return None
218

    
219
  def __setstate__(self, state):
220
    slots = self._all_slots()
221
    for name in state:
222
      if name in slots:
223
        setattr(self, name, state[name])
224

    
225
  @classmethod
226
  def _all_slots(cls):
227
    """Compute the list of all declared slots for a class.
228

229
    """
230
    slots = []
231
    for parent in cls.__mro__:
232
      slots.extend(getattr(parent, "__slots__", []))
233
    return slots
234

    
235
  #: Public getter for the defined slots
236
  GetAllSlots = _all_slots
237

    
238
  def ToDict(self):
239
    """Convert to a dict holding only standard python types.
240

241
    The generic routine just dumps all of this object's attributes in
242
    a dict. It does not work if the class has children who are
243
    ConfigObjects themselves (e.g. the nics list in an Instance), in
244
    which case the object should subclass the function in order to
245
    make sure all objects returned are only standard python types.
246

247
    """
248
    result = {}
249
    for name in self._all_slots():
250
      value = getattr(self, name, None)
251
      if value is not None:
252
        result[name] = value
253
    return result
254

    
255
  __getstate__ = ToDict
256

    
257
  @classmethod
258
  def FromDict(cls, val):
259
    """Create an object from a dictionary.
260

261
    This generic routine takes a dict, instantiates a new instance of
262
    the given class, and sets attributes based on the dict content.
263

264
    As for `ToDict`, this does not work if the class has children
265
    who are ConfigObjects themselves (e.g. the nics list in an
266
    Instance), in which case the object should subclass the function
267
    and alter the objects.
268

269
    """
270
    if not isinstance(val, dict):
271
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
272
                                      " expected dict, got %s" % type(val))
273
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
274
    obj = cls(**val_str) # pylint: disable=W0142
275
    return obj
276
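  # Illustrative round-trip (added sketch, not part of the original code;
  # MyObj is a hypothetical ConfigObject subclass with
  # __slots__ = ["name", "size"]):
  #   obj = MyObj(name="disk0", size=128)
  #   data = obj.ToDict()          # {"name": "disk0", "size": 128}
  #   clone = MyObj.FromDict(data) # equivalent new instance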

    
277
  @staticmethod
278
  def _ContainerToDicts(container):
279
    """Convert the elements of a container to standard python types.
280

281
    This method converts a container with elements derived from
282
    ConfigData to standard python types. If the container is a dict,
283
    we don't touch the keys, only the values.
284

285
    """
286
    if isinstance(container, dict):
287
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
288
    elif isinstance(container, (list, tuple, set, frozenset)):
289
      ret = [elem.ToDict() for elem in container]
290
    else:
291
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
292
                      type(container))
293
    return ret
294

    
295
  @staticmethod
296
  def _ContainerFromDicts(source, c_type, e_type):
297
    """Convert a container from standard python types.
298

299
    This method converts a container with standard python types to
300
    ConfigData objects. If the container is a dict, we don't touch the
301
    keys, only the values.
302

303
    """
304
    if not isinstance(c_type, type):
305
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
306
                      " not a type" % type(c_type))
307
    if source is None:
308
      source = c_type()
309
    if c_type is dict:
310
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
311
    elif c_type in (list, tuple, set, frozenset):
312
      ret = c_type([e_type.FromDict(elem) for elem in source])
313
    else:
314
      raise TypeError("Invalid container type %s passed to"
315
                      " _ContainerFromDicts" % c_type)
316
    return ret
317

    
318
  def Copy(self):
319
    """Makes a deep copy of the current object and its children.
320

321
    """
322
    dict_form = self.ToDict()
323
    clone_obj = self.__class__.FromDict(dict_form)
324
    return clone_obj
325

    
326
  def __repr__(self):
327
    """Implement __repr__ for ConfigObjects."""
328
    return repr(self.ToDict())
329

    
330
  def UpgradeConfig(self):
331
    """Fill defaults for missing configuration values.
332

333
    This method will be called at configuration load time, and its
334
    implementation will be object dependent.
335

336
    """
337
    pass
338

    
339

    
340
class TaggableObject(ConfigObject):
341
  """An generic class supporting tags.
342

343
  """
344
  __slots__ = ["tags"]
345
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
346

    
347
  @classmethod
348
  def ValidateTag(cls, tag):
349
    """Check if a tag is valid.
350

351
    If the tag is invalid, an errors.TagError will be raised. The
352
    function has no return value.
353

354
    """
355
    if not isinstance(tag, basestring):
356
      raise errors.TagError("Invalid tag type (not a string)")
357
    if len(tag) > constants.MAX_TAG_LEN:
358
      raise errors.TagError("Tag too long (>%d characters)" %
359
                            constants.MAX_TAG_LEN)
360
    if not tag:
361
      raise errors.TagError("Tags cannot be empty")
362
    if not cls.VALID_TAG_RE.match(tag):
363
      raise errors.TagError("Tag contains invalid characters")
364

    
365
  def GetTags(self):
366
    """Return the tags list.
367

368
    """
369
    tags = getattr(self, "tags", None)
370
    if tags is None:
371
      tags = self.tags = set()
372
    return tags
373

    
374
  def AddTag(self, tag):
375
    """Add a new tag.
376

377
    """
378
    self.ValidateTag(tag)
379
    tags = self.GetTags()
380
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
381
      raise errors.TagError("Too many tags")
382
    self.GetTags().add(tag)
383

    
384
  def RemoveTag(self, tag):
385
    """Remove a tag.
386

387
    """
388
    self.ValidateTag(tag)
389
    tags = self.GetTags()
390
    try:
391
      tags.remove(tag)
392
    except KeyError:
393
      raise errors.TagError("Tag not found")
394

    
395
  def ToDict(self):
396
    """Taggable-object-specific conversion to standard python types.
397

398
    This replaces the tags set with a list.
399

400
    """
401
    bo = super(TaggableObject, self).ToDict()
402

    
403
    tags = bo.get("tags", None)
404
    if isinstance(tags, set):
405
      bo["tags"] = list(tags)
406
    return bo
407

    
408
  @classmethod
409
  def FromDict(cls, val):
410
    """Custom function for instances.
411

412
    """
413
    obj = super(TaggableObject, cls).FromDict(val)
414
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
415
      obj.tags = set(obj.tags)
416
    return obj
417
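  # Usage sketch (illustrative, not from the original source; the tag
  # values are hypothetical):
  #   obj.AddTag("env:prod")     # validated against VALID_TAG_RE first
  #   obj.GetTags()              # -> set(["env:prod"])
  #   obj.RemoveTag("missing")   # raises errors.TagError if not present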

    
418

    
419
class MasterNetworkParameters(ConfigObject):
420
  """Network configuration parameters for the master
421

422
  @ivar name: master name
423
  @ivar ip: master IP
424
  @ivar netmask: master netmask
425
  @ivar netdev: master network device
426
  @ivar ip_family: master IP family
427

428
  """
429
  __slots__ = [
430
    "name",
431
    "ip",
432
    "netmask",
433
    "netdev",
434
    "ip_family"
435
    ]
436

    
437

    
438
class ConfigData(ConfigObject):
439
  """Top-level config object."""
440
  __slots__ = [
441
    "version",
442
    "cluster",
443
    "nodes",
444
    "nodegroups",
445
    "instances",
446
    "serial_no",
447
    ] + _TIMESTAMPS
448

    
449
  def ToDict(self):
450
    """Custom function for top-level config data.
451

452
    This just replaces the list of instances, nodes and the cluster
453
    with standard python types.
454

455
    """
456
    mydict = super(ConfigData, self).ToDict()
457
    mydict["cluster"] = mydict["cluster"].ToDict()
458
    for key in "nodes", "instances", "nodegroups":
459
      mydict[key] = self._ContainerToDicts(mydict[key])
460

    
461
    return mydict
462

    
463
  @classmethod
464
  def FromDict(cls, val):
465
    """Custom function for top-level config data
466

467
    """
468
    obj = super(ConfigData, cls).FromDict(val)
469
    obj.cluster = Cluster.FromDict(obj.cluster)
470
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
471
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
472
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
473
    return obj
474

    
475
  def HasAnyDiskOfType(self, dev_type):
476
    """Check if in there is at disk of the given type in the configuration.
477

478
    @type dev_type: L{constants.LDS_BLOCK}
479
    @param dev_type: the type to look for
480
    @rtype: boolean
481
    @return: boolean indicating if a disk of the given type was found or not
482

483
    """
484
    for instance in self.instances.values():
485
      for disk in instance.disks:
486
        if disk.IsBasedOnDiskType(dev_type):
487
          return True
488
    return False
489

    
490
  def UpgradeConfig(self):
491
    """Fill defaults for missing configuration values.
492

493
    """
494
    self.cluster.UpgradeConfig()
495
    for node in self.nodes.values():
496
      node.UpgradeConfig()
497
    for instance in self.instances.values():
498
      instance.UpgradeConfig()
499
    if self.nodegroups is None:
500
      self.nodegroups = {}
501
    for nodegroup in self.nodegroups.values():
502
      nodegroup.UpgradeConfig()
503
    if self.cluster.drbd_usermode_helper is None:
504
      # To decide if we set a helper, let's check if at least one instance has
505
      # a DRBD disk. This does not cover all the possible scenarios but it
506
      # gives a good approximation.
507
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
508
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
509

    
510

    
511
class NIC(ConfigObject):
512
  """Config object representing a network card."""
513
  __slots__ = ["mac", "ip", "nicparams"]
514

    
515
  @classmethod
516
  def CheckParameterSyntax(cls, nicparams):
517
    """Check the given parameters for validity.
518

519
    @type nicparams:  dict
520
    @param nicparams: dictionary with parameter names/value
521
    @raise errors.ConfigurationError: when a parameter is not valid
522

523
    """
524
    if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
525
        nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
526
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
527
      raise errors.ConfigurationError(err)
528

    
529
    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
530
        not nicparams[constants.NIC_LINK]):
531
      err = "Missing bridged nic link"
532
      raise errors.ConfigurationError(err)
533

    
534

    
535
class Disk(ConfigObject):
536
  """Config object representing a block device."""
537
  __slots__ = ["dev_type", "logical_id", "physical_id",
538
               "children", "iv_name", "size", "mode", "params"]
539

    
540
  def CreateOnSecondary(self):
541
    """Test if this device needs to be created on a secondary node."""
542
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
543

    
544
  def AssembleOnSecondary(self):
545
    """Test if this device needs to be assembled on a secondary node."""
546
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
547

    
548
  def OpenOnSecondary(self):
549
    """Test if this device needs to be opened on a secondary node."""
550
    return self.dev_type in (constants.LD_LV,)
551

    
552
  def StaticDevPath(self):
553
    """Return the device path if this device type has a static one.
554

555
    Some devices (LVM for example) live always at the same /dev/ path,
556
    irrespective of their status. For such devices, we return this
557
    path, for others we return None.
558

559
    @warning: The path returned is not a normalized pathname; callers
560
        should check that it is a valid path.
561

562
    """
563
    if self.dev_type == constants.LD_LV:
564
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
565
    elif self.dev_type == constants.LD_BLOCKDEV:
566
      return self.logical_id[1]
567
    elif self.dev_type == constants.LD_RBD:
568
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
569
    return None
570

    
571
  def ChildrenNeeded(self):
572
    """Compute the needed number of children for activation.
573

574
    This method will return either -1 (all children) or a positive
575
    number denoting the minimum number of children needed for
576
    activation (only mirrored devices will usually return >=0).
577

578
    Currently, only DRBD8 supports diskless activation (therefore we
579
    return 0), for all other we keep the previous semantics and return
580
    -1.
581

582
    """
583
    if self.dev_type == constants.LD_DRBD8:
584
      return 0
585
    return -1
586

    
587
  def IsBasedOnDiskType(self, dev_type):
588
    """Check if the disk or its children are based on the given type.
589

590
    @type dev_type: L{constants.LDS_BLOCK}
591
    @param dev_type: the type to look for
592
    @rtype: boolean
593
    @return: boolean indicating if a device of the given type was found or not
594

595
    """
596
    if self.children:
597
      for child in self.children:
598
        if child.IsBasedOnDiskType(dev_type):
599
          return True
600
    return self.dev_type == dev_type
601

    
602
  def GetNodes(self, node):
603
    """This function returns the nodes this device lives on.
604

605
    Given the node on which the parent of the device lives on (or, in
606
    case of a top-level device, the primary node of the devices'
607
    instance), this function will return a list of nodes on which this
608
    device needs to (or can) be assembled.
609

610
    """
611
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
612
                         constants.LD_BLOCKDEV, constants.LD_RBD]:
613
      result = [node]
614
    elif self.dev_type in constants.LDS_DRBD:
615
      result = [self.logical_id[0], self.logical_id[1]]
616
      if node not in result:
617
        raise errors.ConfigurationError("DRBD device passed unknown node")
618
    else:
619
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
620
    return result
621

    
622
  def ComputeNodeTree(self, parent_node):
623
    """Compute the node/disk tree for this disk and its children.
624

625
    This method, given the node on which the parent disk lives, will
626
    return the list of all (node, disk) pairs which describe the disk
627
    tree in the most compact way. For example, a drbd/lvm stack
628
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
629
    which represents all the top-level devices on the nodes.
630

631
    """
632
    my_nodes = self.GetNodes(parent_node)
633
    result = [(node, self) for node in my_nodes]
634
    if not self.children:
635
      # leaf device
636
      return result
637
    for node in my_nodes:
638
      for child in self.children:
639
        child_result = child.ComputeNodeTree(node)
640
        if len(child_result) == 1:
641
          # child (and all its descendants) is simple, doesn't split
642
          # over multiple hosts, so we don't need to describe it, our
643
          # own entry for this node describes it completely
644
          continue
645
        else:
646
          # check if child nodes differ from my nodes; note that
647
          # subdisk can differ from the child itself, and be instead
648
          # one of its descendants
649
          for subnode, subdisk in child_result:
650
            if subnode not in my_nodes:
651
              result.append((subnode, subdisk))
652
            # otherwise child is under our own node, so we ignore this
653
            # entry (but probably the other results in the list will
654
            # be different)
655
    return result
656
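  # Illustrative outcome (not from the original code): for a DRBD8 disk
  # whose logical_id names ("node1", "node2") and whose children are plain
  # LVs, ComputeNodeTree("node1") returns just
  #   [("node1", <drbd disk>), ("node2", <drbd disk>)]
  # because the LV children resolve to the same nodes as the DRBD device.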

    
657
  def ComputeGrowth(self, amount):
658
    """Compute the per-VG growth requirements.
659

660
    This only works for VG-based disks.
661

662
    @type amount: integer
663
    @param amount: the desired increase in (user-visible) disk space
664
    @rtype: dict
665
    @return: a dictionary of volume-groups and the required size
666

667
    """
668
    if self.dev_type == constants.LD_LV:
669
      return {self.logical_id[0]: amount}
670
    elif self.dev_type == constants.LD_DRBD8:
671
      if self.children:
672
        return self.children[0].ComputeGrowth(amount)
673
      else:
674
        return {}
675
    else:
676
      # Other disk types do not require VG space
677
      return {}
678

    
679
  def RecordGrow(self, amount):
680
    """Update the size of this disk after growth.
681

682
    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
684
    actual algorithms from bdev.
685

686
    """
687
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
688
                         constants.LD_RBD):
689
      self.size += amount
690
    elif self.dev_type == constants.LD_DRBD8:
691
      if self.children:
692
        self.children[0].RecordGrow(amount)
693
      self.size += amount
694
    else:
695
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
696
                                   " disk type %s" % self.dev_type)
697

    
698
  def Update(self, size=None, mode=None):
699
    """Apply changes to size and mode.
700

701
    """
702
    if self.dev_type == constants.LD_DRBD8:
703
      if self.children:
704
        self.children[0].Update(size=size, mode=mode)
705
    else:
706
      assert not self.children
707

    
708
    if size is not None:
709
      self.size = size
710
    if mode is not None:
711
      self.mode = mode
712

    
713
  def UnsetSize(self):
714
    """Sets recursively the size to zero for the disk and its children.
715

716
    """
717
    if self.children:
718
      for child in self.children:
719
        child.UnsetSize()
720
    self.size = 0
721

    
722
  def SetPhysicalID(self, target_node, nodes_ip):
723
    """Convert the logical ID to the physical ID.
724

725
    This is used only for drbd, which needs ip/port configuration.
726

727
    The routine descends down and updates its children also, because
728
    this helps when only the top device is passed to the remote
729
    node.
730

731
    Arguments:
732
      - target_node: the node we wish to configure for
733
      - nodes_ip: a mapping of node name to ip
734

735
    The target_node must exist in nodes_ip, and must be one of the
736
    nodes in the logical ID for each of the DRBD devices encountered
737
    in the disk tree.
738

739
    """
740
    if self.children:
741
      for child in self.children:
742
        child.SetPhysicalID(target_node, nodes_ip)
743

    
744
    if self.logical_id is None and self.physical_id is not None:
745
      return
746
    if self.dev_type in constants.LDS_DRBD:
747
      pnode, snode, port, pminor, sminor, secret = self.logical_id
748
      if target_node not in (pnode, snode):
749
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
750
                                        target_node)
751
      pnode_ip = nodes_ip.get(pnode, None)
752
      snode_ip = nodes_ip.get(snode, None)
753
      if pnode_ip is None or snode_ip is None:
754
        raise errors.ConfigurationError("Can't find primary or secondary node"
755
                                        " for %s" % str(self))
756
      p_data = (pnode_ip, port)
757
      s_data = (snode_ip, port)
758
      if pnode == target_node:
759
        self.physical_id = p_data + s_data + (pminor, secret)
760
      else: # it must be secondary, we tested above
761
        self.physical_id = s_data + p_data + (sminor, secret)
762
    else:
763
      self.physical_id = self.logical_id
764
    return
765

    
766
  def ToDict(self):
767
    """Disk-specific conversion to standard python types.
768

769
    This replaces the children lists of objects with lists of
770
    standard python types.
771

772
    """
773
    bo = super(Disk, self).ToDict()
774

    
775
    for attr in ("children",):
776
      alist = bo.get(attr, None)
777
      if alist:
778
        bo[attr] = self._ContainerToDicts(alist)
779
    return bo
780

    
781
  @classmethod
782
  def FromDict(cls, val):
783
    """Custom function for Disks
784

785
    """
786
    obj = super(Disk, cls).FromDict(val)
787
    if obj.children:
788
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
789
    if obj.logical_id and isinstance(obj.logical_id, list):
790
      obj.logical_id = tuple(obj.logical_id)
791
    if obj.physical_id and isinstance(obj.physical_id, list):
792
      obj.physical_id = tuple(obj.physical_id)
793
    if obj.dev_type in constants.LDS_DRBD:
794
      # we need a tuple of length six here
795
      if len(obj.logical_id) < 6:
796
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
797
    return obj
798

    
799
  def __str__(self):
800
    """Custom str() formatter for disks.
801

802
    """
803
    if self.dev_type == constants.LD_LV:
804
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
805
    elif self.dev_type in constants.LDS_DRBD:
806
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
807
      val = "<DRBD8("
808
      if self.physical_id is None:
809
        phy = "unconfigured"
810
      else:
811
        phy = ("configured as %s:%s %s:%s" %
812
               (self.physical_id[0], self.physical_id[1],
813
                self.physical_id[2], self.physical_id[3]))
814

    
815
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
816
              (node_a, minor_a, node_b, minor_b, port, phy))
817
      if self.children and self.children.count(None) == 0:
818
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
819
      else:
820
        val += "no local storage"
821
    else:
822
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
823
             (self.dev_type, self.logical_id, self.physical_id, self.children))
824
    if self.iv_name is None:
825
      val += ", not visible"
826
    else:
827
      val += ", visible as /dev/%s" % self.iv_name
828
    if isinstance(self.size, int):
829
      val += ", size=%dm)>" % self.size
830
    else:
831
      val += ", size='%s')>" % (self.size,)
832
    return val
833

    
834
  def Verify(self):
835
    """Checks that this disk is correctly configured.
836

837
    """
838
    all_errors = []
839
    if self.mode not in constants.DISK_ACCESS_SET:
840
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
841
    return all_errors
842

    
843
  def UpgradeConfig(self):
844
    """Fill defaults for missing configuration values.
845

846
    """
847
    if self.children:
848
      for child in self.children:
849
        child.UpgradeConfig()
850

    
851
    # FIXME: Make this configurable in Ganeti 2.7
852
    self.params = {}
853
    # add here config upgrade for this disk
854

    
855
  @staticmethod
856
  def ComputeLDParams(disk_template, disk_params):
857
    """Computes Logical Disk parameters from Disk Template parameters.
858

859
    @type disk_template: string
860
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
861
    @type disk_params: dict
862
    @param disk_params: disk template parameters;
863
                        dict(template_name -> parameters)
864
    @rtype: list(dict)
865
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
866
      contains the LD parameters of the node. The tree is flattened in-order.
867

868
    """
869
    if disk_template not in constants.DISK_TEMPLATES:
870
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
871

    
872
    assert disk_template in disk_params
873

    
874
    result = list()
875
    dt_params = disk_params[disk_template]
876
    if disk_template == constants.DT_DRBD8:
877
      drbd_params = {
878
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
879
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
880
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
881
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
882
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
883
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
884
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
885
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
886
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
887
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
888
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
889
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
890
        }
891

    
892
      drbd_params = \
893
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8],
894
                 drbd_params)
895

    
896
      result.append(drbd_params)
897

    
898
      # data LV
899
      data_params = {
900
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
901
        }
902
      data_params = \
903
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
904
                 data_params)
905
      result.append(data_params)
906

    
907
      # metadata LV
908
      meta_params = {
909
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
910
        }
911
      meta_params = \
912
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
913
                 meta_params)
914
      result.append(meta_params)
915

    
916
    elif (disk_template == constants.DT_FILE or
917
          disk_template == constants.DT_SHARED_FILE):
918
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])
919

    
920
    elif disk_template == constants.DT_PLAIN:
921
      params = {
922
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
923
        }
924
      params = \
925
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
926
                 params)
927
      result.append(params)
928

    
929
    elif disk_template == constants.DT_BLOCK:
930
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])
931

    
932
    elif disk_template == constants.DT_RBD:
933
      params = {
934
        constants.LDP_POOL: dt_params[constants.RBD_POOL]
935
        }
936
      params = \
937
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD],
938
                 params)
939
      result.append(params)
940

    
941
    return result
942

    
943

    
944
class InstancePolicy(ConfigObject):
945
  """Config object representing instance policy limits dictionary.
946

947

948
  Note that this object is not actually used in the config, it's just
949
  used as a placeholder for a few functions.
950

951
  """
952
  @classmethod
953
  def CheckParameterSyntax(cls, ipolicy, check_std):
954
    """ Check the instance policy for validity.
955

956
    """
957
    for param in constants.ISPECS_PARAMETERS:
958
      InstancePolicy.CheckISpecSyntax(ipolicy, param, check_std)
959
    if constants.IPOLICY_DTS in ipolicy:
960
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
961
    for key in constants.IPOLICY_PARAMETERS:
962
      if key in ipolicy:
963
        InstancePolicy.CheckParameter(key, ipolicy[key])
964
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
965
    if wrong_keys:
966
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
967
                                      utils.CommaJoin(wrong_keys))
968

    
969
  @classmethod
970
  def CheckISpecSyntax(cls, ipolicy, name, check_std):
971
    """Check the instance policy for validity on a given key.
972

973
    We check if the instance policy makes sense for a given key, that is
974
    if ipolicy[min][name] <= ipolicy[std][name] <= ipolicy[max][name].
975

976
    @type ipolicy: dict
977
    @param ipolicy: dictionary with min, max, std specs
978
    @type name: string
979
    @param name: what are the limits for
980
    @type check_std: bool
981
    @param check_std: Whether to check std value or just assume compliance
982
    @raise errors.ConfigurationError: when specs for given name are not valid
983

984
    """
985
    min_v = ipolicy[constants.ISPECS_MIN].get(name, 0)
986

    
987
    if check_std:
988
      std_v = ipolicy[constants.ISPECS_STD].get(name, min_v)
989
      std_msg = std_v
990
    else:
991
      std_v = min_v
992
      std_msg = "-"
993

    
994
    max_v = ipolicy[constants.ISPECS_MAX].get(name, std_v)
995
    err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
996
           (name,
997
            ipolicy[constants.ISPECS_MIN].get(name, "-"),
998
            ipolicy[constants.ISPECS_MAX].get(name, "-"),
999
            std_msg))
1000
    if min_v > std_v or std_v > max_v:
1001
      raise errors.ConfigurationError(err)
1002

    
1003
  @classmethod
1004
  def CheckDiskTemplates(cls, disk_templates):
1005
    """Checks the disk templates for validity.
1006

1007
    """
1008
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1009
    if wrong:
1010
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
1011
                                      utils.CommaJoin(wrong))
1012

    
1013
  @classmethod
1014
  def CheckParameter(cls, key, value):
1015
    """Checks a parameter.
1016

1017
    Currently we expect all parameters to be float values.
1018

1019
    """
1020
    try:
1021
      float(value)
1022
    except (TypeError, ValueError), err:
1023
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
1024
                                      " '%s', error: %s" % (key, value, err))
1025

    
1026

    
1027
class Instance(TaggableObject):
1028
  """Config object representing an instance."""
1029
  __slots__ = [
1030
    "name",
1031
    "primary_node",
1032
    "os",
1033
    "hypervisor",
1034
    "hvparams",
1035
    "beparams",
1036
    "osparams",
1037
    "admin_state",
1038
    "nics",
1039
    "disks",
1040
    "disk_template",
1041
    "network_port",
1042
    "serial_no",
1043
    ] + _TIMESTAMPS + _UUID
1044

    
1045
  def _ComputeSecondaryNodes(self):
1046
    """Compute the list of secondary nodes.
1047

1048
    This is a simple wrapper over _ComputeAllNodes.
1049

1050
    """
1051
    all_nodes = set(self._ComputeAllNodes())
1052
    all_nodes.discard(self.primary_node)
1053
    return tuple(all_nodes)
1054

    
1055
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1056
                             "List of secondary nodes")
1057

    
1058
  def _ComputeAllNodes(self):
1059
    """Compute the list of all nodes.
1060

1061
    Since the data is already there (in the drbd disks), keeping it as
1062
    a separate normal attribute is redundant and if not properly
1063
    synchronised can cause problems. Thus it's better to compute it
1064
    dynamically.
1065

1066
    """
1067
    def _Helper(nodes, device):
1068
      """Recursively computes nodes given a top device."""
1069
      if device.dev_type in constants.LDS_DRBD:
1070
        nodea, nodeb = device.logical_id[:2]
1071
        nodes.add(nodea)
1072
        nodes.add(nodeb)
1073
      if device.children:
1074
        for child in device.children:
1075
          _Helper(nodes, child)
1076

    
1077
    all_nodes = set()
1078
    all_nodes.add(self.primary_node)
1079
    for device in self.disks:
1080
      _Helper(all_nodes, device)
1081
    return tuple(all_nodes)
1082

    
1083
  all_nodes = property(_ComputeAllNodes, None, None,
1084
                       "List of all nodes of the instance")
1085

    
1086
  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
1087
    """Provide a mapping of nodes to LVs this instance owns.
1088

1089
    This function figures out what logical volumes should belong on
1090
    which nodes, recursing through a device tree.
1091

1092
    @param lvmap: optional dictionary to receive the
1093
        'node' : ['lv', ...] data.
1094

1095
    @return: None if lvmap arg is given, otherwise, a dictionary of
1096
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
1097
        volumeN is of the form "vg_name/lv_name", compatible with
1098
        GetVolumeList()
1099

1100
    """
1101
    if node is None:
1102
      node = self.primary_node
1103

    
1104
    if lvmap is None:
1105
      lvmap = {
1106
        node: [],
1107
        }
1108
      ret = lvmap
1109
    else:
1110
      if not node in lvmap:
1111
        lvmap[node] = []
1112
      ret = None
1113

    
1114
    if not devs:
1115
      devs = self.disks
1116

    
1117
    for dev in devs:
1118
      if dev.dev_type == constants.LD_LV:
1119
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1120

    
1121
      elif dev.dev_type in constants.LDS_DRBD:
1122
        if dev.children:
1123
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1124
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1125

    
1126
      elif dev.children:
1127
        self.MapLVsByNode(lvmap, dev.children, node)
1128

    
1129
    return ret
1130
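  # Example return value (illustrative; the volume group and LV names are
  # hypothetical) for a plain-LVM instance with two disks on "node1":
  #   {"node1": ["xenvg/disk0-data", "xenvg/disk1-data"]}
  # i.e. each entry is "vg_name/lv_name", as expected by GetVolumeList().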

    
1131
  def FindDisk(self, idx):
1132
    """Find a disk given having a specified index.
1133

1134
    This is just a wrapper that does validation of the index.
1135

1136
    @type idx: int
1137
    @param idx: the disk index
1138
    @rtype: L{Disk}
1139
    @return: the corresponding disk
1140
    @raise errors.OpPrereqError: when the given index is not valid
1141

1142
    """
1143
    try:
1144
      idx = int(idx)
1145
      return self.disks[idx]
1146
    except (TypeError, ValueError), err:
1147
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1148
                                 errors.ECODE_INVAL)
1149
    except IndexError:
1150
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1151
                                 " 0 to %d" % (idx, len(self.disks) - 1),
1152
                                 errors.ECODE_INVAL)
1153

    
1154
  def ToDict(self):
1155
    """Instance-specific conversion to standard python types.
1156

1157
    This replaces the children lists of objects with lists of standard
1158
    python types.
1159

1160
    """
1161
    bo = super(Instance, self).ToDict()
1162

    
1163
    for attr in "nics", "disks":
1164
      alist = bo.get(attr, None)
1165
      if alist:
1166
        nlist = self._ContainerToDicts(alist)
1167
      else:
1168
        nlist = []
1169
      bo[attr] = nlist
1170
    return bo
1171

    
1172
  @classmethod
1173
  def FromDict(cls, val):
1174
    """Custom function for instances.
1175

1176
    """
1177
    if "admin_state" not in val:
1178
      if val.get("admin_up", False):
1179
        val["admin_state"] = constants.ADMINST_UP
1180
      else:
1181
        val["admin_state"] = constants.ADMINST_DOWN
1182
    if "admin_up" in val:
1183
      del val["admin_up"]
1184
    obj = super(Instance, cls).FromDict(val)
1185
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
1186
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
1187
    return obj
1188

    
1189
  def UpgradeConfig(self):
1190
    """Fill defaults for missing configuration values.
1191

1192
    """
1193
    for nic in self.nics:
1194
      nic.UpgradeConfig()
1195
    for disk in self.disks:
1196
      disk.UpgradeConfig()
1197
    if self.hvparams:
1198
      for key in constants.HVC_GLOBALS:
1199
        try:
1200
          del self.hvparams[key]
1201
        except KeyError:
1202
          pass
1203
    if self.osparams is None:
1204
      self.osparams = {}
1205
    UpgradeBeParams(self.beparams)
1206

    
1207

    
1208
class OS(ConfigObject):
1209
  """Config object representing an operating system.
1210

1211
  @type supported_parameters: list
1212
  @ivar supported_parameters: a list of tuples, name and description,
1213
      describing the parameters supported by this OS
1214

1215
  @type VARIANT_DELIM: string
1216
  @cvar VARIANT_DELIM: the variant delimiter
1217

1218
  """
1219
  __slots__ = [
1220
    "name",
1221
    "path",
1222
    "api_versions",
1223
    "create_script",
1224
    "export_script",
1225
    "import_script",
1226
    "rename_script",
1227
    "verify_script",
1228
    "supported_variants",
1229
    "supported_parameters",
1230
    ]
1231

    
1232
  VARIANT_DELIM = "+"
1233

    
1234
  @classmethod
1235
  def SplitNameVariant(cls, name):
1236
    """Splits the name into the proper name and variant.
1237

1238
    @param name: the OS (unprocessed) name
1239
    @rtype: list
1240
    @return: a list of two elements; if the original name didn't
1241
        contain a variant, it's returned as an empty string
1242

1243
    """
1244
    nv = name.split(cls.VARIANT_DELIM, 1)
1245
    if len(nv) == 1:
1246
      nv.append("")
1247
    return nv
1248

    
1249
  @classmethod
1250
  def GetName(cls, name):
1251
    """Returns the proper name of the os (without the variant).
1252

1253
    @param name: the OS (unprocessed) name
1254

1255
    """
1256
    return cls.SplitNameVariant(name)[0]
1257

    
1258
  @classmethod
1259
  def GetVariant(cls, name):
1260
    """Returns the variant the os (without the base name).
1261

1262
    @param name: the OS (unprocessed) name
1263

1264
    """
1265
    return cls.SplitNameVariant(name)[1]
1266

    
1267

    
1268
class NodeHvState(ConfigObject):
1269
  """Hypvervisor state on a node.
1270

1271
  @ivar mem_total: Total amount of memory
1272
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1273
    available)
1274
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1275
    rounding
1276
  @ivar mem_inst: Memory used by instances living on node
1277
  @ivar cpu_total: Total node CPU core count
1278
  @ivar cpu_node: Number of CPU cores reserved for the node itself
1279

1280
  """
1281
  __slots__ = [
1282
    "mem_total",
1283
    "mem_node",
1284
    "mem_hv",
1285
    "mem_inst",
1286
    "cpu_total",
1287
    "cpu_node",
1288
    ] + _TIMESTAMPS
1289

    
1290

    
1291
class NodeDiskState(ConfigObject):
1292
  """Disk state on a node.
1293

1294
  """
1295
  __slots__ = [
1296
    "total",
1297
    "reserved",
1298
    "overhead",
1299
    ] + _TIMESTAMPS
1300

    
1301

    
1302
class Node(TaggableObject):
1303
  """Config object representing a node.
1304

1305
  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1306
  @ivar hv_state_static: Hypervisor state overridden by user
  @ivar disk_state: Disk state (e.g. free space)
  @ivar disk_state_static: Disk state overridden by user
1309

1310
  """
1311
  __slots__ = [
1312
    "name",
1313
    "primary_ip",
1314
    "secondary_ip",
1315
    "serial_no",
1316
    "master_candidate",
1317
    "offline",
1318
    "drained",
1319
    "group",
1320
    "master_capable",
1321
    "vm_capable",
1322
    "ndparams",
1323
    "powered",
1324
    "hv_state",
1325
    "hv_state_static",
1326
    "disk_state",
1327
    "disk_state_static",
1328
    ] + _TIMESTAMPS + _UUID
1329

    
1330
  def UpgradeConfig(self):
1331
    """Fill defaults for missing configuration values.
1332

1333
    """
1334
    # pylint: disable=E0203
1335
    # because these are "defined" via slots, not manually
1336
    if self.master_capable is None:
1337
      self.master_capable = True
1338

    
1339
    if self.vm_capable is None:
1340
      self.vm_capable = True
1341

    
1342
    if self.ndparams is None:
1343
      self.ndparams = {}
1344

    
1345
    if self.powered is None:
1346
      self.powered = True
1347

    
1348
  def ToDict(self):
1349
    """Custom function for serializing.
1350

1351
    """
1352
    data = super(Node, self).ToDict()
1353

    
1354
    hv_state = data.get("hv_state", None)
1355
    if hv_state is not None:
1356
      data["hv_state"] = self._ContainerToDicts(hv_state)
1357

    
1358
    disk_state = data.get("disk_state", None)
1359
    if disk_state is not None:
1360
      data["disk_state"] = \
1361
        dict((key, self._ContainerToDicts(value))
1362
             for (key, value) in disk_state.items())
1363

    
1364
    return data
1365

    
1366
  @classmethod
1367
  def FromDict(cls, val):
1368
    """Custom function for deserializing.
1369

1370
    """
1371
    obj = super(Node, cls).FromDict(val)
1372

    
1373
    if obj.hv_state is not None:
1374
      obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1375

    
1376
    if obj.disk_state is not None:
1377
      obj.disk_state = \
1378
        dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState))
1379
             for (key, value) in obj.disk_state.items())
1380

    
1381
    return obj
1382

    
1383

    
1384
class NodeGroup(TaggableObject):
1385
  """Config object representing a node group."""
1386
  __slots__ = [
1387
    "name",
1388
    "members",
1389
    "ndparams",
1390
    "diskparams",
1391
    "ipolicy",
1392
    "serial_no",
1393
    "hv_state_static",
1394
    "disk_state_static",
1395
    "alloc_policy",
1396
    ] + _TIMESTAMPS + _UUID
1397

    
1398
  def ToDict(self):
1399
    """Custom function for nodegroup.
1400

1401
    This discards the members object, which gets recalculated and is only kept
1402
    in memory.
1403

1404
    """
1405
    mydict = super(NodeGroup, self).ToDict()
1406
    del mydict["members"]
1407
    return mydict
1408

    
1409
  @classmethod
1410
  def FromDict(cls, val):
1411
    """Custom function for nodegroup.
1412

1413
    The members slot is initialized to an empty list, upon deserialization.
1414

1415
    """
1416
    obj = super(NodeGroup, cls).FromDict(val)
1417
    obj.members = []
1418
    return obj
1419

    
1420
  def UpgradeConfig(self):
1421
    """Fill defaults for missing configuration values.
1422

1423
    """
1424
    if self.ndparams is None:
1425
      self.ndparams = {}
1426

    
1427
    if self.serial_no is None:
1428
      self.serial_no = 1
1429

    
1430
    if self.alloc_policy is None:
1431
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1432

    
1433
    # We only update mtime, and not ctime, since we would not be able
1434
    # to provide a correct value for creation time.
1435
    if self.mtime is None:
1436
      self.mtime = time.time()
1437

    
1438
    if self.diskparams is None:
1439
      self.diskparams = {}
1440
    if self.ipolicy is None:
1441
      self.ipolicy = MakeEmptyIPolicy()
1442

    
1443
  def FillND(self, node):
1444
    """Return filled out ndparams for L{objects.Node}
1445

1446
    @type node: L{objects.Node}
1447
    @param node: A Node object to fill
1448
    @return a copy of the node's ndparams with defaults filled
1449

1450
    """
1451
    return self.SimpleFillND(node.ndparams)
1452

    
1453
  def SimpleFillND(self, ndparams):
1454
    """Fill a given ndparams dict with defaults.
1455

1456
    @type ndparams: dict
1457
    @param ndparams: the dict to fill
1458
    @rtype: dict
1459
    @return: a copy of the passed in ndparams with missing keys filled
1460
        from the node group defaults
1461

1462
    """
1463
    return FillDict(self.ndparams, ndparams)
1464

    
1465

    
1466
class Cluster(TaggableObject):
1467
  """Config object representing the cluster."""
1468
  __slots__ = [
1469
    "serial_no",
1470
    "rsahostkeypub",
1471
    "highest_used_port",
1472
    "tcpudp_port_pool",
1473
    "mac_prefix",
1474
    "volume_group_name",
1475
    "reserved_lvs",
1476
    "drbd_usermode_helper",
1477
    "default_bridge",
1478
    "default_hypervisor",
1479
    "master_node",
1480
    "master_ip",
1481
    "master_netdev",
1482
    "master_netmask",
1483
    "use_external_mip_script",
1484
    "cluster_name",
1485
    "file_storage_dir",
1486
    "shared_file_storage_dir",
1487
    "enabled_hypervisors",
1488
    "hvparams",
1489
    "ipolicy",
1490
    "os_hvp",
1491
    "beparams",
1492
    "osparams",
1493
    "nicparams",
1494
    "ndparams",
1495
    "diskparams",
1496
    "candidate_pool_size",
1497
    "modify_etc_hosts",
1498
    "modify_ssh_setup",
1499
    "maintain_node_health",
1500
    "uid_pool",
1501
    "default_iallocator",
1502
    "hidden_os",
1503
    "blacklisted_os",
1504
    "primary_ip_family",
1505
    "prealloc_wipe_disks",
1506
    "hv_state_static",
1507
    "disk_state_static",
1508
    ] + _TIMESTAMPS + _UUID
1509

    
1510
  def UpgradeConfig(self):
1511
    """Fill defaults for missing configuration values.
1512

1513
    """
1514
    # pylint: disable=E0203
1515
    # because these are "defined" via slots, not manually
1516
    if self.hvparams is None:
1517
      self.hvparams = constants.HVC_DEFAULTS
1518
    else:
1519
      for hypervisor in self.hvparams:
1520
        self.hvparams[hypervisor] = FillDict(
1521
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1522

    
1523
    if self.os_hvp is None:
1524
      self.os_hvp = {}
1525

    
1526
    # osparams added before 2.2
1527
    if self.osparams is None:
1528
      self.osparams = {}
1529

    
1530
    self.ndparams = UpgradeNDParams(self.ndparams)
1531

    
1532
    self.beparams = UpgradeGroupedParams(self.beparams,
1533
                                         constants.BEC_DEFAULTS)
1534
    for beparams_group in self.beparams:
1535
      UpgradeBeParams(self.beparams[beparams_group])
1536

    
1537
    migrate_default_bridge = not self.nicparams
1538
    self.nicparams = UpgradeGroupedParams(self.nicparams,
1539
                                          constants.NICC_DEFAULTS)
1540
    if migrate_default_bridge:
1541
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1542
        self.default_bridge
1543

    
1544
    if self.modify_etc_hosts is None:
1545
      self.modify_etc_hosts = True
1546

    
1547
    if self.modify_ssh_setup is None:
1548
      self.modify_ssh_setup = True
1549

    
1550
    # default_bridge is no longer used in 2.1. The slot is left there to
1551
    # support auto-upgrading. It can be removed once we decide to deprecate
1552
    # upgrading straight from 2.0.
1553
    if self.default_bridge is not None:
1554
      self.default_bridge = None
1555

    
1556
    # default_hypervisor is just the first enabled one in 2.1. This slot and
1557
    # code can be removed once upgrading straight from 2.0 is deprecated.
1558
    if self.default_hypervisor is not None:
1559
      self.enabled_hypervisors = ([self.default_hypervisor] +
1560
                                  [hvname for hvname in self.enabled_hypervisors
1561
                                   if hvname != self.default_hypervisor])
1562
      self.default_hypervisor = None
1563

    
1564
    # maintain_node_health added after 2.1.1
1565
    if self.maintain_node_health is None:
1566
      self.maintain_node_health = False
1567

    
1568
    if self.uid_pool is None:
1569
      self.uid_pool = []
1570

    
1571
    if self.default_iallocator is None:
1572
      self.default_iallocator = ""
1573

    
1574
    # reserved_lvs added before 2.2
1575
    if self.reserved_lvs is None:
1576
      self.reserved_lvs = []
1577

    
1578
    # hidden and blacklisted operating systems added before 2.2.1
1579
    if self.hidden_os is None:
1580
      self.hidden_os = []
1581

    
1582
    if self.blacklisted_os is None:
1583
      self.blacklisted_os = []
1584

    
1585
    # primary_ip_family added before 2.3
1586
    if self.primary_ip_family is None:
1587
      self.primary_ip_family = AF_INET
1588

    
1589
    if self.master_netmask is None:
1590
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
1591
      self.master_netmask = ipcls.iplen
1592

    
1593
    if self.prealloc_wipe_disks is None:
1594
      self.prealloc_wipe_disks = False
1595

    
1596
    # shared_file_storage_dir added before 2.5
1597
    if self.shared_file_storage_dir is None:
1598
      self.shared_file_storage_dir = ""
1599

    
1600
    if self.use_external_mip_script is None:
1601
      self.use_external_mip_script = False
1602

    
1603
    if self.diskparams:
1604
      self.diskparams = UpgradeDiskParams(self.diskparams)
1605
    else:
1606
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()
1607

    
1608
    # instance policy added before 2.6
1609
    if self.ipolicy is None:
1610
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
1611
    else:
1612
      # we can either make sure to upgrade the ipolicy always, or only
1613
      # do it in some corner cases (e.g. missing keys); note that this
1614
      # will break any removal of keys from the ipolicy dict
1615
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
1616

    
1617
  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: The diskparams
    @return: The defaults dict

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

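  # Illustrative sketch (hypothetical parameter values), showing how the fill
  # stack layers per-OS overrides on top of the cluster-wide defaults for a
  # hypervisor:
  #   cluster.hvparams = {"xen-pvm": {"kernel_path": "/boot/vmlinuz",
  #                                   "root_path": "/dev/xvda1"}}
  #   cluster.os_hvp = {"debian-image":
  #                     {"xen-pvm": {"root_path": "/dev/xvda2"}}}
  #   cluster.GetHVDefaults("xen-pvm", os_name="debian-image")
  #   => {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/xvda2"}
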
  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

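  # Illustrative sketch (hypothetical values): explicitly given hvparams win
  # over the stacked defaults from GetHVDefaults, and with skip_globals=True
  # every key listed in constants.HVC_GLOBALS is left out of the result:
  #   cluster.SimpleFillHV("xen-pvm", "debian-image",
  #                        {"root_path": "/dev/xvda3"}, skip_globals=True)
  #   => the "xen-pvm"/"debian-image" defaults with "root_path" forced to
  #      "/dev/xvda3" and the global parameters removed
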
  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

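  # Illustrative sketch (hypothetical values): the cluster-wide backend
  # defaults live under the constants.PP_DEFAULT key of self.beparams, so
  #   cluster.beparams = {constants.PP_DEFAULT: {"memory": 512, "vcpus": 1}}
  #   cluster.SimpleFillBE({"vcpus": 4})
  #   => {"memory": 512, "vcpus": 4}
  # The same pattern is used by SimpleFillNIC below for self.nicparams.
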
  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)

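  # Illustrative sketch (hypothetical values): for a variant name such as
  # "debian-image+secure" the parameters are merged in three layers, base OS,
  # full variant, then the explicitly given os_params:
  #   cluster.osparams = {"debian-image": {"mirror": "a", "dhcp": "yes"},
  #                       "debian-image+secure": {"mirror": "b"}}
  #   cluster.SimpleFillOS("debian-image+secure", {"dhcp": "no"})
  #   => {"mirror": "b", "dhcp": "no"}
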
  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}.

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

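  # Illustrative sketch: FillND above layers the parameters from the most
  # specific to the most generic owner, node ndparams on top of group
  # ndparams on top of the cluster-wide self.ndparams, so e.g. a node-level
  # "oob_program" overrides the group value, which in turn overrides the
  # cluster default ("oob_program" used here only as an example key).
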
  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)

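  # Illustrative sketch (hypothetical value): a partially specified policy
  # keeps its explicit settings and inherits everything else:
  #   cluster.SimpleFillIPolicy({"vcpu-ratio": 2.0})
  #   => a complete policy dict in which "vcpu-ratio" is 2.0 and all other
  #      entries (ispec limits, allowed disk templates, ...) are taken from
  #      self.ipolicy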

    
class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True

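  # Illustrative sketch (hypothetical values): an SSH console passes
  # validation with a host, user and command, while "message", "port" and
  # "display" may stay unset for this kind:
  #   con = InstanceConsole(instance="inst1.example.com",
  #                         kind=constants.CONS_SSH, host="node1",
  #                         user="root", command=["ssh", "node1"])
  #   con.Validate()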

    
class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
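
  # Illustrative sketch: Dumps() and Loads() round-trip a parser through a
  # plain string (e.g. for storing it in a file or passing it over the wire):
  #   cfg = SerializableConfigParser()
  #   cfg.add_section("instance")
  #   cfg.set("instance", "name", "inst1.example.com")
  #   data = cfg.Dumps()
  #   SerializableConfigParser.Loads(data).get("instance", "name")
  #   => "inst1.example.com"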