#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable=E0203,W0201,R0902

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

# R0902: Allow instances of these objects to have more than 20 attributes

import ConfigParser
import re
import copy
import time
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants
from ganeti import netutils
from ganeti import objectutils
from ganeti import utils

from socket import AF_INET


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]


def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
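
# A minimal illustration of FillDict semantics (comment only, not executed;
# the values are made up):
#
#   >>> FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"])
#   {'b': 3}
#
# The defaults are deep-copied first, so neither input dictionary is modified.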


def FillIPolicy(default_ipolicy, custom_ipolicy, skip_keys=None):
  """Fills an instance policy with defaults.

  """
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
  ret_dict = {}
  for key in constants.IPOLICY_ISPECS:
    ret_dict[key] = FillDict(default_ipolicy[key],
                             custom_ipolicy.get(key, {}),
                             skip_keys=skip_keys)
  # list items
  for key in [constants.IPOLICY_DTS]:
    ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key]))
  # other items which we know we can directly copy (immutables)
  for key in constants.IPOLICY_PARAMETERS:
    ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key])

  return ret_dict


def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
  """Fills the disk parameter defaults.

  @see: L{FillDict} for parameters and return value

  """
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES

  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
                             skip_keys=skip_keys))
              for dt in constants.DISK_TEMPLATES)


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
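
# Illustrative behaviour of UpgradeGroupedParams (comment only; the parameter
# name is made up and constants.PP_DEFAULT is assumed to be "default"):
#
#   >>> UpgradeGroupedParams(None, {"linkspeed": 1000})
#   {'default': {'linkspeed': 1000}}
#
# When group dicts already exist, each one is filled with the missing
# defaults via FillDict instead.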


def UpgradeBeParams(target):
  """Update the be parameters dict to the new format.

  @type target: dict
  @param target: "be" parameters dict

  """
  if constants.BE_MEMORY in target:
    memory = target[constants.BE_MEMORY]
    target[constants.BE_MAXMEM] = memory
    target[constants.BE_MINMEM] = memory
    del target[constants.BE_MEMORY]
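
# Illustrative in-place upgrade performed by UpgradeBeParams (comment only):
#
#   >>> be = {constants.BE_MEMORY: 128}
#   >>> UpgradeBeParams(be)
#   >>> be == {constants.BE_MAXMEM: 128, constants.BE_MINMEM: 128}
#   True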


def UpgradeDiskParams(diskparams):
  """Upgrade the disk parameters.

  @type diskparams: dict
  @param diskparams: disk parameters to upgrade
  @rtype: dict
  @return: the upgraded disk parameters dict

  """
  if not diskparams:
    result = {}
  else:
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)

  return result


def UpgradeNDParams(ndparams):
  """Upgrade ndparams structure.

  @type ndparams: dict
  @param ndparams: node parameters to upgrade
  @rtype: dict
  @return: the upgraded node parameters dict

  """
  if ndparams is None:
    ndparams = {}

  if (constants.ND_OOB_PROGRAM in ndparams and
      ndparams[constants.ND_OOB_PROGRAM] is None):
    # will be reset by the line below
    del ndparams[constants.ND_OOB_PROGRAM]
  return FillDict(constants.NDC_DEFAULTS, ndparams)


def MakeEmptyIPolicy():
  """Create empty IPolicy dictionary.

  """
  return dict([
    (constants.ISPECS_MIN, {}),
    (constants.ISPECS_MAX, {}),
    (constants.ISPECS_STD, {}),
    ])


class ConfigObject(objectutils.ValidatedSlots):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __getattr__(self, name):
    if name not in self.GetAllSlots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    slots = self.GetAllSlots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  def Validate(self):
    """Validates the slots.

    """

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self.GetAllSlots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable=W0142
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if source is None:
      source = c_type()
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
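
  # Illustrative serialization round-trip (comment only; NIC is defined
  # further below in this module and the MAC address is made up):
  #
  #   >>> nic = NIC(mac="aa:00:00:11:22:33")
  #   >>> NIC.FromDict(nic.ToDict()).mac
  #   'aa:00:00:11:22:33'
  #
  # Unset slots are omitted by ToDict() and read back as None via __getattr__.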


class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for taggable objects.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
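
  # Illustrative tag handling on a derived object (comment only; Node is
  # defined further below and the values are made up):
  #
  #   >>> node = Node(name="node1.example.com")
  #   >>> node.AddTag("staging")
  #   >>> sorted(node.GetTags())
  #   ['staging']
  #
  # Tags are kept as a set in memory and serialized as a list by ToDict().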


class MasterNetworkParameters(ConfigObject):
  """Network configuration parameters for the master.

  @ivar name: master name
  @ivar ip: master IP
  @ivar netmask: master netmask
  @ivar netdev: master network device
  @ivar ip_family: master IP family

  """
  __slots__ = [
    "name",
    "ip",
    "netmask",
    "netdev",
    "ip_family",
    ]


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = [
    "version",
    "cluster",
    "nodes",
    "nodegroups",
    "instances",
    "networks",
    "serial_no",
    ] + _TIMESTAMPS

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances", "nodegroups", "networks":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data.

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    obj.networks = cls._ContainerFromDicts(obj.networks, dict, Network)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if there is a disk of the given type in the configuration.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set a helper let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
    if self.networks is None:
      self.networks = {}


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "network", "nicparams", "netinfo"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams:  dict
    @param nicparams: dictionary with parameter names/values
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    mode = nicparams[constants.NIC_MODE]
    if (mode not in constants.NIC_VALID_MODES and
        mode != constants.VALUE_AUTO):
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)

    if (mode == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      raise errors.ConfigurationError("Missing bridged NIC link")


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode", "params"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) always live at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    elif self.dev_type == constants.LD_BLOCKDEV:
      return self.logical_id[1]
    elif self.dev_type == constants.LD_RBD:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
                         constants.LD_BLOCKDEV, constants.LD_RBD,
                         constants.LD_EXT]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def ComputeGrowth(self, amount):
    """Compute the per-VG growth requirements.

    This only works for VG-based disks.

    @type amount: integer
    @param amount: the desired increase in (user-visible) disk space
    @rtype: dict
    @return: a dictionary of volume-groups and the required size

    """
    if self.dev_type == constants.LD_LV:
      return {self.logical_id[0]: amount}
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        return self.children[0].ComputeGrowth(amount)
      else:
        return {}
    else:
      # Other disk types do not require VG space
      return {}
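
  # Illustrative result of ComputeGrowth for a plain LVM-backed disk (comment
  # only; the volume group and LV names are made up):
  #
  #   >>> disk = Disk(dev_type=constants.LD_LV, logical_id=("xenvg", "lv0"),
  #   ...             size=1024)
  #   >>> disk.ComputeGrowth(512)
  #   {'xenvg': 512}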

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
                         constants.LD_RBD, constants.LD_EXT):
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def Update(self, size=None, mode=None):
    """Apply changes to size and mode.

    """
    if self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].Update(size=size, mode=mode)
    else:
      assert not self.children

    if size is not None:
      self.size = size
    if mode is not None:
      self.mode = mode

  def UnsetSize(self):
    """Recursively sets the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks.

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()

    # FIXME: Make this configurable in Ganeti 2.7
    self.params = {}
    # add here config upgrade for this disk

  @staticmethod
  def ComputeLDParams(disk_template, disk_params):
    """Computes Logical Disk parameters from Disk Template parameters.

    @type disk_template: string
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
    @type disk_params: dict
    @param disk_params: disk template parameters;
                        dict(template_name -> parameters)
    @rtype: list(dict)
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
      contains the LD parameters of the node. The tree is flattened in-order.

    """
    if disk_template not in constants.DISK_TEMPLATES:
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)

    assert disk_template in disk_params

    result = list()
    dt_params = disk_params[disk_template]
    if disk_template == constants.DT_DRBD8:
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], {
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
        }))

      # data LV
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
        }))

      # metadata LV
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
        }))

    elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])

    elif disk_template == constants.DT_PLAIN:
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
        }))

    elif disk_template == constants.DT_BLOCK:
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])

    elif disk_template == constants.DT_RBD:
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], {
        constants.LDP_POOL: dt_params[constants.RBD_POOL],
        }))

    elif disk_template == constants.DT_EXT:
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT])

    return result


class InstancePolicy(ConfigObject):
  """Config object representing instance policy limits dictionary.

  Note that this object is not actually used in the config, it's just
  used as a placeholder for a few functions.

  """
  @classmethod
  def CheckParameterSyntax(cls, ipolicy, check_std):
    """Check the instance policy for validity.

    """
    for param in constants.ISPECS_PARAMETERS:
      InstancePolicy.CheckISpecSyntax(ipolicy, param, check_std)
    if constants.IPOLICY_DTS in ipolicy:
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
    for key in constants.IPOLICY_PARAMETERS:
      if key in ipolicy:
        InstancePolicy.CheckParameter(key, ipolicy[key])
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
    if wrong_keys:
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
                                      utils.CommaJoin(wrong_keys))

  @classmethod
  def CheckISpecSyntax(cls, ipolicy, name, check_std):
    """Check the instance policy for validity on a given key.

    We check if the instance policy makes sense for a given key, that is
    if ipolicy[min][name] <= ipolicy[std][name] <= ipolicy[max][name].

    @type ipolicy: dict
    @param ipolicy: dictionary with min, max, std specs
    @type name: string
    @param name: what the limits are for
    @type check_std: bool
    @param check_std: Whether to check std value or just assume compliance
    @raise errors.ConfigurationError: when specs for the given name are not valid

    """
    min_v = ipolicy[constants.ISPECS_MIN].get(name, 0)

    if check_std:
      std_v = ipolicy[constants.ISPECS_STD].get(name, min_v)
      std_msg = std_v
    else:
      std_v = min_v
      std_msg = "-"

    max_v = ipolicy[constants.ISPECS_MAX].get(name, std_v)
    err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
           (name,
            ipolicy[constants.ISPECS_MIN].get(name, "-"),
            ipolicy[constants.ISPECS_MAX].get(name, "-"),
            std_msg))
    if min_v > std_v or std_v > max_v:
      raise errors.ConfigurationError(err)

  @classmethod
  def CheckDiskTemplates(cls, disk_templates):
    """Checks the disk templates for validity.

    """
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
    if wrong:
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
                                      utils.CommaJoin(wrong))

  @classmethod
  def CheckParameter(cls, key, value):
    """Checks a parameter.

    Currently we expect all parameters to be float values.

    """
    try:
      float(value)
    except (TypeError, ValueError), err:
      raise errors.ConfigurationError("Invalid value for key '%s':"
                                      " '%s', error: %s" % (key, value, err))


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "osparams",
    "admin_state",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of names of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of names of all the nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary of
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
        volumeN is of the form "vg_name/lv_name", compatible with
        GetVolumeList()

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = {
        node: [],
        }
      ret = lvmap
    else:
      if not node in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret
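
  # Illustrative return value of MapLVsByNode for a plain-LVM instance
  # (comment only; node and volume names are made up):
  #
  #   {"node1.example.com": ["xenvg/disk0", "xenvg/disk1"]}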
1103

    
1104
  def FindDisk(self, idx):
1105
    """Find a disk given having a specified index.
1106

1107
    This is just a wrapper that does validation of the index.
1108

1109
    @type idx: int
1110
    @param idx: the disk index
1111
    @rtype: L{Disk}
1112
    @return: the corresponding disk
1113
    @raise errors.OpPrereqError: when the given index is not valid
1114

1115
    """
1116
    try:
1117
      idx = int(idx)
1118
      return self.disks[idx]
1119
    except (TypeError, ValueError), err:
1120
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1121
                                 errors.ECODE_INVAL)
1122
    except IndexError:
1123
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1124
                                 " 0 to %d" % (idx, len(self.disks) - 1),
1125
                                 errors.ECODE_INVAL)
1126

    
1127
  def ToDict(self):
1128
    """Instance-specific conversion to standard python types.
1129

1130
    This replaces the children lists of objects with lists of standard
1131
    python types.
1132

1133
    """
1134
    bo = super(Instance, self).ToDict()
1135

    
1136
    for attr in "nics", "disks":
1137
      alist = bo.get(attr, None)
1138
      if alist:
1139
        nlist = self._ContainerToDicts(alist)
1140
      else:
1141
        nlist = []
1142
      bo[attr] = nlist
1143
    return bo
1144

    
1145
  @classmethod
1146
  def FromDict(cls, val):
1147
    """Custom function for instances.
1148

1149
    """
1150
    if "admin_state" not in val:
1151
      if val.get("admin_up", False):
1152
        val["admin_state"] = constants.ADMINST_UP
1153
      else:
1154
        val["admin_state"] = constants.ADMINST_DOWN
1155
    if "admin_up" in val:
1156
      del val["admin_up"]
1157
    obj = super(Instance, cls).FromDict(val)
1158
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
1159
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
1160
    return obj
1161

    
1162
  def UpgradeConfig(self):
1163
    """Fill defaults for missing configuration values.
1164

1165
    """
1166
    for nic in self.nics:
1167
      nic.UpgradeConfig()
1168
    for disk in self.disks:
1169
      disk.UpgradeConfig()
1170
    if self.hvparams:
1171
      for key in constants.HVC_GLOBALS:
1172
        try:
1173
          del self.hvparams[key]
1174
        except KeyError:
1175
          pass
1176
    if self.osparams is None:
1177
      self.osparams = {}
1178
    UpgradeBeParams(self.beparams)
1179

    
1180

    
1181
class OS(ConfigObject):
1182
  """Config object representing an operating system.
1183

1184
  @type supported_parameters: list
1185
  @ivar supported_parameters: a list of tuples, name and description,
1186
      containing the supported parameters by this OS
1187

1188
  @type VARIANT_DELIM: string
1189
  @cvar VARIANT_DELIM: the variant delimiter
1190

1191
  """
1192
  __slots__ = [
1193
    "name",
1194
    "path",
1195
    "api_versions",
1196
    "create_script",
1197
    "export_script",
1198
    "import_script",
1199
    "rename_script",
1200
    "verify_script",
1201
    "supported_variants",
1202
    "supported_parameters",
1203
    ]
1204

    
1205
  VARIANT_DELIM = "+"
1206

    
1207
  @classmethod
1208
  def SplitNameVariant(cls, name):
1209
    """Splits the name into the proper name and variant.
1210

1211
    @param name: the OS (unprocessed) name
1212
    @rtype: list
1213
    @return: a list of two elements; if the original name didn't
1214
        contain a variant, it's returned as an empty string
1215

1216
    """
1217
    nv = name.split(cls.VARIANT_DELIM, 1)
1218
    if len(nv) == 1:
1219
      nv.append("")
1220
    return nv
1221

    
1222
  @classmethod
1223
  def GetName(cls, name):
1224
    """Returns the proper name of the os (without the variant).
1225

1226
    @param name: the OS (unprocessed) name
1227

1228
    """
1229
    return cls.SplitNameVariant(name)[0]
1230

    
1231
  @classmethod
1232
  def GetVariant(cls, name):
1233
    """Returns the variant the os (without the base name).
1234

1235
    @param name: the OS (unprocessed) name
1236

1237
    """
1238
    return cls.SplitNameVariant(name)[1]
1239

    
1240

    
1241
class ExtStorage(ConfigObject):
1242
  """Config object representing an External Storage Provider.
1243

1244
  """
1245
  __slots__ = [
1246
    "name",
1247
    "path",
1248
    "create_script",
1249
    "remove_script",
1250
    "grow_script",
1251
    "attach_script",
1252
    "detach_script",
1253
    "setinfo_script",
1254
    "verify_script",
1255
    "supported_parameters",
1256
    ]
1257

    
1258

    
1259
class NodeHvState(ConfigObject):
1260
  """Hypvervisor state on a node.
1261

1262
  @ivar mem_total: Total amount of memory
1263
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1264
    available)
1265
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1266
    rounding
1267
  @ivar mem_inst: Memory used by instances living on node
1268
  @ivar cpu_total: Total node CPU core count
1269
  @ivar cpu_node: Number of CPU cores reserved for the node itself
1270

1271
  """
1272
  __slots__ = [
1273
    "mem_total",
1274
    "mem_node",
1275
    "mem_hv",
1276
    "mem_inst",
1277
    "cpu_total",
1278
    "cpu_node",
1279
    ] + _TIMESTAMPS
1280

    
1281

    
1282
class NodeDiskState(ConfigObject):
1283
  """Disk state on a node.
1284

1285
  """
1286
  __slots__ = [
1287
    "total",
1288
    "reserved",
1289
    "overhead",
1290
    ] + _TIMESTAMPS
1291

    
1292

    
1293
class Node(TaggableObject):
1294
  """Config object representing a node.
1295

1296
  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1297
  @ivar hv_state_static: Hypervisor state overriden by user
1298
  @ivar disk_state: Disk state (e.g. free space)
1299
  @ivar disk_state_static: Disk state overriden by user
1300

1301
  """
1302
  __slots__ = [
1303
    "name",
1304
    "primary_ip",
1305
    "secondary_ip",
1306
    "serial_no",
1307
    "master_candidate",
1308
    "offline",
1309
    "drained",
1310
    "group",
1311
    "master_capable",
1312
    "vm_capable",
1313
    "ndparams",
1314
    "powered",
1315
    "hv_state",
1316
    "hv_state_static",
1317
    "disk_state",
1318
    "disk_state_static",
1319
    ] + _TIMESTAMPS + _UUID
1320

    
1321
  def UpgradeConfig(self):
1322
    """Fill defaults for missing configuration values.
1323

1324
    """
1325
    # pylint: disable=E0203
1326
    # because these are "defined" via slots, not manually
1327
    if self.master_capable is None:
1328
      self.master_capable = True
1329

    
1330
    if self.vm_capable is None:
1331
      self.vm_capable = True
1332

    
1333
    if self.ndparams is None:
1334
      self.ndparams = {}
1335

    
1336
    if self.powered is None:
1337
      self.powered = True
1338

    
1339
  def ToDict(self):
1340
    """Custom function for serializing.
1341

1342
    """
1343
    data = super(Node, self).ToDict()
1344

    
1345
    hv_state = data.get("hv_state", None)
1346
    if hv_state is not None:
1347
      data["hv_state"] = self._ContainerToDicts(hv_state)
1348

    
1349
    disk_state = data.get("disk_state", None)
1350
    if disk_state is not None:
1351
      data["disk_state"] = \
1352
        dict((key, self._ContainerToDicts(value))
1353
             for (key, value) in disk_state.items())
1354

    
1355
    return data
1356

    
1357
  @classmethod
1358
  def FromDict(cls, val):
1359
    """Custom function for deserializing.
1360

1361
    """
1362
    obj = super(Node, cls).FromDict(val)
1363

    
1364
    if obj.hv_state is not None:
1365
      obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1366

    
1367
    if obj.disk_state is not None:
1368
      obj.disk_state = \
1369
        dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState))
1370
             for (key, value) in obj.disk_state.items())
1371

    
1372
    return obj
1373

    
1374

    
1375
class NodeGroup(TaggableObject):
1376
  """Config object representing a node group."""
1377
  __slots__ = [
1378
    "name",
1379
    "members",
1380
    "ndparams",
1381
    "diskparams",
1382
    "ipolicy",
1383
    "serial_no",
1384
    "hv_state_static",
1385
    "disk_state_static",
1386
    "alloc_policy",
1387
    "networks",
1388
    ] + _TIMESTAMPS + _UUID
1389

    
1390
  def ToDict(self):
1391
    """Custom function for nodegroup.
1392

1393
    This discards the members object, which gets recalculated and is only kept
1394
    in memory.
1395

1396
    """
1397
    mydict = super(NodeGroup, self).ToDict()
1398
    del mydict["members"]
1399
    return mydict
1400

    
1401
  @classmethod
1402
  def FromDict(cls, val):
1403
    """Custom function for nodegroup.
1404

1405
    The members slot is initialized to an empty list, upon deserialization.
1406

1407
    """
1408
    obj = super(NodeGroup, cls).FromDict(val)
1409
    obj.members = []
1410
    return obj
1411

    
1412
  def UpgradeConfig(self):
1413
    """Fill defaults for missing configuration values.
1414

1415
    """
1416
    if self.ndparams is None:
1417
      self.ndparams = {}
1418

    
1419
    if self.serial_no is None:
1420
      self.serial_no = 1
1421

    
1422
    if self.alloc_policy is None:
1423
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1424

    
1425
    # We only update mtime, and not ctime, since we would not be able
1426
    # to provide a correct value for creation time.
1427
    if self.mtime is None:
1428
      self.mtime = time.time()
1429

    
1430
    if self.diskparams is None:
1431
      self.diskparams = {}
1432
    if self.ipolicy is None:
1433
      self.ipolicy = MakeEmptyIPolicy()
1434

    
1435
    if self.networks is None:
1436
      self.networks = {}
1437

    
1438
  def FillND(self, node):
1439
    """Return filled out ndparams for L{objects.Node}
1440

1441
    @type node: L{objects.Node}
1442
    @param node: A Node object to fill
1443
    @return a copy of the node's ndparams with defaults filled
1444

1445
    """
1446
    return self.SimpleFillND(node.ndparams)
1447

    
1448
  def SimpleFillND(self, ndparams):
1449
    """Fill a given ndparams dict with defaults.
1450

1451
    @type ndparams: dict
1452
    @param ndparams: the dict to fill
1453
    @rtype: dict
1454
    @return: a copy of the passed in ndparams with missing keys filled
1455
        from the node group defaults
1456

1457
    """
1458
    return FillDict(self.ndparams, ndparams)
1459

    
1460

    
1461
class Cluster(TaggableObject):
1462
  """Config object representing the cluster."""
1463
  __slots__ = [
1464
    "serial_no",
1465
    "rsahostkeypub",
1466
    "highest_used_port",
1467
    "tcpudp_port_pool",
1468
    "mac_prefix",
1469
    "volume_group_name",
1470
    "reserved_lvs",
1471
    "drbd_usermode_helper",
1472
    "default_bridge",
1473
    "default_hypervisor",
1474
    "master_node",
1475
    "master_ip",
1476
    "master_netdev",
1477
    "master_netmask",
1478
    "use_external_mip_script",
1479
    "cluster_name",
1480
    "file_storage_dir",
1481
    "shared_file_storage_dir",
1482
    "enabled_hypervisors",
1483
    "hvparams",
1484
    "ipolicy",
1485
    "os_hvp",
1486
    "beparams",
1487
    "osparams",
1488
    "nicparams",
1489
    "ndparams",
1490
    "diskparams",
1491
    "candidate_pool_size",
1492
    "modify_etc_hosts",
1493
    "modify_ssh_setup",
1494
    "maintain_node_health",
1495
    "uid_pool",
1496
    "default_iallocator",
1497
    "hidden_os",
1498
    "blacklisted_os",
1499
    "primary_ip_family",
1500
    "prealloc_wipe_disks",
1501
    "hv_state_static",
1502
    "disk_state_static",
1503
    ] + _TIMESTAMPS + _UUID
1504

    
1505
  def UpgradeConfig(self):
1506
    """Fill defaults for missing configuration values.
1507

1508
    """
1509
    # pylint: disable=E0203
1510
    # because these are "defined" via slots, not manually
1511
    if self.hvparams is None:
1512
      self.hvparams = constants.HVC_DEFAULTS
1513
    else:
1514
      for hypervisor in self.hvparams:
1515
        self.hvparams[hypervisor] = FillDict(
1516
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1517

    
1518
    if self.os_hvp is None:
1519
      self.os_hvp = {}
1520

    
1521
    # osparams added before 2.2
1522
    if self.osparams is None:
1523
      self.osparams = {}
1524

    
1525
    self.ndparams = UpgradeNDParams(self.ndparams)
1526

    
1527
    self.beparams = UpgradeGroupedParams(self.beparams,
1528
                                         constants.BEC_DEFAULTS)
1529
    for beparams_group in self.beparams:
1530
      UpgradeBeParams(self.beparams[beparams_group])
1531

    
1532
    migrate_default_bridge = not self.nicparams
1533
    self.nicparams = UpgradeGroupedParams(self.nicparams,
1534
                                          constants.NICC_DEFAULTS)
1535
    if migrate_default_bridge:
1536
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1537
        self.default_bridge
1538

    
1539
    if self.modify_etc_hosts is None:
1540
      self.modify_etc_hosts = True
1541

    
1542
    if self.modify_ssh_setup is None:
1543
      self.modify_ssh_setup = True
1544

    
1545
    # default_bridge is no longer used in 2.1. The slot is left there to
1546
    # support auto-upgrading. It can be removed once we decide to deprecate
1547
    # upgrading straight from 2.0.
1548
    if self.default_bridge is not None:
1549
      self.default_bridge = None
1550

    
1551
    # default_hypervisor is just the first enabled one in 2.1. This slot and
1552
    # code can be removed once upgrading straight from 2.0 is deprecated.
1553
    if self.default_hypervisor is not None:
1554
      self.enabled_hypervisors = ([self.default_hypervisor] +
1555
                                  [hvname for hvname in self.enabled_hypervisors
1556
                                   if hvname != self.default_hypervisor])
1557
      self.default_hypervisor = None
1558

    
1559
    # maintain_node_health added after 2.1.1
1560
    if self.maintain_node_health is None:
1561
      self.maintain_node_health = False
1562

    
1563
    if self.uid_pool is None:
1564
      self.uid_pool = []
1565

    
1566
    if self.default_iallocator is None:
1567
      self.default_iallocator = ""
1568

    
1569
    # reserved_lvs added before 2.2
1570
    if self.reserved_lvs is None:
1571
      self.reserved_lvs = []
1572

    
1573
    # hidden and blacklisted operating systems added before 2.2.1
1574
    if self.hidden_os is None:
1575
      self.hidden_os = []
1576

    
1577
    if self.blacklisted_os is None:
1578
      self.blacklisted_os = []
1579

    
1580
    # primary_ip_family added before 2.3
1581
    if self.primary_ip_family is None:
1582
      self.primary_ip_family = AF_INET
1583

    
1584
    if self.master_netmask is None:
1585
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
1586
      self.master_netmask = ipcls.iplen
1587

    
1588
    if self.prealloc_wipe_disks is None:
1589
      self.prealloc_wipe_disks = False
1590

    
1591
    # shared_file_storage_dir added before 2.5
1592
    if self.shared_file_storage_dir is None:
1593
      self.shared_file_storage_dir = ""
1594

    
1595
    if self.use_external_mip_script is None:
1596
      self.use_external_mip_script = False
1597

    
1598
    if self.diskparams:
1599
      self.diskparams = UpgradeDiskParams(self.diskparams)
1600
    else:
1601
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()
1602

    
1603
    # instance policy added before 2.6
1604
    if self.ipolicy is None:
1605
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
1606
    else:
1607
      # we can either make sure to upgrade the ipolicy always, or only
1608
      # do it in some corner cases (e.g. missing keys); note that this
1609
      # will break any removal of keys from the ipolicy dict
1610
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
1611

    
1612
  @property
1613
  def primary_hypervisor(self):
1614
    """The first hypervisor is the primary.
1615

1616
    Useful, for example, for L{Node}'s hv/disk state.
1617

1618
    """
1619
    return self.enabled_hypervisors[0]
1620

    
1621
  def ToDict(self):
1622
    """Custom function for cluster.
1623

1624
    """
1625
    mydict = super(Cluster, self).ToDict()
1626
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
1627
    return mydict
1628

    
1629
  @classmethod
1630
  def FromDict(cls, val):
1631
    """Custom function for cluster.
1632

1633
    """
1634
    obj = super(Cluster, cls).FromDict(val)
1635
    if not isinstance(obj.tcpudp_port_pool, set):
1636
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1637
    return obj

  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: The diskparams
    @return: The defaults dict

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict
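
  # Illustrative sketch (hypothetical values, not from the original source):
  # with hvparams = {"kvm": {"acpi": True, "kernel_path": "/vmlinuz"}} and
  # os_hvp = {"debian": {"kvm": {"acpi": False}}}, a call such as
  # GetHVDefaults("kvm", os_name="debian") returns
  # {"acpi": False, "kernel_path": "/vmlinuz"}: per-OS values are layered
  # on top of the cluster-wide hypervisor defaults.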

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
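
  # Added note (not in the original source): with skip_globals=True the keys
  # listed in constants.HVC_GLOBALS are excluded from the result, so
  # per-instance hvparams cannot override cluster-global hypervisor settings.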

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
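
  # Illustrative sketch (hypothetical values, not from the original source):
  # with beparams[constants.PP_DEFAULT] = {"vcpus": 1, "auto_balance": True},
  # SimpleFillBE({"vcpus": 4}) returns {"vcpus": 4, "auto_balance": True}:
  # explicitly given values win, missing ones come from the cluster defaults.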

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
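
  # Illustrative sketch (hypothetical OS name, not from the original source):
  # for os_name = "debootstrap+testing" the parameters are layered as
  # osparams["debootstrap"] (base OS), then osparams["debootstrap+testing"]
  # (the variant), then os_params (explicitly given values), with later
  # layers overriding earlier ones.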

  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}.

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))
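
  # Added note (not in the original source): defaults are applied as
  # node.ndparams on top of nodegroup.ndparams on top of the cluster-level
  # ndparams, so the most specific setting wins.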

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True
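
  # Added note (illustrative, not from the original source): each assertion
  # reads "this field must be set unless the console kind is one where it
  # does not apply"; e.g. an SSH console needs instance, host, user and
  # command, while a VNC console needs instance, host, port and display.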


class Network(TaggableObject):
  """Object representing a network definition for ganeti.

  """
  __slots__ = [
    "name",
    "serial_no",
    "network_type",
    "mac_prefix",
    "family",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "size",
    "reservations",
    "ext_reservations",
    ] + _TIMESTAMPS + _UUID


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
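
  # Added usage sketch (hypothetical values, not from the original source):
  #   cfg = SerializableConfigParser()
  #   cfg.add_section("node")
  #   cfg.set("node", "name", "node1.example.com")
  #   data = cfg.Dumps()                      # serialize to a string
  #   cfg2 = SerializableConfigParser.Loads(data)
  #   cfg2.get("node", "name")                # -> "node1.example.com"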


class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    ]

  def IsEmpty(self):
    """Is this PV empty?

    """
    return self.size <= (self.free + 1)

  def IsAllocatable(self):
    """Is this PV allocatable?

    """
    return ("a" in self.attributes)