#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable=E0203,W0201,R0902

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

# R0902: Allow instances of these objects to have more than 20 attributes

import ConfigParser
import re
import copy
import time
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants
from ganeti import netutils
from ganeti import utils

from socket import AF_INET


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]


def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict

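# Illustrative sketch (not part of the original module): FillDict layers the
# custom values over a deep copy of the defaults and then drops the skipped
# keys, leaving both input dictionaries untouched:
#
#   FillDict({"a": 1, "b": 2}, {"b": 3})                  -> {"a": 1, "b": 3}
#   FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"]) -> {"b": 3}

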
def FillIPolicy(default_ipolicy, custom_ipolicy, skip_keys=None):
  """Fills an instance policy with defaults.

  """
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
  ret_dict = {}
  for key in constants.IPOLICY_ISPECS:
    ret_dict[key] = FillDict(default_ipolicy[key],
                             custom_ipolicy.get(key, {}),
                             skip_keys=skip_keys)
  # list items
  for key in [constants.IPOLICY_DTS]:
    ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key]))
  # other items which we know we can directly copy (immutables)
  for key in constants.IPOLICY_PARAMETERS:
    ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key])

  return ret_dict


def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
  """Fills the disk parameter defaults.

  @see: L{FillDict} for parameters and return value

  """
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES

  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
                             skip_keys=skip_keys))
              for dt in constants.DISK_TEMPLATES)


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target


def UpgradeBeParams(target):
  """Update the be parameters dict to the new format.

  @type target: dict
  @param target: "be" parameters dict

  """
  if constants.BE_MEMORY in target:
    memory = target[constants.BE_MEMORY]
    target[constants.BE_MAXMEM] = memory
    target[constants.BE_MINMEM] = memory
    del target[constants.BE_MEMORY]

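# Illustrative sketch (not part of the original module): UpgradeBeParams
# rewrites the legacy single "memory" backend parameter in place, assuming
# the usual constant values "memory"/"maxmem"/"minmem":
#
#   be = {"memory": 128}
#   UpgradeBeParams(be)
#   # be is now {"maxmem": 128, "minmem": 128}

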
def UpgradeDiskParams(diskparams):
  """Upgrade the disk parameters.

  @type diskparams: dict
  @param diskparams: disk parameters to upgrade
  @rtype: dict
  @return: the upgraded disk parameters dict

  """
  if not diskparams:
    result = {}
  else:
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)

  return result


def UpgradeNDParams(ndparams):
  """Upgrade ndparams structure.

  @type ndparams: dict
  @param ndparams: node parameters to upgrade
  @rtype: dict
  @return: the upgraded node parameters dict

  """
  if ndparams is None:
    ndparams = {}

  return FillDict(constants.NDC_DEFAULTS, ndparams)


def MakeEmptyIPolicy():
  """Create empty IPolicy dictionary.

  """
  return dict([
    (constants.ISPECS_MIN, {}),
    (constants.ISPECS_MAX, {}),
    (constants.ISPECS_STD, {}),
    ])


class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    slots = self._all_slots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  @classmethod
  def _all_slots(cls):
    """Compute the list of all declared slots for a class.

    """
    slots = []
    for parent in cls.__mro__:
      slots.extend(getattr(parent, "__slots__", []))
    return slots

  #: Public getter for the defined slots
  GetAllSlots = _all_slots

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self._all_slots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable=W0142
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if source is None:
      source = c_type()
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass


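# Illustrative sketch (not part of the original module): ConfigObject
# subclasses serialize to plain dicts and back, e.g. with the NIC class
# defined below (MAC address made up):
#
#   nic = NIC(mac="aa:00:00:00:00:01")
#   data = nic.ToDict()          # {"mac": "aa:00:00:00:00:01"}; unset slots
#                                # (ip, nicparams) are simply omitted
#   clone = NIC.FromDict(data)   # new NIC object; clone.ip is None

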
class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj


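# Illustrative sketch (not part of the original module): tag handling on any
# TaggableObject subclass (Instance, Node, NodeGroup, Cluster); tag values
# are made up:
#
#   node.AddTag("rack:r1-a4")      # validated against VALID_TAG_RE
#   node.AddTag("bad tag")         # raises errors.TagError (space not allowed)
#   "rack:r1-a4" in node.GetTags() # -> True
#   node.ToDict()["tags"]          # tags are kept as a set, dumped as a list

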
class MasterNetworkParameters(ConfigObject):
  """Network configuration parameters for the master

  @ivar name: master name
  @ivar ip: master IP
  @ivar netmask: master netmask
  @ivar netdev: master network device
  @ivar ip_family: master IP family

  """
  __slots__ = [
    "name",
    "ip",
    "netmask",
    "netdev",
    "ip_family"
    ]


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = [
    "version",
    "cluster",
    "nodes",
    "nodegroups",
    "instances",
    "serial_no",
    ] + _TIMESTAMPS

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances", "nodegroups":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if there is at least one disk of the given type in the
    configuration.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set a helper let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams:  dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
        nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode", "params"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    elif self.dev_type == constants.LD_BLOCKDEV:
      return self.logical_id[1]
    elif self.dev_type == constants.LD_RBD:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all other we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives on (or, in
    case of a top-level device, the primary node of the devices'
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
                         constants.LD_BLOCKDEV, constants.LD_RBD]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def ComputeGrowth(self, amount):
    """Compute the per-VG growth requirements.

    This only works for VG-based disks.

    @type amount: integer
    @param amount: the desired increase in (user-visible) disk space
    @rtype: dict
    @return: a dictionary of volume-groups and the required size

    """
    if self.dev_type == constants.LD_LV:
      return {self.logical_id[0]: amount}
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        return self.children[0].ComputeGrowth(amount)
      else:
        return {}
    else:
      # Other disk types do not require VG space
      return {}

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
                         constants.LD_RBD):
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def Update(self, size=None, mode=None):
    """Apply changes to size and mode.

    """
    if self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].Update(size=size, mode=mode)
    else:
      assert not self.children

    if size is not None:
      self.size = size
    if mode is not None:
      self.mode = mode

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()

    # FIXME: Make this configurable in Ganeti 2.7
    self.params = {}
    # add here config upgrade for this disk

  @staticmethod
  def ComputeLDParams(disk_template, disk_params):
    """Computes Logical Disk parameters from Disk Template parameters.

    @type disk_template: string
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
    @type disk_params: dict
    @param disk_params: disk template parameters;
                        dict(template_name -> parameters)
    @rtype: list(dict)
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
      contains the LD parameters of the node. The tree is flattened in-order.

    """
    if disk_template not in constants.DISK_TEMPLATES:
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)

    assert disk_template in disk_params

    result = list()
    dt_params = disk_params[disk_template]
    if disk_template == constants.DT_DRBD8:
      drbd_params = {
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
        }

      drbd_params = \
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8],
                 drbd_params)

      result.append(drbd_params)

      # data LV
      data_params = {
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
        }
      data_params = \
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
                 data_params)
      result.append(data_params)

      # metadata LV
      meta_params = {
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
        }
      meta_params = \
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
                 meta_params)
      result.append(meta_params)

    elif (disk_template == constants.DT_FILE or
          disk_template == constants.DT_SHARED_FILE):
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])

    elif disk_template == constants.DT_PLAIN:
      params = {
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
        }
      params = \
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
                 params)
      result.append(params)

    elif disk_template == constants.DT_BLOCK:
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])

    elif disk_template == constants.DT_RBD:
      params = {
        constants.LDP_POOL: dt_params[constants.RBD_POOL]
        }
      params = \
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD],
                 params)
      result.append(params)

    return result


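# Illustrative sketch (not part of the original module) of how SetPhysicalID
# resolves a DRBD8 disk; node names and IPs below are made up:
#
#   disk.logical_id  = ("node1", "node2", 11000, 0, 1, "secret")
#   disk.SetPhysicalID("node1", {"node1": "192.0.2.1", "node2": "192.0.2.2"})
#   disk.physical_id -> ("192.0.2.1", 11000, "192.0.2.2", 11000, 0, "secret")
#
# i.e. (local_ip, port, remote_ip, port, local_minor, secret), with the end
# belonging to the node being configured listed first.

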
class InstancePolicy(ConfigObject):
  """Config object representing instance policy limits dictionary.

  Note that this object is not actually used in the config, it's just
  used as a placeholder for a few functions.

  """
  @classmethod
  def CheckParameterSyntax(cls, ipolicy):
    """ Check the instance policy for validity.

    """
    for param in constants.ISPECS_PARAMETERS:
      InstancePolicy.CheckISpecSyntax(ipolicy, param)
    if constants.IPOLICY_DTS in ipolicy:
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
    for key in constants.IPOLICY_PARAMETERS:
      if key in ipolicy:
        InstancePolicy.CheckParameter(key, ipolicy[key])
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
    if wrong_keys:
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
                                      utils.CommaJoin(wrong_keys))

  @classmethod
  def CheckISpecSyntax(cls, ipolicy, name):
    """Check the instance policy for validity on a given key.

    We check if the instance policy makes sense for a given key, that is
    if ipolicy[min][name] <= ipolicy[std][name] <= ipolicy[max][name].

    @type ipolicy: dict
    @param ipolicy: dictionary with min, max, std specs
    @type name: string
    @param name: what are the limits for
    @raise errors.ConfigurationError: when specs for given name are not valid

    """
    min_v = ipolicy[constants.ISPECS_MIN].get(name, 0)
    std_v = ipolicy[constants.ISPECS_STD].get(name, min_v)
    max_v = ipolicy[constants.ISPECS_MAX].get(name, std_v)
    err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
           (name,
            ipolicy[constants.ISPECS_MIN].get(name, "-"),
            ipolicy[constants.ISPECS_MAX].get(name, "-"),
            ipolicy[constants.ISPECS_STD].get(name, "-")))
    if min_v > std_v or std_v > max_v:
      raise errors.ConfigurationError(err)

  @classmethod
  def CheckDiskTemplates(cls, disk_templates):
    """Checks the disk templates for validity.

    """
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
    if wrong:
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
                                      utils.CommaJoin(wrong))

  @classmethod
  def CheckParameter(cls, key, value):
    """Checks a parameter.

    Currently we expect all parameters to be float values.

    """
    try:
      float(value)
    except (TypeError, ValueError), err:
      raise errors.ConfigurationError("Invalid value for key '%s':"
                                      " '%s', error: %s" % (key, value, err))


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "osparams",
    "admin_state",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary of
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
        volumeN is of the form "vg_name/lv_name", compatible with
        GetVolumeList()

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = {
        node: [],
        }
      ret = lvmap
    else:
      if not node in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret

  def FindDisk(self, idx):
    """Find a disk given its index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except (TypeError, ValueError), err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
                                 errors.ECODE_INVAL)

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    if "admin_state" not in val:
      if val.get("admin_up", False):
        val["admin_state"] = constants.ADMINST_UP
      else:
        val["admin_state"] = constants.ADMINST_DOWN
    if "admin_up" in val:
      del val["admin_up"]
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    if self.hvparams:
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass
    if self.osparams is None:
      self.osparams = {}
    UpgradeBeParams(self.beparams)


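# Illustrative sketch (not part of the original module): for a plain (LVM)
# instance with a single disk on volume group "xenvg", MapLVsByNode returns a
# node -> ["vg/lv", ...] mapping (node and LV names below are made up):
#
#   inst.MapLVsByNode()
#   -> {"node1.example.com": ["xenvg/11a4b622.disk0"]}
#
# For DRBD-based disks the backing LVs are reported on both the primary and
# the secondary node.

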
class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the supported parameters by this OS

  @type VARIANT_DELIM: string
  @cvar VARIANT_DELIM: the variant delimiter

  """
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]

  VARIANT_DELIM = "+"

  @classmethod
  def SplitNameVariant(cls, name):
    """Splits the name into the proper name and variant.

    @param name: the OS (unprocessed) name
    @rtype: list
    @return: a list of two elements; if the original name didn't
        contain a variant, it's returned as an empty string

    """
    nv = name.split(cls.VARIANT_DELIM, 1)
    if len(nv) == 1:
      nv.append("")
    return nv

  @classmethod
  def GetName(cls, name):
    """Returns the proper name of the os (without the variant).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[0]

  @classmethod
  def GetVariant(cls, name):
    """Returns the variant of the os (without the base name).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[1]


class NodeHvState(ConfigObject):
  """Hypervisor state on a node.

  @ivar mem_total: Total amount of memory
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
    available)
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
    rounding
  @ivar mem_inst: Memory used by instances living on node
  @ivar cpu_total: Total node CPU core count
  @ivar cpu_node: Number of CPU cores reserved for the node itself

  """
  __slots__ = [
    "mem_total",
    "mem_node",
    "mem_hv",
    "mem_inst",
    "cpu_total",
    "cpu_node",
    ] + _TIMESTAMPS


class NodeDiskState(ConfigObject):
  """Disk state on a node.

  """
  __slots__ = [
    "total",
    "reserved",
    "overhead",
    ] + _TIMESTAMPS


class Node(TaggableObject):
  """Config object representing a node.

  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
  @ivar hv_state_static: Hypervisor state overridden by user
  @ivar disk_state: Disk state (e.g. free space)
  @ivar disk_state_static: Disk state overridden by user

  """
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    "group",
    "master_capable",
    "vm_capable",
    "ndparams",
    "powered",
    "hv_state",
    "hv_state_static",
    "disk_state",
    "disk_state_static",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.master_capable is None:
      self.master_capable = True

    if self.vm_capable is None:
      self.vm_capable = True

    if self.ndparams is None:
      self.ndparams = {}

    if self.powered is None:
      self.powered = True

  def ToDict(self):
    """Custom function for serializing.

    """
    data = super(Node, self).ToDict()

    hv_state = data.get("hv_state", None)
    if hv_state is not None:
      data["hv_state"] = self._ContainerToDicts(hv_state)

    disk_state = data.get("disk_state", None)
    if disk_state is not None:
      data["disk_state"] = \
        dict((key, self._ContainerToDicts(value))
             for (key, value) in disk_state.items())

    return data

  @classmethod
  def FromDict(cls, val):
    """Custom function for deserializing.

    """
    obj = super(Node, cls).FromDict(val)

    if obj.hv_state is not None:
      obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState)

    if obj.disk_state is not None:
      obj.disk_state = \
        dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState))
             for (key, value) in obj.disk_state.items())

    return obj


class NodeGroup(TaggableObject):
  """Config object representing a node group."""
  __slots__ = [
    "name",
    "members",
    "ndparams",
    "diskparams",
    "ipolicy",
    "serial_no",
    "hv_state_static",
    "disk_state_static",
    "alloc_policy",
    ] + _TIMESTAMPS + _UUID

  def ToDict(self):
    """Custom function for nodegroup.

    This discards the members object, which gets recalculated and is only kept
    in memory.

    """
    mydict = super(NodeGroup, self).ToDict()
    del mydict["members"]
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for nodegroup.

    The members slot is initialized to an empty list, upon deserialization.

    """
    obj = super(NodeGroup, cls).FromDict(val)
    obj.members = []
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.ndparams is None:
      self.ndparams = {}

    if self.serial_no is None:
      self.serial_no = 1

    if self.alloc_policy is None:
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED

    # We only update mtime, and not ctime, since we would not be able
    # to provide a correct value for creation time.
    if self.mtime is None:
      self.mtime = time.time()

    if self.diskparams is None:
      self.diskparams = {}
    if self.ipolicy is None:
      self.ipolicy = MakeEmptyIPolicy()

  def FillND(self, node):
    """Return filled out ndparams for L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(node.ndparams)

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the node group defaults

    """
    return FillDict(self.ndparams, ndparams)


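# Illustrative sketch (not part of the original module): node parameters are
# resolved by layering a node's own ndparams over its group's; the keys and
# values below are purely illustrative:
#
#   group.ndparams = {"oob_program": "/bin/true", "param": 1}
#   node.ndparams  = {"param": 4}
#   group.FillND(node) -> {"oob_program": "/bin/true", "param": 4}

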
class Cluster(TaggableObject):
1454
  """Config object representing the cluster."""
1455
  __slots__ = [
1456
    "serial_no",
1457
    "rsahostkeypub",
1458
    "highest_used_port",
1459
    "tcpudp_port_pool",
1460
    "mac_prefix",
1461
    "volume_group_name",
1462
    "reserved_lvs",
1463
    "drbd_usermode_helper",
1464
    "default_bridge",
1465
    "default_hypervisor",
1466
    "master_node",
1467
    "master_ip",
1468
    "master_netdev",
1469
    "master_netmask",
1470
    "use_external_mip_script",
1471
    "cluster_name",
1472
    "file_storage_dir",
1473
    "shared_file_storage_dir",
1474
    "enabled_hypervisors",
1475
    "hvparams",
1476
    "ipolicy",
1477
    "os_hvp",
1478
    "beparams",
1479
    "osparams",
1480
    "nicparams",
1481
    "ndparams",
1482
    "diskparams",
1483
    "candidate_pool_size",
1484
    "modify_etc_hosts",
1485
    "modify_ssh_setup",
1486
    "maintain_node_health",
1487
    "uid_pool",
1488
    "default_iallocator",
1489
    "hidden_os",
1490
    "blacklisted_os",
1491
    "primary_ip_family",
1492
    "prealloc_wipe_disks",
1493
    "hv_state_static",
1494
    "disk_state_static",
1495
    ] + _TIMESTAMPS + _UUID
1496

    
1497
  def UpgradeConfig(self):
1498
    """Fill defaults for missing configuration values.
1499

1500
    """
1501
    # pylint: disable=E0203
1502
    # because these are "defined" via slots, not manually
1503
    if self.hvparams is None:
1504
      self.hvparams = constants.HVC_DEFAULTS
1505
    else:
1506
      for hypervisor in self.hvparams:
1507
        self.hvparams[hypervisor] = FillDict(
1508
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1509

    
1510
    if self.os_hvp is None:
1511
      self.os_hvp = {}
1512

    
1513
    # osparams added before 2.2
1514
    if self.osparams is None:
1515
      self.osparams = {}
1516

    
1517
    self.ndparams = UpgradeNDParams(self.ndparams)
1518

    
1519
    self.beparams = UpgradeGroupedParams(self.beparams,
1520
                                         constants.BEC_DEFAULTS)
1521
    for beparams_group in self.beparams:
1522
      UpgradeBeParams(self.beparams[beparams_group])
1523

    
1524
    migrate_default_bridge = not self.nicparams
1525
    self.nicparams = UpgradeGroupedParams(self.nicparams,
1526
                                          constants.NICC_DEFAULTS)
1527
    if migrate_default_bridge:
1528
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1529
        self.default_bridge
1530

    
1531
    if self.modify_etc_hosts is None:
1532
      self.modify_etc_hosts = True
1533

    
1534
    if self.modify_ssh_setup is None:
1535
      self.modify_ssh_setup = True
1536

    
1537
    # default_bridge is no longer used in 2.1. The slot is left there to
1538
    # support auto-upgrading. It can be removed once we decide to deprecate
1539
    # upgrading straight from 2.0.
1540
    if self.default_bridge is not None:
1541
      self.default_bridge = None
1542

    
1543
    # default_hypervisor is just the first enabled one in 2.1. This slot and
1544
    # code can be removed once upgrading straight from 2.0 is deprecated.
1545
    if self.default_hypervisor is not None:
1546
      self.enabled_hypervisors = ([self.default_hypervisor] +
1547
        [hvname for hvname in self.enabled_hypervisors
1548
         if hvname != self.default_hypervisor])
1549
      self.default_hypervisor = None
1550

    
1551
    # maintain_node_health added after 2.1.1
1552
    if self.maintain_node_health is None:
1553
      self.maintain_node_health = False
1554

    
1555
    if self.uid_pool is None:
1556
      self.uid_pool = []
1557

    
1558
    if self.default_iallocator is None:
1559
      self.default_iallocator = ""
1560

    
1561
    # reserved_lvs added before 2.2
1562
    if self.reserved_lvs is None:
1563
      self.reserved_lvs = []
1564

    
1565
    # hidden and blacklisted operating systems added before 2.2.1
1566
    if self.hidden_os is None:
1567
      self.hidden_os = []
1568

    
1569
    if self.blacklisted_os is None:
1570
      self.blacklisted_os = []
1571

    
1572
    # primary_ip_family added before 2.3
1573
    if self.primary_ip_family is None:
1574
      self.primary_ip_family = AF_INET
1575

    
1576
    if self.master_netmask is None:
1577
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
1578
      self.master_netmask = ipcls.iplen
1579

    
1580
    if self.prealloc_wipe_disks is None:
1581
      self.prealloc_wipe_disks = False
1582

    
1583
    # shared_file_storage_dir added before 2.5
1584
    if self.shared_file_storage_dir is None:
1585
      self.shared_file_storage_dir = ""
1586

    
1587
    if self.use_external_mip_script is None:
1588
      self.use_external_mip_script = False
1589

    
1590
    if self.diskparams:
1591
      self.diskparams = UpgradeDiskParams(self.diskparams)
1592
    else:
1593
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()
1594

    
1595
    # instance policy added before 2.6
1596
    if self.ipolicy is None:
1597
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
1598
    else:
1599
      # we can either make sure to upgrade the ipolicy always, or only
1600
      # do it in some corner cases (e.g. missing keys); note that this
1601
      # will break any removal of keys from the ipolicy dict
1602
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
1603

    
1604
  @property
1605
  def primary_hypervisor(self):
1606
    """The first hypervisor is the primary.
1607

1608
    Useful, for example, for L{Node}'s hv/disk state.
1609

1610
    """
1611
    return self.enabled_hypervisors[0]
1612

    
1613
  def ToDict(self):
1614
    """Custom function for cluster.
1615

1616
    """
1617
    mydict = super(Cluster, self).ToDict()
1618
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
1619
    return mydict
1620

    
1621
  @classmethod
1622
  def FromDict(cls, val):
1623
    """Custom function for cluster.
1624

1625
    """
1626
    obj = super(Cluster, cls).FromDict(val)
1627
    if not isinstance(obj.tcpudp_port_pool, set):
1628
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1629
    return obj

  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @type diskparams: dict
    @param diskparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in diskparams with missing keys filled
        from the cluster defaults

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict
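
  # Illustrative sketch (hypothetical values, not from the original source):
  # OS-specific hypervisor parameters override the cluster-wide ones, since
  # they are applied later in the fill stack.
  #
  #   cluster.hvparams = {"kvm": {"acpi": True, "kernel_path": "/vmlinuz"}}
  #   cluster.os_hvp = {"debian": {"kvm": {"acpi": False}}}
  #   cluster.GetHVDefaults("kvm", os_name="debian")
  #   # -> {"acpi": False, "kernel_path": "/vmlinuz"}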

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type hvparams: dict
    @param hvparams: the dict to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
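
  # Illustrative note (not from the original source): with skip_globals=True
  # the keys in constants.HVC_GLOBALS are dropped from both the defaults and
  # the supplied hvparams, so a per-instance dict cannot override
  # cluster-global hypervisor settings, e.g. (hypothetical call):
  #
  #   cluster.SimpleFillHV("kvm", "debian", {"acpi": False}, skip_globals=True)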

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
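
  # Illustrative sketch (hypothetical values, not from the original source):
  #
  #   cluster.beparams = {constants.PP_DEFAULT: {"memory": 128, "vcpus": 1}}
  #   cluster.SimpleFillBE({"memory": 512})
  #   # -> {"memory": 512, "vcpus": 1}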

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
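
  # Illustrative sketch (hypothetical values, not from the original source):
  # parameters are layered base OS -> OS variant -> explicitly given values,
  # later layers winning.
  #
  #   cluster.osparams = {"debian": {"mirror": "a"},
  #                       "debian+testing": {"mirror": "b"}}
  #   cluster.SimpleFillOS("debian+testing", {"extra_pkgs": "vim"})
  #   # -> {"mirror": "b", "extra_pkgs": "vim"}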

  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}.

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)
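
  # Illustrative note (not from the original source): FillND above therefore
  # resolves node parameters with node values taking precedence over the node
  # group's, which in turn take precedence over the cluster-wide self.ndparams
  # applied here.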

  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for the import/export daemon.

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj
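
  # Illustrative note (not from the original source): "fields" travels as a
  # list of plain dicts and is converted back into QueryFieldDefinition
  # objects here, e.g. (hypothetical value):
  #
  #   resp = QueryResponse.FromDict({"fields": [{"name": "name"}], "data": []})
  #   # resp.fields[0] is a QueryFieldDefinition instance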


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = [
    ]


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True
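
  # Illustrative sketch (hypothetical values, not from the original source):
  # per the assertions above, an SSH console must carry an instance name,
  # host, user and command, while message, port and display may stay unset:
  #
  #   console = InstanceConsole(instance="inst1.example.com",
  #                             kind=constants.CONS_SSH,
  #                             host="node1.example.com", user="root",
  #                             command=["ssh", "node1.example.com"],
  #                             message=None, port=None, display=None)
  #   console.Validate()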


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/deserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
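
  # Illustrative round-trip sketch (hypothetical values, not from the
  # original source):
  #
  #   scp = SerializableConfigParser()
  #   scp.add_section("node")
  #   scp.set("node", "name", "node1.example.com")
  #   data = scp.Dumps()                  # INI-style text as a string
  #   assert SerializableConfigParser.Loads(data).get("node", "name") == \
  #     "node1.example.com"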