1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable=E0203,W0201,R0902
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
# R0902: Allow instances of these objects to have more than 20 attributes
37

    
38
import ConfigParser
39
import re
40
import copy
41
import time
42
from cStringIO import StringIO
43

    
44
from ganeti import errors
45
from ganeti import constants
46
from ganeti import netutils
47
from ganeti import utils
48

    
49
from socket import AF_INET
50

    
51

    
52
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
53
           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
54

    
55
_TIMESTAMPS = ["ctime", "mtime"]
56
_UUID = ["uuid"]
57

    
58
# constants used to create InstancePolicy dictionary
59
TISPECS_GROUP_TYPES = {
60
  constants.ISPECS_MIN: constants.VTYPE_INT,
61
  constants.ISPECS_MAX: constants.VTYPE_INT,
62
  }
63

    
64
TISPECS_CLUSTER_TYPES = {
65
  constants.ISPECS_MIN: constants.VTYPE_INT,
66
  constants.ISPECS_MAX: constants.VTYPE_INT,
67
  constants.ISPECS_STD: constants.VTYPE_INT,
68
  }
69

    
70

    
71
def FillDict(defaults_dict, custom_dict, skip_keys=None):
72
  """Basic function to apply settings on top a default dict.
73

74
  @type defaults_dict: dict
75
  @param defaults_dict: dictionary holding the default values
76
  @type custom_dict: dict
77
  @param custom_dict: dictionary holding customized values
78
  @type skip_keys: list
79
  @param skip_keys: which keys not to fill
80
  @rtype: dict
81
  @return: dict with the 'full' values
82

83
  """
84
  ret_dict = copy.deepcopy(defaults_dict)
85
  ret_dict.update(custom_dict)
86
  if skip_keys:
87
    for k in skip_keys:
88
      try:
89
        del ret_dict[k]
90
      except KeyError:
91
        pass
92
  return ret_dict
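
# Illustrative usage (hypothetical values): defaults are deep-copied, then
# overridden by the custom dict, and skip_keys are removed from the result.
#   >>> FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"])
#   {'b': 3}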
93

    
94

    
95
def FillIPolicy(default_ipolicy, custom_ipolicy, skip_keys=None):
96
  """Fills an instance policy with defaults.
97

98
  """
99
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
100
  ret_dict = {}
101
  for key in constants.IPOLICY_ISPECS:
102
    ret_dict[key] = FillDict(default_ipolicy[key],
103
                             custom_ipolicy.get(key, {}),
104
                             skip_keys=skip_keys)
105
  # list items
106
  for key in [constants.IPOLICY_DTS]:
107
    ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key]))
108
  # other items which we know we can directly copy (immutables)
109
  for key in constants.IPOLICY_PARAMETERS:
110
    ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key])
111

    
112
  return ret_dict
113

    
114

    
115
def UpgradeGroupedParams(target, defaults):
116
  """Update all groups for the target parameter.
117

118
  @type target: dict of dicts
119
  @param target: {group: {parameter: value}}
120
  @type defaults: dict
121
  @param defaults: default parameter values
122

123
  """
124
  if target is None:
125
    target = {constants.PP_DEFAULT: defaults}
126
  else:
127
    for group in target:
128
      target[group] = FillDict(defaults, target[group])
129
  return target
130

    
131

    
132
def UpgradeBeParams(target):
133
  """Update the be parameters dict to the new format.
134

135
  @type target: dict
136
  @param target: "be" parameters dict
137

138
  """
139
  if constants.BE_MEMORY in target:
140
    memory = target[constants.BE_MEMORY]
141
    target[constants.BE_MAXMEM] = memory
142
    target[constants.BE_MINMEM] = memory
143
    del target[constants.BE_MEMORY]
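
# Illustrative usage (hypothetical values): a legacy dict using the single
# "memory" key is rewritten in place to the maxmem/minmem pair.
#   >>> be = {constants.BE_MEMORY: 128}
#   >>> UpgradeBeParams(be)
#   >>> be == {constants.BE_MAXMEM: 128, constants.BE_MINMEM: 128}
#   True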
144

    
145

    
146
def UpgradeDiskParams(diskparams):
147
  """Upgrade the disk parameters.
148

149
  @type diskparams: dict
150
  @param diskparams: disk parameters to upgrade
151
  @rtype: dict
152
  @return: the upgraded disk parameters dict
153

154
  """
155
  result = dict()
156
  if diskparams is None:
157
    result = constants.DISK_DT_DEFAULTS.copy()
158
  else:
159
    # Update the disk parameter values for each disk template.
160
    # The code iterates over constants.DISK_TEMPLATES because new templates
161
    # might have been added.
162
    for template in constants.DISK_TEMPLATES:
163
      if template not in diskparams:
164
        result[template] = constants.DISK_DT_DEFAULTS[template].copy()
165
      else:
166
        result[template] = FillDict(constants.DISK_DT_DEFAULTS[template],
167
                                    diskparams[template])
168

    
169
  return result
170

    
171

    
172
def MakeEmptyIPolicy():
173
  """Create empty IPolicy dictionary.
174

175
  """
176
  return dict([
177
    (constants.ISPECS_MIN, {}),
178
    (constants.ISPECS_MAX, {}),
179
    (constants.ISPECS_STD, {}),
180
    ])
181

    
182

    
183
def CreateIPolicyFromOpts(ispecs_mem_size=None,
184
                          ispecs_cpu_count=None,
185
                          ispecs_disk_count=None,
186
                          ispecs_disk_size=None,
187
                          ispecs_nic_count=None,
188
                          ipolicy_disk_templates=None,
189
                          group_ipolicy=False,
190
                          allowed_values=None,
191
                          fill_all=False):
192
  """Creation of instance policy based on command line options.
193

194
  @param fill_all: whether for cluster policies we should ensure that
195
    all values are filled
196

197

198
  """
199
  # prepare ipolicy dict
200
  ipolicy_transposed = {
201
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
202
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
203
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
204
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
205
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
206
    }
207

    
208
  # first, check that the values given are correct
209
  if group_ipolicy:
210
    forced_type = TISPECS_GROUP_TYPES
211
  else:
212
    forced_type = TISPECS_CLUSTER_TYPES
213

    
214
  for specs in ipolicy_transposed.values():
215
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
216

    
217
  # then transpose
218
  ipolicy_out = MakeEmptyIPolicy()
219
  for name, specs in ipolicy_transposed.iteritems():
220
    assert name in constants.ISPECS_PARAMETERS
221
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
222
      ipolicy_out[key][name] = val
223

    
224
  # no filldict for lists
225
  if not group_ipolicy and fill_all and ipolicy_disk_templates is None:
226
    ipolicy_disk_templates = constants.DISK_TEMPLATES
227
  if ipolicy_disk_templates is not None:
228
    ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
229

    
230
  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
231

    
232
  return ipolicy_out
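
# Illustrative sketch (hypothetical values): the per-parameter {min, max}
# dicts given on the command line are transposed into the
# {min: {...}, max: {...}} layout used by instance policies.
#   >>> pol = CreateIPolicyFromOpts(
#   ...   ispecs_mem_size={constants.ISPECS_MIN: 128, constants.ISPECS_MAX: 1024},
#   ...   ispecs_cpu_count={constants.ISPECS_MIN: 1, constants.ISPECS_MAX: 8},
#   ...   ispecs_disk_count={}, ispecs_disk_size={}, ispecs_nic_count={},
#   ...   group_ipolicy=True)
#   >>> pol[constants.ISPECS_MIN][constants.ISPEC_MEM_SIZE]
#   128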
233

    
234

    
235
class ConfigObject(object):
236
  """A generic config object.
237

238
  It has the following properties:
239

240
    - provides somewhat safe recursive unpickling and pickling for its classes
241
    - unset attributes which are defined in slots are always returned
242
      as None instead of raising an error
243

244
  Classes derived from this must always declare __slots__ (we use many
245
  config objects and the memory reduction is useful)
246

247
  """
248
  __slots__ = []
249

    
250
  def __init__(self, **kwargs):
251
    for k, v in kwargs.iteritems():
252
      setattr(self, k, v)
253

    
254
  def __getattr__(self, name):
255
    if name not in self._all_slots():
256
      raise AttributeError("Invalid object attribute %s.%s" %
257
                           (type(self).__name__, name))
258
    return None
259

    
260
  def __setstate__(self, state):
261
    slots = self._all_slots()
262
    for name in state:
263
      if name in slots:
264
        setattr(self, name, state[name])
265

    
266
  @classmethod
267
  def _all_slots(cls):
268
    """Compute the list of all declared slots for a class.
269

270
    """
271
    slots = []
272
    for parent in cls.__mro__:
273
      slots.extend(getattr(parent, "__slots__", []))
274
    return slots
275

    
276
  def ToDict(self):
277
    """Convert to a dict holding only standard python types.
278

279
    The generic routine just dumps all of this object's attributes in
280
    a dict. It does not work if the class has children who are
281
    ConfigObjects themselves (e.g. the nics list in an Instance), in
282
    which case the object should subclass the function in order to
283
    make sure all objects returned are only standard python types.
284

285
    """
286
    result = {}
287
    for name in self._all_slots():
288
      value = getattr(self, name, None)
289
      if value is not None:
290
        result[name] = value
291
    return result
292

    
293
  __getstate__ = ToDict
294

    
295
  @classmethod
296
  def FromDict(cls, val):
297
    """Create an object from a dictionary.
298

299
    This generic routine takes a dict, instantiates a new instance of
300
    the given class, and sets attributes based on the dict content.
301

302
    As for `ToDict`, this does not work if the class has children
303
    who are ConfigObjects themselves (e.g. the nics list in an
304
    Instance), in which case the object should subclass the function
305
    and alter the objects.
306

307
    """
308
    if not isinstance(val, dict):
309
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
310
                                      " expected dict, got %s" % type(val))
311
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
312
    obj = cls(**val_str) # pylint: disable=W0142
313
    return obj
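
  # Illustrative round-trip (hypothetical values): ToDict/FromDict move a
  # config object through plain python types, which is what the
  # serialization layer relies on (None-valued slots are simply omitted).
  #   >>> nic = NIC(mac="aa:00:00:00:00:01", nicparams={})
  #   >>> NIC.FromDict(nic.ToDict()).mac
  #   'aa:00:00:00:00:01'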
314

    
315
  @staticmethod
316
  def _ContainerToDicts(container):
317
    """Convert the elements of a container to standard python types.
318

319
    This method converts a container with elements derived from
320
    ConfigData to standard python types. If the container is a dict,
321
    we don't touch the keys, only the values.
322

323
    """
324
    if isinstance(container, dict):
325
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
326
    elif isinstance(container, (list, tuple, set, frozenset)):
327
      ret = [elem.ToDict() for elem in container]
328
    else:
329
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
330
                      type(container))
331
    return ret
332

    
333
  @staticmethod
334
  def _ContainerFromDicts(source, c_type, e_type):
335
    """Convert a container from standard python types.
336

337
    This method converts a container with standard python types to
338
    ConfigData objects. If the container is a dict, we don't touch the
339
    keys, only the values.
340

341
    """
342
    if not isinstance(c_type, type):
343
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
344
                      " not a type" % type(c_type))
345
    if source is None:
346
      source = c_type()
347
    if c_type is dict:
348
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
349
    elif c_type in (list, tuple, set, frozenset):
350
      ret = c_type([e_type.FromDict(elem) for elem in source])
351
    else:
352
      raise TypeError("Invalid container type %s passed to"
353
                      " _ContainerFromDicts" % c_type)
354
    return ret
355

    
356
  def Copy(self):
357
    """Makes a deep copy of the current object and its children.
358

359
    """
360
    dict_form = self.ToDict()
361
    clone_obj = self.__class__.FromDict(dict_form)
362
    return clone_obj
363

    
364
  def __repr__(self):
365
    """Implement __repr__ for ConfigObjects."""
366
    return repr(self.ToDict())
367

    
368
  def UpgradeConfig(self):
369
    """Fill defaults for missing configuration values.
370

371
    This method will be called at configuration load time, and its
372
    implementation will be object dependent.
373

374
    """
375
    pass
376

    
377

    
378
class TaggableObject(ConfigObject):
379
  """An generic class supporting tags.
380

381
  """
382
  __slots__ = ["tags"]
383
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
384

    
385
  @classmethod
386
  def ValidateTag(cls, tag):
387
    """Check if a tag is valid.
388

389
    If the tag is invalid, an errors.TagError will be raised. The
390
    function has no return value.
391

392
    """
393
    if not isinstance(tag, basestring):
394
      raise errors.TagError("Invalid tag type (not a string)")
395
    if len(tag) > constants.MAX_TAG_LEN:
396
      raise errors.TagError("Tag too long (>%d characters)" %
397
                            constants.MAX_TAG_LEN)
398
    if not tag:
399
      raise errors.TagError("Tags cannot be empty")
400
    if not cls.VALID_TAG_RE.match(tag):
401
      raise errors.TagError("Tag contains invalid characters")
402

    
403
  def GetTags(self):
404
    """Return the tags list.
405

406
    """
407
    tags = getattr(self, "tags", None)
408
    if tags is None:
409
      tags = self.tags = set()
410
    return tags
411

    
412
  def AddTag(self, tag):
413
    """Add a new tag.
414

415
    """
416
    self.ValidateTag(tag)
417
    tags = self.GetTags()
418
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
419
      raise errors.TagError("Too many tags")
420
    self.GetTags().add(tag)
421

    
422
  def RemoveTag(self, tag):
423
    """Remove a tag.
424

425
    """
426
    self.ValidateTag(tag)
427
    tags = self.GetTags()
428
    try:
429
      tags.remove(tag)
430
    except KeyError:
431
      raise errors.TagError("Tag not found")
432

    
433
  def ToDict(self):
434
    """Taggable-object-specific conversion to standard python types.
435

436
    This replaces the tags set with a list.
437

438
    """
439
    bo = super(TaggableObject, self).ToDict()
440

    
441
    tags = bo.get("tags", None)
442
    if isinstance(tags, set):
443
      bo["tags"] = list(tags)
444
    return bo
445

    
446
  @classmethod
447
  def FromDict(cls, val):
448
    """Custom function for instances.
449

450
    """
451
    obj = super(TaggableObject, cls).FromDict(val)
452
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
453
      obj.tags = set(obj.tags)
454
    return obj
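
  # Illustrative usage (hypothetical tag): tags are validated on addition,
  # kept as a set in memory and serialized as a list by ToDict.
  #   >>> group = NodeGroup(name="group1")
  #   >>> group.AddTag("env:prod")
  #   >>> "env:prod" in group.GetTags()
  #   True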
455

    
456

    
457
class MasterNetworkParameters(ConfigObject):
458
  """Network configuration parameters for the master
459

460
  @ivar name: master name
461
  @ivar ip: master IP
462
  @ivar netmask: master netmask
463
  @ivar netdev: master network device
464
  @ivar ip_family: master IP family
465

466
  """
467
  __slots__ = [
468
    "name",
469
    "ip",
470
    "netmask",
471
    "netdev",
472
    "ip_family"
473
    ]
474

    
475

    
476
class ConfigData(ConfigObject):
477
  """Top-level config object."""
478
  __slots__ = [
479
    "version",
480
    "cluster",
481
    "nodes",
482
    "nodegroups",
483
    "instances",
484
    "serial_no",
485
    ] + _TIMESTAMPS
486

    
487
  def ToDict(self):
488
    """Custom function for top-level config data.
489

490
    This just replaces the list of instances, nodes and the cluster
491
    with standard python types.
492

493
    """
494
    mydict = super(ConfigData, self).ToDict()
495
    mydict["cluster"] = mydict["cluster"].ToDict()
496
    for key in "nodes", "instances", "nodegroups":
497
      mydict[key] = self._ContainerToDicts(mydict[key])
498

    
499
    return mydict
500

    
501
  @classmethod
502
  def FromDict(cls, val):
503
    """Custom function for top-level config data
504

505
    """
506
    obj = super(ConfigData, cls).FromDict(val)
507
    obj.cluster = Cluster.FromDict(obj.cluster)
508
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
509
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
510
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
511
    return obj
512

    
513
  def HasAnyDiskOfType(self, dev_type):
514
    """Check if in there is at disk of the given type in the configuration.
515

516
    @type dev_type: L{constants.LDS_BLOCK}
517
    @param dev_type: the type to look for
518
    @rtype: boolean
519
    @return: boolean indicating if a disk of the given type was found or not
520

521
    """
522
    for instance in self.instances.values():
523
      for disk in instance.disks:
524
        if disk.IsBasedOnDiskType(dev_type):
525
          return True
526
    return False
527

    
528
  def UpgradeConfig(self):
529
    """Fill defaults for missing configuration values.
530

531
    """
532
    self.cluster.UpgradeConfig()
533
    for node in self.nodes.values():
534
      node.UpgradeConfig()
535
    for instance in self.instances.values():
536
      instance.UpgradeConfig()
537
    if self.nodegroups is None:
538
      self.nodegroups = {}
539
    for nodegroup in self.nodegroups.values():
540
      nodegroup.UpgradeConfig()
541
    if self.cluster.drbd_usermode_helper is None:
542
      # To decide if we set a helper let's check if at least one instance has
543
      # a DRBD disk. This does not cover all the possible scenarios but it
544
      # gives a good approximation.
545
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
546
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
547

    
548

    
549
class NIC(ConfigObject):
550
  """Config object representing a network card."""
551
  __slots__ = ["mac", "ip", "nicparams"]
552

    
553
  @classmethod
554
  def CheckParameterSyntax(cls, nicparams):
555
    """Check the given parameters for validity.
556

557
    @type nicparams:  dict
558
    @param nicparams: dictionary with parameter names/value
559
    @raise errors.ConfigurationError: when a parameter is not valid
560

561
    """
562
    if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
563
        nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
564
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
565
      raise errors.ConfigurationError(err)
566

    
567
    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
568
        not nicparams[constants.NIC_LINK]):
569
      err = "Missing bridged nic link"
570
      raise errors.ConfigurationError(err)
571

    
572

    
573
class Disk(ConfigObject):
574
  """Config object representing a block device."""
575
  __slots__ = ["dev_type", "logical_id", "physical_id",
576
               "children", "iv_name", "size", "mode", "params"]
577

    
578
  def CreateOnSecondary(self):
579
    """Test if this device needs to be created on a secondary node."""
580
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
581

    
582
  def AssembleOnSecondary(self):
583
    """Test if this device needs to be assembled on a secondary node."""
584
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
585

    
586
  def OpenOnSecondary(self):
587
    """Test if this device needs to be opened on a secondary node."""
588
    return self.dev_type in (constants.LD_LV,)
589

    
590
  def StaticDevPath(self):
591
    """Return the device path if this device type has a static one.
592

593
    Some devices (LVM for example) live always at the same /dev/ path,
594
    irrespective of their status. For such devices, we return this
595
    path, for others we return None.
596

597
    @warning: The path returned is not a normalized pathname; callers
598
        should check that it is a valid path.
599

600
    """
601
    if self.dev_type == constants.LD_LV:
602
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
603
    elif self.dev_type == constants.LD_BLOCKDEV:
604
      return self.logical_id[1]
605
    elif self.dev_type == constants.LD_RBD:
606
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
607
    return None
608

    
609
  def ChildrenNeeded(self):
610
    """Compute the needed number of children for activation.
611

612
    This method will return either -1 (all children) or a positive
613
    number denoting the minimum number of children needed for
614
    activation (only mirrored devices will usually return >=0).
615

616
    Currently, only DRBD8 supports diskless activation (therefore we
617
    return 0), for all other we keep the previous semantics and return
618
    -1.
619

620
    """
621
    if self.dev_type == constants.LD_DRBD8:
622
      return 0
623
    return -1
624

    
625
  def IsBasedOnDiskType(self, dev_type):
626
    """Check if the disk or its children are based on the given type.
627

628
    @type dev_type: L{constants.LDS_BLOCK}
629
    @param dev_type: the type to look for
630
    @rtype: boolean
631
    @return: boolean indicating if a device of the given type was found or not
632

633
    """
634
    if self.children:
635
      for child in self.children:
636
        if child.IsBasedOnDiskType(dev_type):
637
          return True
638
    return self.dev_type == dev_type
639

    
640
  def GetNodes(self, node):
641
    """This function returns the nodes this device lives on.
642

643
    Given the node on which the parent of the device lives on (or, in
644
    case of a top-level device, the primary node of the devices'
645
    instance), this function will return a list of nodes on which this
646
    device needs to (or can) be assembled.
647

648
    """
649
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
650
                         constants.LD_BLOCKDEV, constants.LD_RBD]:
651
      result = [node]
652
    elif self.dev_type in constants.LDS_DRBD:
653
      result = [self.logical_id[0], self.logical_id[1]]
654
      if node not in result:
655
        raise errors.ConfigurationError("DRBD device passed unknown node")
656
    else:
657
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
658
    return result
659

    
660
  def ComputeNodeTree(self, parent_node):
661
    """Compute the node/disk tree for this disk and its children.
662

663
    This method, given the node on which the parent disk lives, will
664
    return the list of all (node, disk) pairs which describe the disk
665
    tree in the most compact way. For example, a drbd/lvm stack
666
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
667
    which represents all the top-level devices on the nodes.
668

669
    """
670
    my_nodes = self.GetNodes(parent_node)
671
    result = [(node, self) for node in my_nodes]
672
    if not self.children:
673
      # leaf device
674
      return result
675
    for node in my_nodes:
676
      for child in self.children:
677
        child_result = child.ComputeNodeTree(node)
678
        if len(child_result) == 1:
679
          # child (and all its descendants) is simple, doesn't split
680
          # over multiple hosts, so we don't need to describe it, our
681
          # own entry for this node describes it completely
682
          continue
683
        else:
684
          # check if child nodes differ from my nodes; note that
685
          # subdisk can differ from the child itself, and be instead
686
          # one of its descendants
687
          for subnode, subdisk in child_result:
688
            if subnode not in my_nodes:
689
              result.append((subnode, subdisk))
690
            # otherwise child is under our own node, so we ignore this
691
            # entry (but probably the other results in the list will
692
            # be different)
693
    return result
694

    
695
  def ComputeGrowth(self, amount):
696
    """Compute the per-VG growth requirements.
697

698
    This only works for VG-based disks.
699

700
    @type amount: integer
701
    @param amount: the desired increase in (user-visible) disk space
702
    @rtype: dict
703
    @return: a dictionary of volume-groups and the required size
704

705
    """
706
    if self.dev_type == constants.LD_LV:
707
      return {self.logical_id[0]: amount}
708
    elif self.dev_type == constants.LD_DRBD8:
709
      if self.children:
710
        return self.children[0].ComputeGrowth(amount)
711
      else:
712
        return {}
713
    else:
714
      # Other disk types do not require VG space
715
      return {}
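
  # Illustrative usage (hypothetical logical_id): an LV-backed disk accounts
  # its growth against its volume group, DRBD delegates to its data child,
  # and other disk types need no VG space.
  #   >>> lv = Disk(dev_type=constants.LD_LV, logical_id=("xenvg", "disk0"),
  #   ...           size=1024)
  #   >>> lv.ComputeGrowth(512)
  #   {'xenvg': 512}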
716

    
717
  def RecordGrow(self, amount):
718
    """Update the size of this disk after growth.
719

720
    This method recurses over the disk's children and updates their
721
    size correspondingly. The method needs to be kept in sync with the
722
    actual algorithms from bdev.
723

724
    """
725
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
726
                         constants.LD_RBD):
727
      self.size += amount
728
    elif self.dev_type == constants.LD_DRBD8:
729
      if self.children:
730
        self.children[0].RecordGrow(amount)
731
      self.size += amount
732
    else:
733
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
734
                                   " disk type %s" % self.dev_type)
735

    
736
  def Update(self, size=None, mode=None):
737
    """Apply changes to size and mode.
738

739
    """
740
    if self.dev_type == constants.LD_DRBD8:
741
      if self.children:
742
        self.children[0].Update(size=size, mode=mode)
743
    else:
744
      assert not self.children
745

    
746
    if size is not None:
747
      self.size = size
748
    if mode is not None:
749
      self.mode = mode
750

    
751
  def UnsetSize(self):
752
    """Sets recursively the size to zero for the disk and its children.
753

754
    """
755
    if self.children:
756
      for child in self.children:
757
        child.UnsetSize()
758
    self.size = 0
759

    
760
  def SetPhysicalID(self, target_node, nodes_ip):
761
    """Convert the logical ID to the physical ID.
762

763
    This is used only for drbd, which needs ip/port configuration.
764

765
    The routine descends down and updates its children also, because
766
    this helps when only the top device is passed to the remote
767
    node.
768

769
    Arguments:
770
      - target_node: the node we wish to configure for
771
      - nodes_ip: a mapping of node name to ip
772

773
    The target_node must exist in nodes_ip, and must be one of the
774
    nodes in the logical ID for each of the DRBD devices encountered
775
    in the disk tree.
776

777
    """
778
    if self.children:
779
      for child in self.children:
780
        child.SetPhysicalID(target_node, nodes_ip)
781

    
782
    if self.logical_id is None and self.physical_id is not None:
783
      return
784
    if self.dev_type in constants.LDS_DRBD:
785
      pnode, snode, port, pminor, sminor, secret = self.logical_id
786
      if target_node not in (pnode, snode):
787
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
788
                                        target_node)
789
      pnode_ip = nodes_ip.get(pnode, None)
790
      snode_ip = nodes_ip.get(snode, None)
791
      if pnode_ip is None or snode_ip is None:
792
        raise errors.ConfigurationError("Can't find primary or secondary node"
793
                                        " for %s" % str(self))
794
      p_data = (pnode_ip, port)
795
      s_data = (snode_ip, port)
796
      if pnode == target_node:
797
        self.physical_id = p_data + s_data + (pminor, secret)
798
      else: # it must be secondary, we tested above
799
        self.physical_id = s_data + p_data + (sminor, secret)
800
    else:
801
      self.physical_id = self.logical_id
802
    return
803

    
804
  def ToDict(self):
805
    """Disk-specific conversion to standard python types.
806

807
    This replaces the children lists of objects with lists of
808
    standard python types.
809

810
    """
811
    bo = super(Disk, self).ToDict()
812

    
813
    for attr in ("children",):
814
      alist = bo.get(attr, None)
815
      if alist:
816
        bo[attr] = self._ContainerToDicts(alist)
817
    return bo
818

    
819
  @classmethod
820
  def FromDict(cls, val):
821
    """Custom function for Disks
822

823
    """
824
    obj = super(Disk, cls).FromDict(val)
825
    if obj.children:
826
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
827
    if obj.logical_id and isinstance(obj.logical_id, list):
828
      obj.logical_id = tuple(obj.logical_id)
829
    if obj.physical_id and isinstance(obj.physical_id, list):
830
      obj.physical_id = tuple(obj.physical_id)
831
    if obj.dev_type in constants.LDS_DRBD:
832
      # we need a tuple of length six here
833
      if len(obj.logical_id) < 6:
834
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
835
    return obj
836

    
837
  def __str__(self):
838
    """Custom str() formatter for disks.
839

840
    """
841
    if self.dev_type == constants.LD_LV:
842
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
843
    elif self.dev_type in constants.LDS_DRBD:
844
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
845
      val = "<DRBD8("
846
      if self.physical_id is None:
847
        phy = "unconfigured"
848
      else:
849
        phy = ("configured as %s:%s %s:%s" %
850
               (self.physical_id[0], self.physical_id[1],
851
                self.physical_id[2], self.physical_id[3]))
852

    
853
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
854
              (node_a, minor_a, node_b, minor_b, port, phy))
855
      if self.children and self.children.count(None) == 0:
856
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
857
      else:
858
        val += "no local storage"
859
    else:
860
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
861
             (self.dev_type, self.logical_id, self.physical_id, self.children))
862
    if self.iv_name is None:
863
      val += ", not visible"
864
    else:
865
      val += ", visible as /dev/%s" % self.iv_name
866
    if isinstance(self.size, int):
867
      val += ", size=%dm)>" % self.size
868
    else:
869
      val += ", size='%s')>" % (self.size,)
870
    return val
871

    
872
  def Verify(self):
873
    """Checks that this disk is correctly configured.
874

875
    """
876
    all_errors = []
877
    if self.mode not in constants.DISK_ACCESS_SET:
878
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
879
    return all_errors
880

    
881
  def UpgradeConfig(self):
882
    """Fill defaults for missing configuration values.
883

884
    """
885
    if self.children:
886
      for child in self.children:
887
        child.UpgradeConfig()
888

    
889
    if not self.params:
890
      self.params = constants.DISK_LD_DEFAULTS[self.dev_type].copy()
891
    else:
892
      self.params = FillDict(constants.DISK_LD_DEFAULTS[self.dev_type],
893
                             self.params)
894
    # add here config upgrade for this disk
895

    
896

    
897
class InstancePolicy(ConfigObject):
898
  """Config object representing instance policy limits dictionary.
899

900

901
  Note that this object is not actually used in the config, it's just
902
  used as a placeholder for a few functions.
903

904
  """
905
  @classmethod
906
  def CheckParameterSyntax(cls, ipolicy):
907
    """ Check the instance policy for validity.
908

909
    """
910
    for param in constants.ISPECS_PARAMETERS:
911
      InstancePolicy.CheckISpecSyntax(ipolicy, param)
912
    if constants.IPOLICY_DTS in ipolicy:
913
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
914
    for key in constants.IPOLICY_PARAMETERS:
915
      if key in ipolicy:
916
        InstancePolicy.CheckParameter(key, ipolicy[key])
917
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
918
    if wrong_keys:
919
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
920
                                      utils.CommaJoin(wrong_keys))
921

    
922
  @classmethod
923
  def CheckISpecSyntax(cls, ipolicy, name):
924
    """Check the instance policy for validity on a given key.
925

926
    We check if the instance policy makes sense for a given key, that is
927
    if ipolicy[min][name] <= ipolicy[std][name] <= ipolicy[max][name].
928

929
    @type ipolicy: dict
930
    @param ipolicy: dictionary with min, max, std specs
931
    @type name: string
932
    @param name: what are the limits for
933
    @raise errors.ConfigurationError: when specs for the given name are not valid
934

935
    """
936
    min_v = ipolicy[constants.ISPECS_MIN].get(name, 0)
937
    std_v = ipolicy[constants.ISPECS_STD].get(name, min_v)
938
    max_v = ipolicy[constants.ISPECS_MAX].get(name, std_v)
939
    err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
940
           (name,
941
            ipolicy[constants.ISPECS_MIN].get(name, "-"),
942
            ipolicy[constants.ISPECS_MAX].get(name, "-"),
943
            ipolicy[constants.ISPECS_STD].get(name, "-")))
944
    if min_v > std_v or std_v > max_v:
945
      raise errors.ConfigurationError(err)
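
  # Illustrative usage (hypothetical policy): the check enforces
  # min <= std <= max for the given spec name, e.g. a policy with
  # min cpu-count 4 but max cpu-count 2 raises errors.ConfigurationError.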
946

    
947
  @classmethod
948
  def CheckDiskTemplates(cls, disk_templates):
949
    """Checks the disk templates for validity.
950

951
    """
952
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
953
    if wrong:
954
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
955
                                      utils.CommaJoin(wrong))
956

    
957
  @classmethod
958
  def CheckParameter(cls, key, value):
959
    """Checks a parameter.
960

961
    Currently we expect all parameters to be float values.
962

963
    """
964
    try:
965
      float(value)
966
    except (TypeError, ValueError), err:
967
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
968
                                      " '%s', error: %s" % (key, value, err))
969

    
970

    
971
class Instance(TaggableObject):
972
  """Config object representing an instance."""
973
  __slots__ = [
974
    "name",
975
    "primary_node",
976
    "os",
977
    "hypervisor",
978
    "hvparams",
979
    "beparams",
980
    "osparams",
981
    "admin_state",
982
    "nics",
983
    "disks",
984
    "disk_template",
985
    "network_port",
986
    "serial_no",
987
    ] + _TIMESTAMPS + _UUID
988

    
989
  def _ComputeSecondaryNodes(self):
990
    """Compute the list of secondary nodes.
991

992
    This is a simple wrapper over _ComputeAllNodes.
993

994
    """
995
    all_nodes = set(self._ComputeAllNodes())
996
    all_nodes.discard(self.primary_node)
997
    return tuple(all_nodes)
998

    
999
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1000
                             "List of secondary nodes")
1001

    
1002
  def _ComputeAllNodes(self):
1003
    """Compute the list of all nodes.
1004

1005
    Since the data is already there (in the drbd disks), keeping it as
1006
    a separate normal attribute is redundant and if not properly
1007
    synchronised can cause problems. Thus it's better to compute it
1008
    dynamically.
1009

1010
    """
1011
    def _Helper(nodes, device):
1012
      """Recursively computes nodes given a top device."""
1013
      if device.dev_type in constants.LDS_DRBD:
1014
        nodea, nodeb = device.logical_id[:2]
1015
        nodes.add(nodea)
1016
        nodes.add(nodeb)
1017
      if device.children:
1018
        for child in device.children:
1019
          _Helper(nodes, child)
1020

    
1021
    all_nodes = set()
1022
    all_nodes.add(self.primary_node)
1023
    for device in self.disks:
1024
      _Helper(all_nodes, device)
1025
    return tuple(all_nodes)
1026

    
1027
  all_nodes = property(_ComputeAllNodes, None, None,
1028
                       "List of all nodes of the instance")
1029

    
1030
  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
1031
    """Provide a mapping of nodes to LVs this instance owns.
1032

1033
    This function figures out what logical volumes should belong on
1034
    which nodes, recursing through a device tree.
1035

1036
    @param lvmap: optional dictionary to receive the
1037
        'node' : ['lv', ...] data.
1038

1039
    @return: None if lvmap arg is given, otherwise, a dictionary of
1040
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
1041
        volumeN is of the form "vg_name/lv_name", compatible with
1042
        GetVolumeList()
1043

1044
    """
1045
    if node is None:
1046
      node = self.primary_node
1047

    
1048
    if lvmap is None:
1049
      lvmap = {
1050
        node: [],
1051
        }
1052
      ret = lvmap
1053
    else:
1054
      if not node in lvmap:
1055
        lvmap[node] = []
1056
      ret = None
1057

    
1058
    if not devs:
1059
      devs = self.disks
1060

    
1061
    for dev in devs:
1062
      if dev.dev_type == constants.LD_LV:
1063
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1064

    
1065
      elif dev.dev_type in constants.LDS_DRBD:
1066
        if dev.children:
1067
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1068
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1069

    
1070
      elif dev.children:
1071
        self.MapLVsByNode(lvmap, dev.children, node)
1072

    
1073
    return ret
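
  # Illustrative shape of the result (hypothetical names): plain LVs map to
  # the node they live on, while DRBD disks contribute their backing LVs on
  # both of their nodes, e.g.
  #   {"node1.example.com": ["xenvg/disk0_data", "xenvg/disk0_meta"],
  #    "node2.example.com": ["xenvg/disk0_data", "xenvg/disk0_meta"]}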
1074

    
1075
  def FindDisk(self, idx):
1076
    """Find a disk given having a specified index.
1077

1078
    This is just a wrapper that does validation of the index.
1079

1080
    @type idx: int
1081
    @param idx: the disk index
1082
    @rtype: L{Disk}
1083
    @return: the corresponding disk
1084
    @raise errors.OpPrereqError: when the given index is not valid
1085

1086
    """
1087
    try:
1088
      idx = int(idx)
1089
      return self.disks[idx]
1090
    except (TypeError, ValueError), err:
1091
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1092
                                 errors.ECODE_INVAL)
1093
    except IndexError:
1094
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1095
                                 " 0 to %d" % (idx, len(self.disks) - 1),
1096
                                 errors.ECODE_INVAL)
1097

    
1098
  def ToDict(self):
1099
    """Instance-specific conversion to standard python types.
1100

1101
    This replaces the children lists of objects with lists of standard
1102
    python types.
1103

1104
    """
1105
    bo = super(Instance, self).ToDict()
1106

    
1107
    for attr in "nics", "disks":
1108
      alist = bo.get(attr, None)
1109
      if alist:
1110
        nlist = self._ContainerToDicts(alist)
1111
      else:
1112
        nlist = []
1113
      bo[attr] = nlist
1114
    return bo
1115

    
1116
  @classmethod
1117
  def FromDict(cls, val):
1118
    """Custom function for instances.
1119

1120
    """
1121
    if "admin_state" not in val:
1122
      if val.get("admin_up", False):
1123
        val["admin_state"] = constants.ADMINST_UP
1124
      else:
1125
        val["admin_state"] = constants.ADMINST_DOWN
1126
    if "admin_up" in val:
1127
      del val["admin_up"]
1128
    obj = super(Instance, cls).FromDict(val)
1129
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
1130
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
1131
    return obj
1132

    
1133
  def UpgradeConfig(self):
1134
    """Fill defaults for missing configuration values.
1135

1136
    """
1137
    for nic in self.nics:
1138
      nic.UpgradeConfig()
1139
    for disk in self.disks:
1140
      disk.UpgradeConfig()
1141
    if self.hvparams:
1142
      for key in constants.HVC_GLOBALS:
1143
        try:
1144
          del self.hvparams[key]
1145
        except KeyError:
1146
          pass
1147
    if self.osparams is None:
1148
      self.osparams = {}
1149
    UpgradeBeParams(self.beparams)
1150

    
1151

    
1152
class OS(ConfigObject):
1153
  """Config object representing an operating system.
1154

1155
  @type supported_parameters: list
1156
  @ivar supported_parameters: a list of tuples, name and description,
1157
      containing the supported parameters by this OS
1158

1159
  @type VARIANT_DELIM: string
1160
  @cvar VARIANT_DELIM: the variant delimiter
1161

1162
  """
1163
  __slots__ = [
1164
    "name",
1165
    "path",
1166
    "api_versions",
1167
    "create_script",
1168
    "export_script",
1169
    "import_script",
1170
    "rename_script",
1171
    "verify_script",
1172
    "supported_variants",
1173
    "supported_parameters",
1174
    ]
1175

    
1176
  VARIANT_DELIM = "+"
1177

    
1178
  @classmethod
1179
  def SplitNameVariant(cls, name):
1180
    """Splits the name into the proper name and variant.
1181

1182
    @param name: the OS (unprocessed) name
1183
    @rtype: list
1184
    @return: a list of two elements; if the original name didn't
1185
        contain a variant, the second element is an empty string
1186

1187
    """
1188
    nv = name.split(cls.VARIANT_DELIM, 1)
1189
    if len(nv) == 1:
1190
      nv.append("")
1191
    return nv
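
  # Illustrative usage (hypothetical OS names):
  #   >>> OS.SplitNameVariant("debootstrap+default")
  #   ['debootstrap', 'default']
  #   >>> OS.SplitNameVariant("debootstrap")
  #   ['debootstrap', '']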
1192

    
1193
  @classmethod
1194
  def GetName(cls, name):
1195
    """Returns the proper name of the os (without the variant).
1196

1197
    @param name: the OS (unprocessed) name
1198

1199
    """
1200
    return cls.SplitNameVariant(name)[0]
1201

    
1202
  @classmethod
1203
  def GetVariant(cls, name):
1204
    """Returns the variant the os (without the base name).
1205

1206
    @param name: the OS (unprocessed) name
1207

1208
    """
1209
    return cls.SplitNameVariant(name)[1]
1210

    
1211

    
1212
class NodeHvState(ConfigObject):
1213
  """Hypvervisor state on a node.
1214

1215
  @ivar mem_total: Total amount of memory
1216
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1217
    available)
1218
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1219
    rounding
1220
  @ivar mem_inst: Memory used by instances living on node
1221
  @ivar cpu_total: Total node CPU core count
1222
  @ivar cpu_node: Number of CPU cores reserved for the node itself
1223

1224
  """
1225
  __slots__ = [
1226
    "mem_total",
1227
    "mem_node",
1228
    "mem_hv",
1229
    "mem_inst",
1230
    "cpu_total",
1231
    "cpu_node",
1232
    ] + _TIMESTAMPS
1233

    
1234

    
1235
class NodeDiskState(ConfigObject):
1236
  """Disk state on a node.
1237

1238
  """
1239
  __slots__ = [
1240
    "total",
1241
    "reserved",
1242
    "overhead",
1243
    ] + _TIMESTAMPS
1244

    
1245

    
1246
class Node(TaggableObject):
1247
  """Config object representing a node.
1248

1249
  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1250
  @ivar hv_state_static: Hypervisor state overridden by user
1251
  @ivar disk_state: Disk state (e.g. free space)
1252
  @ivar disk_state_static: Disk state overridden by user
1253

1254
  """
1255
  __slots__ = [
1256
    "name",
1257
    "primary_ip",
1258
    "secondary_ip",
1259
    "serial_no",
1260
    "master_candidate",
1261
    "offline",
1262
    "drained",
1263
    "group",
1264
    "master_capable",
1265
    "vm_capable",
1266
    "ndparams",
1267
    "powered",
1268
    "hv_state",
1269
    "hv_state_static",
1270
    "disk_state",
1271
    "disk_state_static",
1272
    ] + _TIMESTAMPS + _UUID
1273

    
1274
  def UpgradeConfig(self):
1275
    """Fill defaults for missing configuration values.
1276

1277
    """
1278
    # pylint: disable=E0203
1279
    # because these are "defined" via slots, not manually
1280
    if self.master_capable is None:
1281
      self.master_capable = True
1282

    
1283
    if self.vm_capable is None:
1284
      self.vm_capable = True
1285

    
1286
    if self.ndparams is None:
1287
      self.ndparams = {}
1288

    
1289
    if self.powered is None:
1290
      self.powered = True
1291

    
1292
  def ToDict(self):
1293
    """Custom function for serializing.
1294

1295
    """
1296
    data = super(Node, self).ToDict()
1297

    
1298
    hv_state = data.get("hv_state", None)
1299
    if hv_state is not None:
1300
      data["hv_state"] = self._ContainerToDicts(hv_state)
1301

    
1302
    disk_state = data.get("disk_state", None)
1303
    if disk_state is not None:
1304
      data["disk_state"] = \
1305
        dict((key, self._ContainerToDicts(value))
1306
             for (key, value) in disk_state.items())
1307

    
1308
    return data
1309

    
1310
  @classmethod
1311
  def FromDict(cls, val):
1312
    """Custom function for deserializing.
1313

1314
    """
1315
    obj = super(Node, cls).FromDict(val)
1316

    
1317
    if obj.hv_state is not None:
1318
      obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1319

    
1320
    if obj.disk_state is not None:
1321
      obj.disk_state = \
1322
        dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState))
1323
             for (key, value) in obj.disk_state.items())
1324

    
1325
    return obj
1326

    
1327

    
1328
class NodeGroup(TaggableObject):
1329
  """Config object representing a node group."""
1330
  __slots__ = [
1331
    "name",
1332
    "members",
1333
    "ndparams",
1334
    "diskparams",
1335
    "ipolicy",
1336
    "serial_no",
1337
    "hv_state_static",
1338
    "disk_state_static",
1339
    "alloc_policy",
1340
    ] + _TIMESTAMPS + _UUID
1341

    
1342
  def ToDict(self):
1343
    """Custom function for nodegroup.
1344

1345
    This discards the members object, which gets recalculated and is only kept
1346
    in memory.
1347

1348
    """
1349
    mydict = super(NodeGroup, self).ToDict()
1350
    del mydict["members"]
1351
    return mydict
1352

    
1353
  @classmethod
1354
  def FromDict(cls, val):
1355
    """Custom function for nodegroup.
1356

1357
    The members slot is initialized to an empty list, upon deserialization.
1358

1359
    """
1360
    obj = super(NodeGroup, cls).FromDict(val)
1361
    obj.members = []
1362
    return obj
1363

    
1364
  def UpgradeConfig(self):
1365
    """Fill defaults for missing configuration values.
1366

1367
    """
1368
    if self.ndparams is None:
1369
      self.ndparams = {}
1370

    
1371
    if self.serial_no is None:
1372
      self.serial_no = 1
1373

    
1374
    if self.alloc_policy is None:
1375
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1376

    
1377
    # We only update mtime, and not ctime, since we would not be able
1378
    # to provide a correct value for creation time.
1379
    if self.mtime is None:
1380
      self.mtime = time.time()
1381

    
1382
    self.diskparams = UpgradeDiskParams(self.diskparams)
1383
    if self.ipolicy is None:
1384
      self.ipolicy = MakeEmptyIPolicy()
1385

    
1386
  def FillND(self, node):
1387
    """Return filled out ndparams for L{objects.Node}
1388

1389
    @type node: L{objects.Node}
1390
    @param node: A Node object to fill
1391
    @return: a copy of the node's ndparams with defaults filled
1392

1393
    """
1394
    return self.SimpleFillND(node.ndparams)
1395

    
1396
  def SimpleFillND(self, ndparams):
1397
    """Fill a given ndparams dict with defaults.
1398

1399
    @type ndparams: dict
1400
    @param ndparams: the dict to fill
1401
    @rtype: dict
1402
    @return: a copy of the passed in ndparams with missing keys filled
1403
        from the node group defaults
1404

1405
    """
1406
    return FillDict(self.ndparams, ndparams)
1407

    
1408

    
1409
class Cluster(TaggableObject):
1410
  """Config object representing the cluster."""
1411
  __slots__ = [
1412
    "serial_no",
1413
    "rsahostkeypub",
1414
    "highest_used_port",
1415
    "tcpudp_port_pool",
1416
    "mac_prefix",
1417
    "volume_group_name",
1418
    "reserved_lvs",
1419
    "drbd_usermode_helper",
1420
    "default_bridge",
1421
    "default_hypervisor",
1422
    "master_node",
1423
    "master_ip",
1424
    "master_netdev",
1425
    "master_netmask",
1426
    "use_external_mip_script",
1427
    "cluster_name",
1428
    "file_storage_dir",
1429
    "shared_file_storage_dir",
1430
    "enabled_hypervisors",
1431
    "hvparams",
1432
    "ipolicy",
1433
    "os_hvp",
1434
    "beparams",
1435
    "osparams",
1436
    "nicparams",
1437
    "ndparams",
1438
    "diskparams",
1439
    "candidate_pool_size",
1440
    "modify_etc_hosts",
1441
    "modify_ssh_setup",
1442
    "maintain_node_health",
1443
    "uid_pool",
1444
    "default_iallocator",
1445
    "hidden_os",
1446
    "blacklisted_os",
1447
    "primary_ip_family",
1448
    "prealloc_wipe_disks",
1449
    "hv_state_static",
1450
    "disk_state_static",
1451
    ] + _TIMESTAMPS + _UUID
1452

    
1453
  def UpgradeConfig(self):
1454
    """Fill defaults for missing configuration values.
1455

1456
    """
1457
    # pylint: disable=E0203
1458
    # because these are "defined" via slots, not manually
1459
    if self.hvparams is None:
1460
      self.hvparams = constants.HVC_DEFAULTS
1461
    else:
1462
      for hypervisor in self.hvparams:
1463
        self.hvparams[hypervisor] = FillDict(
1464
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1465

    
1466
    if self.os_hvp is None:
1467
      self.os_hvp = {}
1468

    
1469
    # osparams added before 2.2
1470
    if self.osparams is None:
1471
      self.osparams = {}
1472

    
1473
    if self.ndparams is None:
1474
      self.ndparams = constants.NDC_DEFAULTS
1475

    
1476
    self.beparams = UpgradeGroupedParams(self.beparams,
1477
                                         constants.BEC_DEFAULTS)
1478
    for beparams_group in self.beparams:
1479
      UpgradeBeParams(self.beparams[beparams_group])
1480

    
1481
    migrate_default_bridge = not self.nicparams
1482
    self.nicparams = UpgradeGroupedParams(self.nicparams,
1483
                                          constants.NICC_DEFAULTS)
1484
    if migrate_default_bridge:
1485
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1486
        self.default_bridge
1487

    
1488
    if self.modify_etc_hosts is None:
1489
      self.modify_etc_hosts = True
1490

    
1491
    if self.modify_ssh_setup is None:
1492
      self.modify_ssh_setup = True
1493

    
1494
    # default_bridge is no longer used in 2.1. The slot is left there to
1495
    # support auto-upgrading. It can be removed once we decide to deprecate
1496
    # upgrading straight from 2.0.
1497
    if self.default_bridge is not None:
1498
      self.default_bridge = None
1499

    
1500
    # default_hypervisor is just the first enabled one in 2.1. This slot and
1501
    # code can be removed once upgrading straight from 2.0 is deprecated.
1502
    if self.default_hypervisor is not None:
1503
      self.enabled_hypervisors = ([self.default_hypervisor] +
1504
        [hvname for hvname in self.enabled_hypervisors
1505
         if hvname != self.default_hypervisor])
1506
      self.default_hypervisor = None
1507

    
1508
    # maintain_node_health added after 2.1.1
1509
    if self.maintain_node_health is None:
1510
      self.maintain_node_health = False
1511

    
1512
    if self.uid_pool is None:
1513
      self.uid_pool = []
1514

    
1515
    if self.default_iallocator is None:
1516
      self.default_iallocator = ""
1517

    
1518
    # reserved_lvs added before 2.2
1519
    if self.reserved_lvs is None:
1520
      self.reserved_lvs = []
1521

    
1522
    # hidden and blacklisted operating systems added before 2.2.1
1523
    if self.hidden_os is None:
1524
      self.hidden_os = []
1525

    
1526
    if self.blacklisted_os is None:
1527
      self.blacklisted_os = []
1528

    
1529
    # primary_ip_family added before 2.3
1530
    if self.primary_ip_family is None:
1531
      self.primary_ip_family = AF_INET
1532

    
1533
    if self.master_netmask is None:
1534
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
1535
      self.master_netmask = ipcls.iplen
1536

    
1537
    if self.prealloc_wipe_disks is None:
1538
      self.prealloc_wipe_disks = False
1539

    
1540
    # shared_file_storage_dir added before 2.5
1541
    if self.shared_file_storage_dir is None:
1542
      self.shared_file_storage_dir = ""
1543

    
1544
    if self.use_external_mip_script is None:
1545
      self.use_external_mip_script = False
1546

    
1547
    self.diskparams = UpgradeDiskParams(self.diskparams)
1548

    
1549
    # instance policy added before 2.6
1550
    if self.ipolicy is None:
1551
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
1552
    else:
1553
      # we can either make sure to upgrade the ipolicy always, or only
1554
      # do it in some corner cases (e.g. missing keys); note that this
1555
      # will break any removal of keys from the ipolicy dict
1556
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
1557

    
1558
  @property
1559
  def primary_hypervisor(self):
1560
    """The first hypervisor is the primary.
1561

1562
    Useful, for example, for L{Node}'s hv/disk state.
1563

1564
    """
1565
    return self.enabled_hypervisors[0]
1566

    
1567
  def ToDict(self):
1568
    """Custom function for cluster.
1569

1570
    """
1571
    mydict = super(Cluster, self).ToDict()
1572
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
1573
    return mydict
1574

    
1575
  @classmethod
1576
  def FromDict(cls, val):
1577
    """Custom function for cluster.
1578

1579
    """
1580
    obj = super(Cluster, cls).FromDict(val)
1581
    if not isinstance(obj.tcpudp_port_pool, set):
1582
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1583
    return obj
1584

    
1585
  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1586
    """Get the default hypervisor parameters for the cluster.
1587

1588
    @param hypervisor: the hypervisor name
1589
    @param os_name: if specified, we'll also update the defaults for this OS
1590
    @param skip_keys: if passed, list of keys not to use
1591
    @return: the defaults dict
1592

1593
    """
1594
    if skip_keys is None:
1595
      skip_keys = []
1596

    
1597
    fill_stack = [self.hvparams.get(hypervisor, {})]
1598
    if os_name is not None:
1599
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1600
      fill_stack.append(os_hvp)
1601

    
1602
    ret_dict = {}
1603
    for o_dict in fill_stack:
1604
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1605

    
1606
    return ret_dict
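
  # Illustrative precedence (hypothetical values): cluster-wide hvparams are
  # applied first and then overridden by the per-OS hvparams, so the
  # OS-specific value wins for any key present in both, e.g.
  #   hvparams[hv]:    {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/xvda1"}
  #   os_hvp[os][hv]:  {"root_path": "/dev/xvda2"}
  #   GetHVDefaults(hv, os) -> {"kernel_path": "/boot/vmlinuz",
  #                             "root_path": "/dev/xvda2"}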
1607

    
1608
  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1609
    """Fill a given hvparams dict with cluster defaults.
1610

1611
    @type hv_name: string
1612
    @param hv_name: the hypervisor to use
1613
    @type os_name: string
1614
    @param os_name: the OS to use for overriding the hypervisor defaults
1615
    @type skip_globals: boolean
1616
    @param skip_globals: if True, the global hypervisor parameters will
1617
        not be filled
1618
    @rtype: dict
1619
    @return: a copy of the given hvparams with missing keys filled from
1620
        the cluster defaults
1621

1622
    """
1623
    if skip_globals:
1624
      skip_keys = constants.HVC_GLOBALS
1625
    else:
1626
      skip_keys = []
1627

    
1628
    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1629
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1630

    
  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

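  # Note: the cluster stores its backend and NIC parameter defaults under the
  # constants.PP_DEFAULT profile; SimpleFillBE above and SimpleFillNIC below
  # merge the caller's explicit values on top of that profile.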
  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)

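  # Merge order sketch for SimpleFillOS above (hypothetical OS name): for
  # os_name "debian+minimal", the parameters stored for the base OS "debian"
  # are taken first, those stored for the "debian+minimal" variant are applied
  # on top, and the explicitly passed os_params win over both.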
  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

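  # Note on FillND above: node parameters are resolved in two steps: the node
  # group first fills the node's own ndparams, and SimpleFillND below then
  # fills anything still missing from the cluster-wide defaults.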
  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj
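  # Note: "fields" holds QueryFieldDefinition objects in memory; the two
  # methods above convert them to and from plain dicts so the whole response
  # can be serialized.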


class QueryRequest(ConfigObject):
  """Object holding a query request.

  """
  __slots__ = [
    "what",
    "fields",
    "qfilter",
    ]


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = [
    ]


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

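  # Summary derived from the assertions in Validate below: a CONS_MESSAGE
  # console only needs a message; CONS_SSH needs host, user and command;
  # CONS_SPICE needs host and port; CONS_VNC needs host, port and display.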
  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
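  # Usage sketch for Dumps/Loads above (hypothetical section and option
  # names):
  #   cfp = SerializableConfigParser()
  #   cfp.add_section("node")
  #   cfp.set("node", "name", "node1.example.com")
  #   data = cfp.Dumps()
  #   restored = SerializableConfigParser.Loads(data)
  #   assert restored.get("node", "name") == "node1.example.com"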