#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable=E0203,W0201,R0902

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

# R0902: Allow instances of these objects to have more than 20 attributes

import ConfigParser
import re
import copy
import time
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants
from ganeti import netutils
from ganeti import utils

from socket import AF_INET


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]

# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }


def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
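
# Illustrative example (not part of the original module); FillDict merges
# the custom values over a deep copy of the defaults and can drop keys:
#   FillDict({"a": 1, "b": 2}, {"b": 3, "c": 4}, skip_keys=["a"])
#   => {"b": 3, "c": 4}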


def FillIPolicy(default_ipolicy, custom_ipolicy, skip_keys=None):
  """Fills an instance policy with defaults.

  """
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
  ret_dict = {}
  for key in constants.IPOLICY_ISPECS:
    ret_dict[key] = FillDict(default_ipolicy[key],
                             custom_ipolicy.get(key, {}),
                             skip_keys=skip_keys)
  # list items
  for key in [constants.IPOLICY_DTS]:
    ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key]))
  # other items which we know we can directly copy (immutables)
  for key in constants.IPOLICY_PARAMETERS:
    ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key])

  return ret_dict


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
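
# Illustrative example (not part of the original module); when no groups
# exist yet a single default group is created, otherwise each existing
# group is filled from the defaults:
#   UpgradeGroupedParams({"grp1": {"x": 2}}, {"x": 1, "y": 3})
#   => {"grp1": {"x": 2, "y": 3}}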


def UpgradeBeParams(target):
  """Update the be parameters dict to the new format.

  @type target: dict
  @param target: "be" parameters dict

  """
  if constants.BE_MEMORY in target:
    memory = target[constants.BE_MEMORY]
    target[constants.BE_MAXMEM] = memory
    target[constants.BE_MINMEM] = memory
    del target[constants.BE_MEMORY]
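
# Illustrative example (not part of the original module); the legacy single
# memory backend parameter is split in place into the newer max/min pair:
#   be = {constants.BE_MEMORY: 128}
#   UpgradeBeParams(be)
#   # be == {constants.BE_MAXMEM: 128, constants.BE_MINMEM: 128}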


def UpgradeDiskParams(diskparams):
  """Upgrade the disk parameters.

  @type diskparams: dict
  @param diskparams: disk parameters to upgrade
  @rtype: dict
  @return: the upgraded disk parameters dict

  """
  result = dict()
  if diskparams is None:
    result = constants.DISK_DT_DEFAULTS.copy()
  else:
    # Update the disk parameter values for each disk template.
    # The code iterates over constants.DISK_TEMPLATES because new templates
    # might have been added.
    for template in constants.DISK_TEMPLATES:
      if template not in diskparams:
        result[template] = constants.DISK_DT_DEFAULTS[template].copy()
      else:
        result[template] = FillDict(constants.DISK_DT_DEFAULTS[template],
                                    diskparams[template])

  return result


def MakeEmptyIPolicy():
  """Create empty IPolicy dictionary.

  """
  return dict([
    (constants.ISPECS_MIN, {}),
    (constants.ISPECS_MAX, {}),
    (constants.ISPECS_STD, {}),
    ])
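
# Illustrative note (not part of the original module); the returned skeleton
# maps each of the min/max/std spec keys to an empty dict:
#   MakeEmptyIPolicy()
#   => {constants.ISPECS_MIN: {}, constants.ISPECS_MAX: {},
#       constants.ISPECS_STD: {}}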


def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled


  """
  # prepare ipolicy dict
  ipolicy_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES

  for specs in ipolicy_transposed.values():
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)

  # then transpose
  ipolicy_out = MakeEmptyIPolicy()
  for name, specs in ipolicy_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      ipolicy_out[key][name] = val

  # no filldict for non-dicts
  if not group_ipolicy and fill_all:
    if ipolicy_disk_templates is None:
      ipolicy_disk_templates = constants.DISK_TEMPLATES
    if ipolicy_vcpu_ratio is None:
      ipolicy_vcpu_ratio = \
        constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
  if ipolicy_disk_templates is not None:
    ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  return ipolicy_out
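
# Illustrative example (not part of the original module; values are
# hypothetical); the options are given per parameter ({min/max/std: value})
# and are transposed into the policy layout ({min/max/std: {parameter:
# value}}), e.g. passing
#   ispecs_mem_size={constants.ISPECS_MIN: 128, constants.ISPECS_MAX: 1024}
# yields ipolicy_out[constants.ISPECS_MIN][constants.ISPEC_MEM_SIZE] == 128.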


class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    slots = self._all_slots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  @classmethod
  def _all_slots(cls):
    """Compute the list of all declared slots for a class.

    """
    slots = []
    for parent in cls.__mro__:
      slots.extend(getattr(parent, "__slots__", []))
    return slots

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self._all_slots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable=W0142
    return obj
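
  # Illustrative round-trip (not part of the original module; the MAC value
  # is hypothetical); ToDict() keeps only non-None slots and FromDict()
  # rebuilds an equivalent object:
  #   nic = NIC(mac="aa:00:00:11:22:33")
  #   NIC.FromDict(nic.ToDict()).mac == "aa:00:00:11:22:33"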

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if source is None:
      source = c_type()
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass


class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
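
  # Illustrative note (not part of the original module): tags are kept as a
  # set in memory, ToDict() serializes them as a list, and FromDict() turns
  # the list back into a set, so a serialize/deserialize round trip
  # preserves the tag collection.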


class MasterNetworkParameters(ConfigObject):
  """Network configuration parameters for the master

  @ivar name: master name
  @ivar ip: master IP
  @ivar netmask: master netmask
  @ivar netdev: master network device
  @ivar ip_family: master IP family

  """
  __slots__ = [
    "name",
    "ip",
    "netmask",
    "netdev",
    "ip_family"
    ]


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = [
    "version",
    "cluster",
    "nodes",
    "nodegroups",
    "instances",
    "serial_no",
    ] + _TIMESTAMPS

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances", "nodegroups":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if there is at least one disk of the given type in the
    configuration.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set a helper let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams:  dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
        nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode", "params"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    elif self.dev_type == constants.LD_BLOCKDEV:
      return self.logical_id[1]
    elif self.dev_type == constants.LD_RBD:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
                         constants.LD_BLOCKDEV, constants.LD_RBD]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result
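
  # Illustrative example (not part of the original module; node names are
  # hypothetical): for a DRBD8 disk between "node1" and "node2" whose LV
  # children live on the same pair of nodes, ComputeNodeTree("node1")
  # returns [("node1", drbd_disk), ("node2", drbd_disk)] only, since the
  # children do not span any additional hosts.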

  def ComputeGrowth(self, amount):
    """Compute the per-VG growth requirements.

    This only works for VG-based disks.

    @type amount: integer
    @param amount: the desired increase in (user-visible) disk space
    @rtype: dict
    @return: a dictionary of volume-groups and the required size

    """
    if self.dev_type == constants.LD_LV:
      return {self.logical_id[0]: amount}
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        return self.children[0].ComputeGrowth(amount)
      else:
        return {}
    else:
      # Other disk types do not require VG space
      return {}

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
                         constants.LD_RBD):
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def Update(self, size=None, mode=None):
    """Apply changes to size and mode.

    """
    if self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].Update(size=size, mode=mode)
    else:
      assert not self.children

    if size is not None:
      self.size = size
    if mode is not None:
      self.mode = mode

  def UnsetSize(self):
    """Recursively sets the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends and updates its children as well, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()

    if not self.params:
      self.params = constants.DISK_LD_DEFAULTS[self.dev_type].copy()
    else:
      self.params = FillDict(constants.DISK_LD_DEFAULTS[self.dev_type],
                             self.params)
    # add here config upgrade for this disk


class InstancePolicy(ConfigObject):
  """Config object representing instance policy limits dictionary.


  Note that this object is not actually used in the config, it's just
  used as a placeholder for a few functions.

  """
  @classmethod
  def CheckParameterSyntax(cls, ipolicy):
    """ Check the instance policy for validity.

    """
    for param in constants.ISPECS_PARAMETERS:
      InstancePolicy.CheckISpecSyntax(ipolicy, param)
    if constants.IPOLICY_DTS in ipolicy:
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
    for key in constants.IPOLICY_PARAMETERS:
      if key in ipolicy:
        InstancePolicy.CheckParameter(key, ipolicy[key])
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
    if wrong_keys:
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
                                      utils.CommaJoin(wrong_keys))

  @classmethod
  def CheckISpecSyntax(cls, ipolicy, name):
    """Check the instance policy for validity on a given key.

    We check if the instance policy makes sense for a given key, that is
    if ipolicy[min][name] <= ipolicy[std][name] <= ipolicy[max][name].

    @type ipolicy: dict
    @param ipolicy: dictionary with min, max, std specs
    @type name: string
    @param name: what are the limits for
    @raise errors.ConfigurationError: when specs for the given name are not
        valid

    """
    min_v = ipolicy[constants.ISPECS_MIN].get(name, 0)
    std_v = ipolicy[constants.ISPECS_STD].get(name, min_v)
    max_v = ipolicy[constants.ISPECS_MAX].get(name, std_v)
    err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
           (name,
            ipolicy[constants.ISPECS_MIN].get(name, "-"),
            ipolicy[constants.ISPECS_MAX].get(name, "-"),
            ipolicy[constants.ISPECS_STD].get(name, "-")))
    if min_v > std_v or std_v > max_v:
      raise errors.ConfigurationError(err)
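
  # Illustrative example (not part of the original module; numbers are
  # hypothetical): for a given spec name, min=128/std=256/max=512 passes,
  # while min=512/std=256 raises ConfigurationError. Missing values default
  # to min=0, std=min and max=std before the comparison.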

  @classmethod
  def CheckDiskTemplates(cls, disk_templates):
    """Checks the disk templates for validity.

    """
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
    if wrong:
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
                                      utils.CommaJoin(wrong))

  @classmethod
  def CheckParameter(cls, key, value):
    """Checks a parameter.

    Currently we expect all parameters to be float values.

    """
    try:
      float(value)
    except (TypeError, ValueError), err:
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
                                      " '%s', error: %s" % (key, value, err))


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "osparams",
    "admin_state",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary of
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
        volumeN is of the form "vg_name/lv_name", compatible with
        GetVolumeList()

    """
    if node == None:
      node = self.primary_node

    if lvmap is None:
      lvmap = {
        node: [],
        }
      ret = lvmap
    else:
      if not node in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret
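
  # Illustrative example (not part of the original module; names are
  # hypothetical): for an instance with one plain LV disk ("xenvg", "lv1")
  # on its primary node, MapLVsByNode() returns {"node1": ["xenvg/lv1"]};
  # DRBD disks add their backing LVs under both nodes of the logical_id.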

  def FindDisk(self, idx):
    """Find a disk given its index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except (TypeError, ValueError), err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
                                 errors.ECODE_INVAL)

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    if "admin_state" not in val:
      if val.get("admin_up", False):
        val["admin_state"] = constants.ADMINST_UP
      else:
        val["admin_state"] = constants.ADMINST_DOWN
    if "admin_up" in val:
      del val["admin_up"]
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    if self.hvparams:
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass
    if self.osparams is None:
      self.osparams = {}
    UpgradeBeParams(self.beparams)


class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the parameters supported by this OS

  @type VARIANT_DELIM: string
  @cvar VARIANT_DELIM: the variant delimiter

  """
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]

  VARIANT_DELIM = "+"

  @classmethod
  def SplitNameVariant(cls, name):
    """Splits the name into the proper name and variant.

    @param name: the OS (unprocessed) name
    @rtype: list
    @return: a list of two elements; if the original name didn't
        contain a variant, it's returned as an empty string

    """
    nv = name.split(cls.VARIANT_DELIM, 1)
    if len(nv) == 1:
      nv.append("")
    return nv
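
  # Illustrative example (not part of the original module; the OS name is
  # hypothetical): SplitNameVariant("debian+squeeze") == ["debian", "squeeze"]
  # and SplitNameVariant("debian") == ["debian", ""].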

  @classmethod
  def GetName(cls, name):
    """Returns the proper name of the os (without the variant).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[0]

  @classmethod
  def GetVariant(cls, name):
    """Returns the variant of the os (without the base name).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[1]


class NodeHvState(ConfigObject):
  """Hypervisor state on a node.

  @ivar mem_total: Total amount of memory
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
    available)
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
    rounding
  @ivar mem_inst: Memory used by instances living on node
  @ivar cpu_total: Total node CPU core count
  @ivar cpu_node: Number of CPU cores reserved for the node itself

  """
  __slots__ = [
    "mem_total",
    "mem_node",
    "mem_hv",
    "mem_inst",
    "cpu_total",
    "cpu_node",
    ] + _TIMESTAMPS


class NodeDiskState(ConfigObject):
  """Disk state on a node.

  """
  __slots__ = [
    "total",
    "reserved",
    "overhead",
    ] + _TIMESTAMPS


class Node(TaggableObject):
  """Config object representing a node.

  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
  @ivar hv_state_static: Hypervisor state overridden by user
  @ivar disk_state: Disk state (e.g. free space)
  @ivar disk_state_static: Disk state overridden by user

  """
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    "group",
    "master_capable",
    "vm_capable",
    "ndparams",
    "powered",
    "hv_state",
    "hv_state_static",
    "disk_state",
    "disk_state_static",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.master_capable is None:
      self.master_capable = True

    if self.vm_capable is None:
      self.vm_capable = True

    if self.ndparams is None:
      self.ndparams = {}

    if self.powered is None:
      self.powered = True

  def ToDict(self):
    """Custom function for serializing.

    """
    data = super(Node, self).ToDict()

    hv_state = data.get("hv_state", None)
    if hv_state is not None:
      data["hv_state"] = self._ContainerToDicts(hv_state)

    disk_state = data.get("disk_state", None)
    if disk_state is not None:
      data["disk_state"] = \
        dict((key, self._ContainerToDicts(value))
             for (key, value) in disk_state.items())

    return data

  @classmethod
  def FromDict(cls, val):
    """Custom function for deserializing.

    """
    obj = super(Node, cls).FromDict(val)

    if obj.hv_state is not None:
      obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState)

    if obj.disk_state is not None:
      obj.disk_state = \
        dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState))
             for (key, value) in obj.disk_state.items())

    return obj


class NodeGroup(TaggableObject):
  """Config object representing a node group."""
  __slots__ = [
    "name",
    "members",
    "ndparams",
    "diskparams",
    "ipolicy",
    "serial_no",
    "hv_state_static",
    "disk_state_static",
    "alloc_policy",
    ] + _TIMESTAMPS + _UUID

  def ToDict(self):
    """Custom function for nodegroup.

    This discards the members object, which gets recalculated and is only kept
    in memory.

    """
    mydict = super(NodeGroup, self).ToDict()
    del mydict["members"]
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for nodegroup.

    The members slot is initialized to an empty list, upon deserialization.

    """
    obj = super(NodeGroup, cls).FromDict(val)
    obj.members = []
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.ndparams is None:
      self.ndparams = {}

    if self.serial_no is None:
      self.serial_no = 1

    if self.alloc_policy is None:
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED

    # We only update mtime, and not ctime, since we would not be able
    # to provide a correct value for creation time.
    if self.mtime is None:
      self.mtime = time.time()

    self.diskparams = UpgradeDiskParams(self.diskparams)
    if self.ipolicy is None:
      self.ipolicy = MakeEmptyIPolicy()

  def FillND(self, node):
    """Return filled out ndparams for L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(node.ndparams)

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the node group defaults

    """
    return FillDict(self.ndparams, ndparams)


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "master_netmask",
    "use_external_mip_script",
    "cluster_name",
    "file_storage_dir",
    "shared_file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "ipolicy",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "ndparams",
    "diskparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    "hv_state_static",
    "disk_state_static",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    if self.ndparams is None:
      self.ndparams = constants.NDC_DEFAULTS

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
        [hvname for hvname in self.enabled_hypervisors
         if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    self.diskparams = UpgradeDiskParams(self.diskparams)

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
1564

    
1565
  @property
1566
  def primary_hypervisor(self):
1567
    """The first hypervisor is the primary.
1568

1569
    Useful, for example, for L{Node}'s hv/disk state.
1570

1571
    """
1572
    return self.enabled_hypervisors[0]
1573

    
1574
  def ToDict(self):
1575
    """Custom function for cluster.
1576

1577
    """
1578
    mydict = super(Cluster, self).ToDict()
1579
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
1580
    return mydict
1581

    
1582
  @classmethod
1583
  def FromDict(cls, val):
1584
    """Custom function for cluster.
1585

1586
    """
1587
    obj = super(Cluster, cls).FromDict(val)
1588
    if not isinstance(obj.tcpudp_port_pool, set):
1589
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1590
    return obj
1591

    
1592
  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1593
    """Get the default hypervisor parameters for the cluster.
1594

1595
    @param hypervisor: the hypervisor name
1596
    @param os_name: if specified, we'll also update the defaults for this OS
1597
    @param skip_keys: if passed, list of keys not to use
1598
    @return: the defaults dict
1599

1600
    """
1601
    if skip_keys is None:
1602
      skip_keys = []
1603

    
1604
    fill_stack = [self.hvparams.get(hypervisor, {})]
1605
    if os_name is not None:
1606
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1607
      fill_stack.append(os_hvp)
1608

    
1609
    ret_dict = {}
1610
    for o_dict in fill_stack:
1611
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1612

    
1613
    return ret_dict
1614

    
1615
  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1616
    """Fill a given hvparams dict with cluster defaults.
1617

1618
    @type hv_name: string
1619
    @param hv_name: the hypervisor to use
1620
    @type os_name: string
1621
    @param os_name: the OS to use for overriding the hypervisor defaults
1622
    @type skip_globals: boolean
1623
    @param skip_globals: if True, the global hypervisor parameters will
1624
        not be filled
1625
    @rtype: dict
1626
    @return: a copy of the given hvparams with missing keys filled from
1627
        the cluster defaults
1628

1629
    """
1630
    if skip_globals:
1631
      skip_keys = constants.HVC_GLOBALS
1632
    else:
1633
      skip_keys = []
1634

    
1635
    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1636
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
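
  # Illustrative example (not part of the upstream code): if the "default"
  # profile (constants.PP_DEFAULT) holds {"vcpus": 1, "auto_balance": True},
  # then SimpleFillBE({"vcpus": 4}) would return
  # {"vcpus": 4, "auto_balance": True} - explicit values win, the rest is
  # filled from the cluster defaults.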

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
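
  # Illustrative example (not part of the upstream code): for an OS such as
  # "debootstrap+secure" the layering above is
  #   self.osparams.get("debootstrap", {})         <- base OS defaults
  #   self.osparams.get("debootstrap+secure", {})  <- variant overrides
  #   os_params                                    <- explicit parameters
  # with each later layer overriding the earlier ones.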

  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}.

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))
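
  # Illustrative note (not part of the upstream code): the effective node
  # parameters are thus layered as cluster ndparams < node group ndparams <
  # node ndparams, e.g. a group-level "oob_program" applies unless the node
  # itself sets one.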

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj
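
  # Illustrative round-trip (not part of the upstream code): ToDict() flattens
  # every QueryFieldDefinition in "fields" into a plain dict and FromDict()
  # rebuilds the objects, so for a QueryResponse "resp" something like
  #   QueryResponse.FromDict(resp.ToDict()).fields[0].name
  # yields the same field name as resp.fields[0].name.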


class QueryRequest(ConfigObject):
  """Object holding a query request.

  """
  __slots__ = [
    "what",
    "fields",
    "qfilter",
    ]


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = [
    ]


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True
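
  # Illustrative example (not part of the upstream code): an SSH console is
  # expected to carry at least instance, host, user and command, e.g.
  #   InstanceConsole(instance="inst1.example.com", kind=constants.CONS_SSH,
  #                   host="node1.example.com", user="root",
  #                   command=["ssh", "node1.example.com"])
  # while a CONS_MESSAGE console only needs "instance" and "message".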


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
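
# Illustrative usage (not part of the upstream code): Dumps() and Loads()
# form a simple round-trip over the standard ConfigParser format, e.g.
#   cfp = SerializableConfigParser.Loads("[section]\nkey = value\n")
#   cfp.get("section", "key")  # -> "value"
#   data = cfp.Dumps()         # back to a string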