
lib/objects.py @ 32017174


1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable=E0203,W0201,R0902
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
# R0902: Allow instances of these objects to have more than 20 attributes
37

    
38
import ConfigParser
39
import re
40
import copy
41
import time
42
from cStringIO import StringIO
43

    
44
from ganeti import errors
45
from ganeti import constants
46
from ganeti import netutils
47
from ganeti import utils
48

    
49
from socket import AF_INET
50

    
51

    
52
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
53
           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
54

    
55
_TIMESTAMPS = ["ctime", "mtime"]
56
_UUID = ["uuid"]
57

    
58
# constants used to create InstancePolicy dictionary
59
TISPECS_GROUP_TYPES = {
60
  constants.MIN_ISPECS: constants.VTYPE_INT,
61
  constants.MAX_ISPECS: constants.VTYPE_INT,
62
}
63

    
64
TISPECS_CLUSTER_TYPES = {
65
  constants.MIN_ISPECS: constants.VTYPE_INT,
66
  constants.MAX_ISPECS: constants.VTYPE_INT,
67
  constants.STD_ISPECS: constants.VTYPE_INT,
68
  }
69

    
70

    
71
def FillDict(defaults_dict, custom_dict, skip_keys=None):
72
  """Basic function to apply settings on top a default dict.
73

74
  @type defaults_dict: dict
75
  @param defaults_dict: dictionary holding the default values
76
  @type custom_dict: dict
77
  @param custom_dict: dictionary holding customized values
78
  @type skip_keys: list
79
  @param skip_keys: which keys not to fill
80
  @rtype: dict
81
  @return: dict with the 'full' values
82

83
  """
84
  ret_dict = copy.deepcopy(defaults_dict)
85
  ret_dict.update(custom_dict)
86
  if skip_keys:
87
    for k in skip_keys:
88
      try:
89
        del ret_dict[k]
90
      except KeyError:
91
        pass
92
  return ret_dict
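
# A small, illustrative sketch of FillDict (values are made up): customized
# entries override the defaults, and any skip_keys are dropped from the result.
#
#   >>> FillDict({"a": 1, "b": 2}, {"b": 3}) == {"a": 1, "b": 3}
#   True
#   >>> FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"])
#   {'b': 3}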
93

    
94

    
95
def FillDictOfDicts(defaults_dict, custom_dict, skip_keys=None):
96
  """Run FillDict for each key in dictionary.
97

98
  """
99
  ret_dict = {}
100
  for key in defaults_dict.keys():
101
    ret_dict[key] = FillDict(defaults_dict[key],
102
                             custom_dict.get(key, {}),
103
                             skip_keys=skip_keys)
104
  return ret_dict
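
# A minimal sketch of FillDictOfDicts (the group name "grp" is made up):
# FillDict is applied per top-level key, so each defaults sub-dict is overlaid
# with the matching custom sub-dict.
#
#   >>> full = FillDictOfDicts({"grp": {"a": 1, "b": 2}}, {"grp": {"b": 3}})
#   >>> full == {"grp": {"a": 1, "b": 3}}
#   True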
105

    
106

    
107
def UpgradeGroupedParams(target, defaults):
108
  """Update all groups for the target parameter.
109

110
  @type target: dict of dicts
111
  @param target: {group: {parameter: value}}
112
  @type defaults: dict
113
  @param defaults: default parameter values
114

115
  """
116
  if target is None:
117
    target = {constants.PP_DEFAULT: defaults}
118
  else:
119
    for group in target:
120
      target[group] = FillDict(defaults, target[group])
121
  return target
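
# An illustrative call (hypothetical group name): existing groups are filled
# with the defaults, while a None target is replaced by a single default group
# keyed by constants.PP_DEFAULT.
#
#   >>> upgraded = UpgradeGroupedParams({"group1": {"a": 10}}, {"a": 1, "b": 2})
#   >>> upgraded == {"group1": {"a": 10, "b": 2}}
#   True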
122

    
123

    
124
def UpgradeBeParams(target):
125
  """Update the be parameters dict to the new format.
126

127
  @type target: dict
128
  @param target: "be" parameters dict
129

130
  """
131
  if constants.BE_MEMORY in target:
132
    memory = target[constants.BE_MEMORY]
133
    target[constants.BE_MAXMEM] = memory
134
    target[constants.BE_MINMEM] = memory
135
    del target[constants.BE_MEMORY]
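
# A hedged sketch of the in-place upgrade (128 is just an example value): the
# legacy BE_MEMORY value is copied to both BE_MAXMEM and BE_MINMEM and the old
# key is removed.
#
#   >>> be = {constants.BE_MEMORY: 128}
#   >>> UpgradeBeParams(be)
#   >>> be == {constants.BE_MAXMEM: 128, constants.BE_MINMEM: 128}
#   True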
136

    
137

    
138
def UpgradeDiskParams(diskparams):
139
  """Upgrade the disk parameters.
140

141
  @type diskparams: dict
142
  @param diskparams: disk parameters to upgrade
143
  @rtype: dict
144
  @return: the upgraded disk parameters dict
145

146
  """
147
  result = dict()
148
  if diskparams is None:
149
    result = constants.DISK_DT_DEFAULTS.copy()
150
  else:
151
    # Update the disk parameter values for each disk template.
152
    # The code iterates over constants.DISK_TEMPLATES because new templates
153
    # might have been added.
154
    for template in constants.DISK_TEMPLATES:
155
      if template not in diskparams:
156
        result[template] = constants.DISK_DT_DEFAULTS[template].copy()
157
      else:
158
        result[template] = FillDict(constants.DISK_DT_DEFAULTS[template],
159
                                    diskparams[template])
160

    
161
  return result
162

    
163

    
164
def MakeEmptyIPolicy():
165
  """Create empty IPolicy dictionary.
166

167
  """
168
  return dict([
169
    (constants.MIN_ISPECS, dict()),
170
    (constants.MAX_ISPECS, dict()),
171
    (constants.STD_ISPECS, dict()),
172
    ])
173

    
174

    
175
def CreateIPolicyFromOpts(ispecs_mem_size=None,
176
                          ispecs_cpu_count=None,
177
                          ispecs_disk_count=None,
178
                          ispecs_disk_size=None,
179
                          ispecs_nic_count=None,
180
                          group_ipolicy=False,
181
                          allowed_values=None):
182
  """Creation of instane policy based on command line options.
183

184

185
  """
186
  # prepare ipolicy dict
187
  ipolicy_transposed = {
188
    constants.MEM_SIZE_SPEC: ispecs_mem_size,
189
    constants.CPU_COUNT_SPEC: ispecs_cpu_count,
190
    constants.DISK_COUNT_SPEC: ispecs_disk_count,
191
    constants.DISK_SIZE_SPEC: ispecs_disk_size,
192
    constants.NIC_COUNT_SPEC: ispecs_nic_count,
193
    }
194

    
195
  # first, check that the values given are correct
196
  if group_ipolicy:
197
    forced_type = TISPECS_GROUP_TYPES
198
  else:
199
    forced_type = TISPECS_CLUSTER_TYPES
200

    
201
  for specs in ipolicy_transposed.values():
202
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
203

    
204
  # then transpose
205
  ipolicy_out = MakeEmptyIPolicy()
206
  for name, specs in ipolicy_transposed.iteritems():
207
    assert name in constants.ISPECS_PARAMETERS
208
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
209
      ipolicy_out[key][name] = val
210

    
211
  return ipolicy_out
212

    
213

    
214
class ConfigObject(object):
215
  """A generic config object.
216

217
  It has the following properties:
218

219
    - provides somewhat safe recursive unpickling and pickling for its classes
220
    - unset attributes which are defined in slots are always returned
221
      as None instead of raising an error
222

223
  Classes derived from this must always declare __slots__ (we use many
224
  config objects and the memory reduction is useful)
225

226
  """
227
  __slots__ = []
228

    
229
  def __init__(self, **kwargs):
230
    for k, v in kwargs.iteritems():
231
      setattr(self, k, v)
232

    
233
  def __getattr__(self, name):
234
    if name not in self._all_slots():
235
      raise AttributeError("Invalid object attribute %s.%s" %
236
                           (type(self).__name__, name))
237
    return None
238

    
239
  def __setstate__(self, state):
240
    slots = self._all_slots()
241
    for name in state:
242
      if name in slots:
243
        setattr(self, name, state[name])
244

    
245
  @classmethod
246
  def _all_slots(cls):
247
    """Compute the list of all declared slots for a class.
248

249
    """
250
    slots = []
251
    for parent in cls.__mro__:
252
      slots.extend(getattr(parent, "__slots__", []))
253
    return slots
254

    
255
  def ToDict(self):
256
    """Convert to a dict holding only standard python types.
257

258
    The generic routine just dumps all of this object's attributes in
259
    a dict. It does not work if the class has children who are
260
    ConfigObjects themselves (e.g. the nics list in an Instance), in
261
    which case the object should subclass the function in order to
262
    make sure all objects returned are only standard python types.
263

264
    """
265
    result = {}
266
    for name in self._all_slots():
267
      value = getattr(self, name, None)
268
      if value is not None:
269
        result[name] = value
270
    return result
271

    
272
  __getstate__ = ToDict
273

    
274
  @classmethod
275
  def FromDict(cls, val):
276
    """Create an object from a dictionary.
277

278
    This generic routine takes a dict, instantiates a new instance of
279
    the given class, and sets attributes based on the dict content.
280

281
    As for `ToDict`, this does not work if the class has children
282
    who are ConfigObjects themselves (e.g. the nics list in an
283
    Instance), in which case the object should subclass the function
284
    and alter the objects.
285

286
    """
287
    if not isinstance(val, dict):
288
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
289
                                      " expected dict, got %s" % type(val))
290
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
291
    obj = cls(**val_str) # pylint: disable=W0142
292
    return obj
293

    
294
  @staticmethod
295
  def _ContainerToDicts(container):
296
    """Convert the elements of a container to standard python types.
297

298
    This method converts a container with elements derived from
299
    ConfigData to standard python types. If the container is a dict,
300
    we don't touch the keys, only the values.
301

302
    """
303
    if isinstance(container, dict):
304
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
305
    elif isinstance(container, (list, tuple, set, frozenset)):
306
      ret = [elem.ToDict() for elem in container]
307
    else:
308
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
309
                      type(container))
310
    return ret
311

    
312
  @staticmethod
313
  def _ContainerFromDicts(source, c_type, e_type):
314
    """Convert a container from standard python types.
315

316
    This method converts a container with standard python types to
317
    ConfigData objects. If the container is a dict, we don't touch the
318
    keys, only the values.
319

320
    """
321
    if not isinstance(c_type, type):
322
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
323
                      " not a type" % type(c_type))
324
    if source is None:
325
      source = c_type()
326
    if c_type is dict:
327
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
328
    elif c_type in (list, tuple, set, frozenset):
329
      ret = c_type([e_type.FromDict(elem) for elem in source])
330
    else:
331
      raise TypeError("Invalid container type %s passed to"
332
                      " _ContainerFromDicts" % c_type)
333
    return ret
334

    
335
  def Copy(self):
336
    """Makes a deep copy of the current object and its children.
337

338
    """
339
    dict_form = self.ToDict()
340
    clone_obj = self.__class__.FromDict(dict_form)
341
    return clone_obj
342

    
343
  def __repr__(self):
344
    """Implement __repr__ for ConfigObjects."""
345
    return repr(self.ToDict())
346

    
347
  def UpgradeConfig(self):
348
    """Fill defaults for missing configuration values.
349

350
    This method will be called at configuration load time, and its
351
    implementation will be object dependent.
352

353
    """
354
    pass
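
# A minimal sketch of how ConfigObject subclasses behave; the _Example class is
# hypothetical and only serves as an illustration. Unset slots read as None,
# and ToDict/FromDict round-trip the declared attributes.
#
#   >>> class _Example(ConfigObject):
#   ...   __slots__ = ["foo", "bar"]
#   >>> obj = _Example(foo=1)
#   >>> obj.bar is None
#   True
#   >>> obj.ToDict()
#   {'foo': 1}
#   >>> _Example.FromDict({"foo": 2}).foo
#   2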
355

    
356

    
357
class TaggableObject(ConfigObject):
358
  """An generic class supporting tags.
359

360
  """
361
  __slots__ = ["tags"]
362
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
363

    
364
  @classmethod
365
  def ValidateTag(cls, tag):
366
    """Check if a tag is valid.
367

368
    If the tag is invalid, an errors.TagError will be raised. The
369
    function has no return value.
370

371
    """
372
    if not isinstance(tag, basestring):
373
      raise errors.TagError("Invalid tag type (not a string)")
374
    if len(tag) > constants.MAX_TAG_LEN:
375
      raise errors.TagError("Tag too long (>%d characters)" %
376
                            constants.MAX_TAG_LEN)
377
    if not tag:
378
      raise errors.TagError("Tags cannot be empty")
379
    if not cls.VALID_TAG_RE.match(tag):
380
      raise errors.TagError("Tag contains invalid characters")
381

    
382
  def GetTags(self):
383
    """Return the tags list.
384

385
    """
386
    tags = getattr(self, "tags", None)
387
    if tags is None:
388
      tags = self.tags = set()
389
    return tags
390

    
391
  def AddTag(self, tag):
392
    """Add a new tag.
393

394
    """
395
    self.ValidateTag(tag)
396
    tags = self.GetTags()
397
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
398
      raise errors.TagError("Too many tags")
399
    self.GetTags().add(tag)
400

    
401
  def RemoveTag(self, tag):
402
    """Remove a tag.
403

404
    """
405
    self.ValidateTag(tag)
406
    tags = self.GetTags()
407
    try:
408
      tags.remove(tag)
409
    except KeyError:
410
      raise errors.TagError("Tag not found")
411

    
412
  def ToDict(self):
413
    """Taggable-object-specific conversion to standard python types.
414

415
    This replaces the tags set with a list.
416

417
    """
418
    bo = super(TaggableObject, self).ToDict()
419

    
420
    tags = bo.get("tags", None)
421
    if isinstance(tags, set):
422
      bo["tags"] = list(tags)
423
    return bo
424

    
425
  @classmethod
426
  def FromDict(cls, val):
427
    """Custom function for instances.
428

429
    """
430
    obj = super(TaggableObject, cls).FromDict(val)
431
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
432
      obj.tags = set(obj.tags)
433
    return obj
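
# A short, illustrative tag workflow (the _Tagged subclass is hypothetical):
# tags are validated, kept in a set, and serialized as a list by ToDict().
#
#   >>> class _Tagged(TaggableObject):
#   ...   __slots__ = []
#   >>> t = _Tagged()
#   >>> t.AddTag("www")
#   >>> t.ToDict()["tags"]
#   ['www']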
434

    
435

    
436
class MasterNetworkParameters(ConfigObject):
437
  """Network configuration parameters for the master
438

439
  @ivar name: master name
440
  @ivar ip: master IP
441
  @ivar netmask: master netmask
442
  @ivar netdev: master network device
443
  @ivar ip_family: master IP family
444

445
  """
446
  __slots__ = [
447
    "name",
448
    "ip",
449
    "netmask",
450
    "netdev",
451
    "ip_family"
452
    ]
453

    
454

    
455
class ConfigData(ConfigObject):
456
  """Top-level config object."""
457
  __slots__ = [
458
    "version",
459
    "cluster",
460
    "nodes",
461
    "nodegroups",
462
    "instances",
463
    "serial_no",
464
    ] + _TIMESTAMPS
465

    
466
  def ToDict(self):
467
    """Custom function for top-level config data.
468

469
    This just replaces the list of instances, nodes and the cluster
470
    with standard python types.
471

472
    """
473
    mydict = super(ConfigData, self).ToDict()
474
    mydict["cluster"] = mydict["cluster"].ToDict()
475
    for key in "nodes", "instances", "nodegroups":
476
      mydict[key] = self._ContainerToDicts(mydict[key])
477

    
478
    return mydict
479

    
480
  @classmethod
481
  def FromDict(cls, val):
482
    """Custom function for top-level config data
483

484
    """
485
    obj = super(ConfigData, cls).FromDict(val)
486
    obj.cluster = Cluster.FromDict(obj.cluster)
487
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
488
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
489
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
490
    return obj
491

    
492
  def HasAnyDiskOfType(self, dev_type):
493
    """Check if in there is at disk of the given type in the configuration.
494

495
    @type dev_type: L{constants.LDS_BLOCK}
496
    @param dev_type: the type to look for
497
    @rtype: boolean
498
    @return: boolean indicating if a disk of the given type was found or not
499

500
    """
501
    for instance in self.instances.values():
502
      for disk in instance.disks:
503
        if disk.IsBasedOnDiskType(dev_type):
504
          return True
505
    return False
506

    
507
  def UpgradeConfig(self):
508
    """Fill defaults for missing configuration values.
509

510
    """
511
    self.cluster.UpgradeConfig()
512
    for node in self.nodes.values():
513
      node.UpgradeConfig()
514
    for instance in self.instances.values():
515
      instance.UpgradeConfig()
516
    if self.nodegroups is None:
517
      self.nodegroups = {}
518
    for nodegroup in self.nodegroups.values():
519
      nodegroup.UpgradeConfig()
520
    if self.cluster.drbd_usermode_helper is None:
521
      # To decide if we set a helper, let's check if at least one instance has
522
      # a DRBD disk. This does not cover all the possible scenarios but it
523
      # gives a good approximation.
524
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
525
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
526

    
527

    
528
class NIC(ConfigObject):
529
  """Config object representing a network card."""
530
  __slots__ = ["mac", "ip", "nicparams"]
531

    
532
  @classmethod
533
  def CheckParameterSyntax(cls, nicparams):
534
    """Check the given parameters for validity.
535

536
    @type nicparams:  dict
537
    @param nicparams: dictionary with parameter names/value
538
    @raise errors.ConfigurationError: when a parameter is not valid
539

540
    """
541
    if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
542
        nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
543
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
544
      raise errors.ConfigurationError(err)
545

    
546
    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
547
        not nicparams[constants.NIC_LINK]):
548
      err = "Missing bridged nic link"
549
      raise errors.ConfigurationError(err)
550

    
551

    
552
class Disk(ConfigObject):
553
  """Config object representing a block device."""
554
  __slots__ = ["dev_type", "logical_id", "physical_id",
555
               "children", "iv_name", "size", "mode", "params"]
556

    
557
  def CreateOnSecondary(self):
558
    """Test if this device needs to be created on a secondary node."""
559
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
560

    
561
  def AssembleOnSecondary(self):
562
    """Test if this device needs to be assembled on a secondary node."""
563
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
564

    
565
  def OpenOnSecondary(self):
566
    """Test if this device needs to be opened on a secondary node."""
567
    return self.dev_type in (constants.LD_LV,)
568

    
569
  def StaticDevPath(self):
570
    """Return the device path if this device type has a static one.
571

572
    Some devices (LVM for example) always live at the same /dev/ path,
573
    irrespective of their status. For such devices, we return this
574
    path, for others we return None.
575

576
    @warning: The path returned is not a normalized pathname; callers
577
        should check that it is a valid path.
578

579
    """
580
    if self.dev_type == constants.LD_LV:
581
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
582
    elif self.dev_type == constants.LD_BLOCKDEV:
583
      return self.logical_id[1]
584
    return None
585

    
586
  def ChildrenNeeded(self):
587
    """Compute the needed number of children for activation.
588

589
    This method will return either -1 (all children) or a positive
590
    number denoting the minimum number of children needed for
591
    activation (only mirrored devices will usually return >=0).
592

593
    Currently, only DRBD8 supports diskless activation (therefore we
594
    return 0), for all other we keep the previous semantics and return
595
    -1.
596

597
    """
598
    if self.dev_type == constants.LD_DRBD8:
599
      return 0
600
    return -1
601

    
602
  def IsBasedOnDiskType(self, dev_type):
603
    """Check if the disk or its children are based on the given type.
604

605
    @type dev_type: L{constants.LDS_BLOCK}
606
    @param dev_type: the type to look for
607
    @rtype: boolean
608
    @return: boolean indicating if a device of the given type was found or not
609

610
    """
611
    if self.children:
612
      for child in self.children:
613
        if child.IsBasedOnDiskType(dev_type):
614
          return True
615
    return self.dev_type == dev_type
616

    
617
  def GetNodes(self, node):
618
    """This function returns the nodes this device lives on.
619

620
    Given the node on which the parent of the device lives on (or, in
621
    case of a top-level device, the primary node of the devices'
622
    instance), this function will return a list of nodes on which this
623
    device needs to (or can) be assembled.
624

625
    """
626
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
627
                         constants.LD_BLOCKDEV]:
628
      result = [node]
629
    elif self.dev_type in constants.LDS_DRBD:
630
      result = [self.logical_id[0], self.logical_id[1]]
631
      if node not in result:
632
        raise errors.ConfigurationError("DRBD device passed unknown node")
633
    else:
634
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
635
    return result
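
  # An illustrative call (node and VG names are made up): an LV-backed disk
  # lives only on the node that was passed in, while DRBD disks return both
  # nodes recorded in their logical_id.
  #
  #   >>> lv = Disk(dev_type=constants.LD_LV, logical_id=("xenvg", "data"))
  #   >>> lv.GetNodes("node1.example.com")
  #   ['node1.example.com']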
636

    
637
  def ComputeNodeTree(self, parent_node):
638
    """Compute the node/disk tree for this disk and its children.
639

640
    This method, given the node on which the parent disk lives, will
641
    return the list of all (node, disk) pairs which describe the disk
642
    tree in the most compact way. For example, a drbd/lvm stack
643
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
644
    which represents all the top-level devices on the nodes.
645

646
    """
647
    my_nodes = self.GetNodes(parent_node)
648
    result = [(node, self) for node in my_nodes]
649
    if not self.children:
650
      # leaf device
651
      return result
652
    for node in my_nodes:
653
      for child in self.children:
654
        child_result = child.ComputeNodeTree(node)
655
        if len(child_result) == 1:
656
          # child (and all its descendants) is simple, doesn't split
657
          # over multiple hosts, so we don't need to describe it, our
658
          # own entry for this node describes it completely
659
          continue
660
        else:
661
          # check if child nodes differ from my nodes; note that
662
          # subdisk can differ from the child itself, and be instead
663
          # one of its descendants
664
          for subnode, subdisk in child_result:
665
            if subnode not in my_nodes:
666
              result.append((subnode, subdisk))
667
            # otherwise child is under our own node, so we ignore this
668
            # entry (but probably the other results in the list will
669
            # be different)
670
    return result
671

    
672
  def ComputeGrowth(self, amount):
673
    """Compute the per-VG growth requirements.
674

675
    This only works for VG-based disks.
676

677
    @type amount: integer
678
    @param amount: the desired increase in (user-visible) disk space
679
    @rtype: dict
680
    @return: a dictionary of volume-groups and the required size
681

682
    """
683
    if self.dev_type == constants.LD_LV:
684
      return {self.logical_id[0]: amount}
685
    elif self.dev_type == constants.LD_DRBD8:
686
      if self.children:
687
        return self.children[0].ComputeGrowth(amount)
688
      else:
689
        return {}
690
    else:
691
      # Other disk types do not require VG space
692
      return {}
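
  # A minimal sketch (the volume group name is made up): for an LV-backed disk
  # the requested growth is charged entirely to its volume group.
  #
  #   >>> lv = Disk(dev_type=constants.LD_LV, logical_id=("xenvg", "data"))
  #   >>> lv.ComputeGrowth(512)
  #   {'xenvg': 512}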
693

    
694
  def RecordGrow(self, amount):
695
    """Update the size of this disk after growth.
696

697
    This method recurses over the disks's children and updates their
698
    size correspondigly. The method needs to be kept in sync with the
699
    actual algorithms from bdev.
700

701
    """
702
    if self.dev_type in (constants.LD_LV, constants.LD_FILE):
703
      self.size += amount
704
    elif self.dev_type == constants.LD_DRBD8:
705
      if self.children:
706
        self.children[0].RecordGrow(amount)
707
      self.size += amount
708
    else:
709
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
710
                                   " disk type %s" % self.dev_type)
711

    
712
  def UnsetSize(self):
713
    """Sets recursively the size to zero for the disk and its children.
714

715
    """
716
    if self.children:
717
      for child in self.children:
718
        child.UnsetSize()
719
    self.size = 0
720

    
721
  def SetPhysicalID(self, target_node, nodes_ip):
722
    """Convert the logical ID to the physical ID.
723

724
    This is used only for drbd, which needs ip/port configuration.
725

726
    The routine descends down and updates its children also, because
727
    this helps when the only the top device is passed to the remote
728
    node.
729

730
    Arguments:
731
      - target_node: the node we wish to configure for
732
      - nodes_ip: a mapping of node name to ip
733

734
    The target_node must exist in nodes_ip, and must be one of the
735
    nodes in the logical ID for each of the DRBD devices encountered
736
    in the disk tree.
737

738
    """
739
    if self.children:
740
      for child in self.children:
741
        child.SetPhysicalID(target_node, nodes_ip)
742

    
743
    if self.logical_id is None and self.physical_id is not None:
744
      return
745
    if self.dev_type in constants.LDS_DRBD:
746
      pnode, snode, port, pminor, sminor, secret = self.logical_id
747
      if target_node not in (pnode, snode):
748
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
749
                                        target_node)
750
      pnode_ip = nodes_ip.get(pnode, None)
751
      snode_ip = nodes_ip.get(snode, None)
752
      if pnode_ip is None or snode_ip is None:
753
        raise errors.ConfigurationError("Can't find primary or secondary node"
754
                                        " for %s" % str(self))
755
      p_data = (pnode_ip, port)
756
      s_data = (snode_ip, port)
757
      if pnode == target_node:
758
        self.physical_id = p_data + s_data + (pminor, secret)
759
      else: # it must be secondary, we tested above
760
        self.physical_id = s_data + p_data + (sminor, secret)
761
    else:
762
      self.physical_id = self.logical_id
763
    return
764

    
765
  def ToDict(self):
766
    """Disk-specific conversion to standard python types.
767

768
    This replaces the children lists of objects with lists of
769
    standard python types.
770

771
    """
772
    bo = super(Disk, self).ToDict()
773

    
774
    for attr in ("children",):
775
      alist = bo.get(attr, None)
776
      if alist:
777
        bo[attr] = self._ContainerToDicts(alist)
778
    return bo
779

    
780
  @classmethod
781
  def FromDict(cls, val):
782
    """Custom function for Disks
783

784
    """
785
    obj = super(Disk, cls).FromDict(val)
786
    if obj.children:
787
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
788
    if obj.logical_id and isinstance(obj.logical_id, list):
789
      obj.logical_id = tuple(obj.logical_id)
790
    if obj.physical_id and isinstance(obj.physical_id, list):
791
      obj.physical_id = tuple(obj.physical_id)
792
    if obj.dev_type in constants.LDS_DRBD:
793
      # we need a tuple of length six here
794
      if len(obj.logical_id) < 6:
795
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
796
    return obj
797

    
798
  def __str__(self):
799
    """Custom str() formatter for disks.
800

801
    """
802
    if self.dev_type == constants.LD_LV:
803
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
804
    elif self.dev_type in constants.LDS_DRBD:
805
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
806
      val = "<DRBD8("
807
      if self.physical_id is None:
808
        phy = "unconfigured"
809
      else:
810
        phy = ("configured as %s:%s %s:%s" %
811
               (self.physical_id[0], self.physical_id[1],
812
                self.physical_id[2], self.physical_id[3]))
813

    
814
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
815
              (node_a, minor_a, node_b, minor_b, port, phy))
816
      if self.children and self.children.count(None) == 0:
817
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
818
      else:
819
        val += "no local storage"
820
    else:
821
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
822
             (self.dev_type, self.logical_id, self.physical_id, self.children))
823
    if self.iv_name is None:
824
      val += ", not visible"
825
    else:
826
      val += ", visible as /dev/%s" % self.iv_name
827
    if isinstance(self.size, int):
828
      val += ", size=%dm)>" % self.size
829
    else:
830
      val += ", size='%s')>" % (self.size,)
831
    return val
832

    
833
  def Verify(self):
834
    """Checks that this disk is correctly configured.
835

836
    """
837
    all_errors = []
838
    if self.mode not in constants.DISK_ACCESS_SET:
839
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
840
    return all_errors
841

    
842
  def UpgradeConfig(self):
843
    """Fill defaults for missing configuration values.
844

845
    """
846
    if self.children:
847
      for child in self.children:
848
        child.UpgradeConfig()
849

    
850
    if not self.params:
851
      self.params = constants.DISK_LD_DEFAULTS[self.dev_type].copy()
852
    else:
853
      self.params = FillDict(constants.DISK_LD_DEFAULTS[self.dev_type],
854
                             self.params)
855
    # add here config upgrade for this disk
856

    
857

    
858
class InstancePolicy(ConfigObject):
859
  """Config object representing instance policy limits dictionary."""
860
  __slots__ = ["min", "max", "std"]
861

    
862
  @classmethod
863
  def CheckParameterSyntax(cls, ipolicy):
864
    """ Check the instance policy for validity.
865

866
    """
867
    for param in constants.ISPECS_PARAMETERS:
868
      InstancePolicy.CheckISpecSyntax(ipolicy, param)
869

    
870
  @classmethod
871
  def CheckISpecSyntax(cls, ipolicy, name):
872
    """Check the instance policy for validity on a given key.
873

874
    We check if the instance policy makes sense for a given key, that is
875
    if ipolicy[min][name] <= ipolicy[std][name] <= ipolicy[max][name].
876

877
    @type ipolicy: dict
878
    @param ipolicy: dictionary with min, max, std specs
879
    @type name: string
880
    @param name: what are the limits for
881
    @raise errors.ConfigurationError: when specs for given name are not valid
882

883
    """
884
    min_v = ipolicy[constants.MIN_ISPECS].get(name, 0)
885
    std_v = ipolicy[constants.STD_ISPECS].get(name, min_v)
886
    max_v = ipolicy[constants.MAX_ISPECS].get(name, std_v)
887
    err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
888
           (name,
889
            ipolicy[constants.MIN_ISPECS].get(name, "-"),
890
            ipolicy[constants.MAX_ISPECS].get(name, "-"),
891
            ipolicy[constants.STD_ISPECS].get(name, "-")))
892
    if min_v > std_v or std_v > max_v:
893
      raise errors.ConfigurationError(err)
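
# A hedged sketch of the syntax check ("memory-size" is only an illustrative
# key): a policy passes for a given name only if min <= std <= max.
#
#   >>> pol = {constants.MIN_ISPECS: {"memory-size": 128},
#   ...        constants.STD_ISPECS: {"memory-size": 256},
#   ...        constants.MAX_ISPECS: {"memory-size": 512}}
#   >>> InstancePolicy.CheckISpecSyntax(pol, "memory-size")  # no error raised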
894

    
895

    
896
class Instance(TaggableObject):
897
  """Config object representing an instance."""
898
  __slots__ = [
899
    "name",
900
    "primary_node",
901
    "os",
902
    "hypervisor",
903
    "hvparams",
904
    "beparams",
905
    "osparams",
906
    "admin_state",
907
    "nics",
908
    "disks",
909
    "disk_template",
910
    "network_port",
911
    "serial_no",
912
    ] + _TIMESTAMPS + _UUID
913

    
914
  def _ComputeSecondaryNodes(self):
915
    """Compute the list of secondary nodes.
916

917
    This is a simple wrapper over _ComputeAllNodes.
918

919
    """
920
    all_nodes = set(self._ComputeAllNodes())
921
    all_nodes.discard(self.primary_node)
922
    return tuple(all_nodes)
923

    
924
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
925
                             "List of secondary nodes")
926

    
927
  def _ComputeAllNodes(self):
928
    """Compute the list of all nodes.
929

930
    Since the data is already there (in the drbd disks), keeping it as
931
    a separate normal attribute is redundant and if not properly
932
    synchronised can cause problems. Thus it's better to compute it
933
    dynamically.
934

935
    """
936
    def _Helper(nodes, device):
937
      """Recursively computes nodes given a top device."""
938
      if device.dev_type in constants.LDS_DRBD:
939
        nodea, nodeb = device.logical_id[:2]
940
        nodes.add(nodea)
941
        nodes.add(nodeb)
942
      if device.children:
943
        for child in device.children:
944
          _Helper(nodes, child)
945

    
946
    all_nodes = set()
947
    all_nodes.add(self.primary_node)
948
    for device in self.disks:
949
      _Helper(all_nodes, device)
950
    return tuple(all_nodes)
951

    
952
  all_nodes = property(_ComputeAllNodes, None, None,
953
                       "List of all nodes of the instance")
954

    
955
  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
956
    """Provide a mapping of nodes to LVs this instance owns.
957

958
    This function figures out what logical volumes should belong on
959
    which nodes, recursing through a device tree.
960

961
    @param lvmap: optional dictionary to receive the
962
        'node' : ['lv', ...] data.
963

964
    @return: None if lvmap arg is given, otherwise, a dictionary of
965
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
966
        volumeN is of the form "vg_name/lv_name", compatible with
967
        GetVolumeList()
968

969
    """
970
    if node is None:
971
      node = self.primary_node
972

    
973
    if lvmap is None:
974
      lvmap = {
975
        node: [],
976
        }
977
      ret = lvmap
978
    else:
979
      if not node in lvmap:
980
        lvmap[node] = []
981
      ret = None
982

    
983
    if not devs:
984
      devs = self.disks
985

    
986
    for dev in devs:
987
      if dev.dev_type == constants.LD_LV:
988
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
989

    
990
      elif dev.dev_type in constants.LDS_DRBD:
991
        if dev.children:
992
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
993
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
994

    
995
      elif dev.children:
996
        self.MapLVsByNode(lvmap, dev.children, node)
997

    
998
    return ret
999

    
1000
  def FindDisk(self, idx):
1001
    """Find a disk given having a specified index.
1002

1003
    This is just a wrapper that does validation of the index.
1004

1005
    @type idx: int
1006
    @param idx: the disk index
1007
    @rtype: L{Disk}
1008
    @return: the corresponding disk
1009
    @raise errors.OpPrereqError: when the given index is not valid
1010

1011
    """
1012
    try:
1013
      idx = int(idx)
1014
      return self.disks[idx]
1015
    except (TypeError, ValueError), err:
1016
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1017
                                 errors.ECODE_INVAL)
1018
    except IndexError:
1019
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1020
                                 " 0 to %d" % (idx, len(self.disks) - 1),
1021
                                 errors.ECODE_INVAL)
1022

    
1023
  def ToDict(self):
1024
    """Instance-specific conversion to standard python types.
1025

1026
    This replaces the children lists of objects with lists of standard
1027
    python types.
1028

1029
    """
1030
    bo = super(Instance, self).ToDict()
1031

    
1032
    for attr in "nics", "disks":
1033
      alist = bo.get(attr, None)
1034
      if alist:
1035
        nlist = self._ContainerToDicts(alist)
1036
      else:
1037
        nlist = []
1038
      bo[attr] = nlist
1039
    return bo
1040

    
1041
  @classmethod
1042
  def FromDict(cls, val):
1043
    """Custom function for instances.
1044

1045
    """
1046
    if "admin_state" not in val:
1047
      if val.get("admin_up", False):
1048
        val["admin_state"] = constants.ADMINST_UP
1049
      else:
1050
        val["admin_state"] = constants.ADMINST_DOWN
1051
    if "admin_up" in val:
1052
      del val["admin_up"]
1053
    obj = super(Instance, cls).FromDict(val)
1054
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
1055
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
1056
    return obj
1057

    
1058
  def UpgradeConfig(self):
1059
    """Fill defaults for missing configuration values.
1060

1061
    """
1062
    for nic in self.nics:
1063
      nic.UpgradeConfig()
1064
    for disk in self.disks:
1065
      disk.UpgradeConfig()
1066
    if self.hvparams:
1067
      for key in constants.HVC_GLOBALS:
1068
        try:
1069
          del self.hvparams[key]
1070
        except KeyError:
1071
          pass
1072
    if self.osparams is None:
1073
      self.osparams = {}
1074
    UpgradeBeParams(self.beparams)
1075

    
1076

    
1077
class OS(ConfigObject):
1078
  """Config object representing an operating system.
1079

1080
  @type supported_parameters: list
1081
  @ivar supported_parameters: a list of tuples, name and description,
1082
      containing the parameters supported by this OS
1083

1084
  @type VARIANT_DELIM: string
1085
  @cvar VARIANT_DELIM: the variant delimiter
1086

1087
  """
1088
  __slots__ = [
1089
    "name",
1090
    "path",
1091
    "api_versions",
1092
    "create_script",
1093
    "export_script",
1094
    "import_script",
1095
    "rename_script",
1096
    "verify_script",
1097
    "supported_variants",
1098
    "supported_parameters",
1099
    ]
1100

    
1101
  VARIANT_DELIM = "+"
1102

    
1103
  @classmethod
1104
  def SplitNameVariant(cls, name):
1105
    """Splits the name into the proper name and variant.
1106

1107
    @param name: the OS (unprocessed) name
1108
    @rtype: list
1109
    @return: a list of two elements; if the original name didn't
1110
        contain a variant, the variant is returned as an empty string
1111

1112
    """
1113
    nv = name.split(cls.VARIANT_DELIM, 1)
1114
    if len(nv) == 1:
1115
      nv.append("")
1116
    return nv
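
  # Illustrative splits (the OS names are made up): the part after the first
  # VARIANT_DELIM is the variant, and an empty string is used when there is
  # none.
  #
  #   >>> OS.SplitNameVariant("debootstrap+precise")
  #   ['debootstrap', 'precise']
  #   >>> OS.SplitNameVariant("debootstrap")
  #   ['debootstrap', '']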
1117

    
1118
  @classmethod
1119
  def GetName(cls, name):
1120
    """Returns the proper name of the os (without the variant).
1121

1122
    @param name: the OS (unprocessed) name
1123

1124
    """
1125
    return cls.SplitNameVariant(name)[0]
1126

    
1127
  @classmethod
1128
  def GetVariant(cls, name):
1129
    """Returns the variant the os (without the base name).
1130

1131
    @param name: the OS (unprocessed) name
1132

1133
    """
1134
    return cls.SplitNameVariant(name)[1]
1135

    
1136

    
1137
class NodeHvState(ConfigObject):
1138
  """Hypvervisor state on a node.
1139

1140
  @ivar mem_total: Total amount of memory
1141
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1142
    available)
1143
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1144
    rounding
1145
  @ivar mem_inst: Memory used by instances living on node
1146
  @ivar cpu_total: Total node CPU core count
1147
  @ivar cpu_node: Number of CPU cores reserved for the node itself
1148

1149
  """
1150
  __slots__ = [
1151
    "mem_total",
1152
    "mem_node",
1153
    "mem_hv",
1154
    "mem_inst",
1155
    "cpu_total",
1156
    "cpu_node",
1157
    ] + _TIMESTAMPS
1158

    
1159

    
1160
class NodeDiskState(ConfigObject):
1161
  """Disk state on a node.
1162

1163
  """
1164
  __slots__ = [
1165
    "total",
1166
    "reserved",
1167
    "overhead",
1168
    ] + _TIMESTAMPS
1169

    
1170

    
1171
class Node(TaggableObject):
1172
  """Config object representing a node.
1173

1174
  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1175
  @ivar hv_state_static: Hypervisor state overridden by user
1176
  @ivar disk_state: Disk state (e.g. free space)
1177
  @ivar disk_state_static: Disk state overridden by user
1178

1179
  """
1180
  __slots__ = [
1181
    "name",
1182
    "primary_ip",
1183
    "secondary_ip",
1184
    "serial_no",
1185
    "master_candidate",
1186
    "offline",
1187
    "drained",
1188
    "group",
1189
    "master_capable",
1190
    "vm_capable",
1191
    "ndparams",
1192
    "powered",
1193
    "hv_state",
1194
    "hv_state_static",
1195
    "disk_state",
1196
    "disk_state_static",
1197
    ] + _TIMESTAMPS + _UUID
1198

    
1199
  def UpgradeConfig(self):
1200
    """Fill defaults for missing configuration values.
1201

1202
    """
1203
    # pylint: disable=E0203
1204
    # because these are "defined" via slots, not manually
1205
    if self.master_capable is None:
1206
      self.master_capable = True
1207

    
1208
    if self.vm_capable is None:
1209
      self.vm_capable = True
1210

    
1211
    if self.ndparams is None:
1212
      self.ndparams = {}
1213

    
1214
    if self.powered is None:
1215
      self.powered = True
1216

    
1217
  def ToDict(self):
1218
    """Custom function for serializing.
1219

1220
    """
1221
    data = super(Node, self).ToDict()
1222

    
1223
    hv_state = data.get("hv_state", None)
1224
    if hv_state is not None:
1225
      data["hv_state"] = self._ContainerToDicts(hv_state)
1226

    
1227
    disk_state = data.get("disk_state", None)
1228
    if disk_state is not None:
1229
      data["disk_state"] = \
1230
        dict((key, self._ContainerToDicts(value))
1231
             for (key, value) in disk_state.items())
1232

    
1233
    return data
1234

    
1235
  @classmethod
1236
  def FromDict(cls, val):
1237
    """Custom function for deserializing.
1238

1239
    """
1240
    obj = super(Node, cls).FromDict(val)
1241

    
1242
    if obj.hv_state is not None:
1243
      obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1244

    
1245
    if obj.disk_state is not None:
1246
      obj.disk_state = \
1247
        dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState))
1248
             for (key, value) in obj.disk_state.items())
1249

    
1250
    return obj
1251

    
1252

    
1253
class NodeGroup(TaggableObject):
1254
  """Config object representing a node group."""
1255
  __slots__ = [
1256
    "name",
1257
    "members",
1258
    "ndparams",
1259
    "diskparams",
1260
    "serial_no",
1261
    "hv_state_static",
1262
    "disk_state_static",
1263
    "alloc_policy",
1264
    ] + _TIMESTAMPS + _UUID
1265

    
1266
  def ToDict(self):
1267
    """Custom function for nodegroup.
1268

1269
    This discards the members object, which gets recalculated and is only kept
1270
    in memory.
1271

1272
    """
1273
    mydict = super(NodeGroup, self).ToDict()
1274
    del mydict["members"]
1275
    return mydict
1276

    
1277
  @classmethod
1278
  def FromDict(cls, val):
1279
    """Custom function for nodegroup.
1280

1281
    The members slot is initialized to an empty list, upon deserialization.
1282

1283
    """
1284
    obj = super(NodeGroup, cls).FromDict(val)
1285
    obj.members = []
1286
    return obj
1287

    
1288
  def UpgradeConfig(self):
1289
    """Fill defaults for missing configuration values.
1290

1291
    """
1292
    if self.ndparams is None:
1293
      self.ndparams = {}
1294

    
1295
    if self.serial_no is None:
1296
      self.serial_no = 1
1297

    
1298
    if self.alloc_policy is None:
1299
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1300

    
1301
    # We only update mtime, and not ctime, since we would not be able to provide
1302
    # a correct value for creation time.
1303
    if self.mtime is None:
1304
      self.mtime = time.time()
1305

    
1306
    self.diskparams = UpgradeDiskParams(self.diskparams)
1307

    
1308
  def FillND(self, node):
1309
    """Return filled out ndparams for L{objects.Node}
1310

1311
    @type node: L{objects.Node}
1312
    @param node: A Node object to fill
1313
    @return: a copy of the node's ndparams with defaults filled
1314

1315
    """
1316
    return self.SimpleFillND(node.ndparams)
1317

    
1318
  def SimpleFillND(self, ndparams):
1319
    """Fill a given ndparams dict with defaults.
1320

1321
    @type ndparams: dict
1322
    @param ndparams: the dict to fill
1323
    @rtype: dict
1324
    @return: a copy of the passed in ndparams with missing keys filled
1325
        from the node group defaults
1326

1327
    """
1328
    return FillDict(self.ndparams, ndparams)
1329

    
1330

    
1331
class Cluster(TaggableObject):
1332
  """Config object representing the cluster."""
1333
  __slots__ = [
1334
    "serial_no",
1335
    "rsahostkeypub",
1336
    "highest_used_port",
1337
    "tcpudp_port_pool",
1338
    "mac_prefix",
1339
    "volume_group_name",
1340
    "reserved_lvs",
1341
    "drbd_usermode_helper",
1342
    "default_bridge",
1343
    "default_hypervisor",
1344
    "master_node",
1345
    "master_ip",
1346
    "master_netdev",
1347
    "master_netmask",
1348
    "use_external_mip_script",
1349
    "cluster_name",
1350
    "file_storage_dir",
1351
    "shared_file_storage_dir",
1352
    "enabled_hypervisors",
1353
    "hvparams",
1354
    "ipolicy",
1355
    "os_hvp",
1356
    "beparams",
1357
    "osparams",
1358
    "nicparams",
1359
    "ndparams",
1360
    "diskparams",
1361
    "candidate_pool_size",
1362
    "modify_etc_hosts",
1363
    "modify_ssh_setup",
1364
    "maintain_node_health",
1365
    "uid_pool",
1366
    "default_iallocator",
1367
    "hidden_os",
1368
    "blacklisted_os",
1369
    "primary_ip_family",
1370
    "prealloc_wipe_disks",
1371
    "hv_state_static",
1372
    "disk_state_static",
1373
    ] + _TIMESTAMPS + _UUID
1374

    
1375
  def UpgradeConfig(self):
1376
    """Fill defaults for missing configuration values.
1377

1378
    """
1379
    # pylint: disable=E0203
1380
    # because these are "defined" via slots, not manually
1381
    if self.hvparams is None:
1382
      self.hvparams = constants.HVC_DEFAULTS
1383
    else:
1384
      for hypervisor in self.hvparams:
1385
        self.hvparams[hypervisor] = FillDict(
1386
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1387

    
1388
    if self.os_hvp is None:
1389
      self.os_hvp = {}
1390

    
1391
    # osparams added before 2.2
1392
    if self.osparams is None:
1393
      self.osparams = {}
1394

    
1395
    if self.ndparams is None:
1396
      self.ndparams = constants.NDC_DEFAULTS
1397

    
1398
    self.beparams = UpgradeGroupedParams(self.beparams,
1399
                                         constants.BEC_DEFAULTS)
1400
    for beparams_group in self.beparams:
1401
      UpgradeBeParams(self.beparams[beparams_group])
1402

    
1403
    migrate_default_bridge = not self.nicparams
1404
    self.nicparams = UpgradeGroupedParams(self.nicparams,
1405
                                          constants.NICC_DEFAULTS)
1406
    if migrate_default_bridge:
1407
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1408
        self.default_bridge
1409

    
1410
    if self.modify_etc_hosts is None:
1411
      self.modify_etc_hosts = True
1412

    
1413
    if self.modify_ssh_setup is None:
1414
      self.modify_ssh_setup = True
1415

    
1416
    # default_bridge is no longer used in 2.1. The slot is left there to
1417
    # support auto-upgrading. It can be removed once we decide to deprecate
1418
    # upgrading straight from 2.0.
1419
    if self.default_bridge is not None:
1420
      self.default_bridge = None
1421

    
1422
    # default_hypervisor is just the first enabled one in 2.1. This slot and
1423
    # code can be removed once upgrading straight from 2.0 is deprecated.
1424
    if self.default_hypervisor is not None:
1425
      self.enabled_hypervisors = ([self.default_hypervisor] +
1426
        [hvname for hvname in self.enabled_hypervisors
1427
         if hvname != self.default_hypervisor])
1428
      self.default_hypervisor = None
1429

    
1430
    # maintain_node_health added after 2.1.1
1431
    if self.maintain_node_health is None:
1432
      self.maintain_node_health = False
1433

    
1434
    if self.uid_pool is None:
1435
      self.uid_pool = []
1436

    
1437
    if self.default_iallocator is None:
1438
      self.default_iallocator = ""
1439

    
1440
    # reserved_lvs added before 2.2
1441
    if self.reserved_lvs is None:
1442
      self.reserved_lvs = []
1443

    
1444
    # hidden and blacklisted operating systems added before 2.2.1
1445
    if self.hidden_os is None:
1446
      self.hidden_os = []
1447

    
1448
    if self.blacklisted_os is None:
1449
      self.blacklisted_os = []
1450

    
1451
    # primary_ip_family added before 2.3
1452
    if self.primary_ip_family is None:
1453
      self.primary_ip_family = AF_INET
1454

    
1455
    if self.master_netmask is None:
1456
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
1457
      self.master_netmask = ipcls.iplen
1458

    
1459
    if self.prealloc_wipe_disks is None:
1460
      self.prealloc_wipe_disks = False
1461

    
1462
    # shared_file_storage_dir added before 2.5
1463
    if self.shared_file_storage_dir is None:
1464
      self.shared_file_storage_dir = ""
1465

    
1466
    if self.use_external_mip_script is None:
1467
      self.use_external_mip_script = False
1468

    
1469
    self.diskparams = UpgradeDiskParams(self.diskparams)
1470

    
1471
    # instance policy added before 2.6
1472
    if self.ipolicy is None:
1473
      self.ipolicy = MakeEmptyIPolicy()
1474

    
1475
  @property
1476
  def primary_hypervisor(self):
1477
    """The first hypervisor is the primary.
1478

1479
    Useful, for example, for L{Node}'s hv/disk state.
1480

1481
    """
1482
    return self.enabled_hypervisors[0]
1483

    
1484
  def ToDict(self):
1485
    """Custom function for cluster.
1486

1487
    """
1488
    mydict = super(Cluster, self).ToDict()
1489
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
1490
    return mydict
1491

    
1492
  @classmethod
1493
  def FromDict(cls, val):
1494
    """Custom function for cluster.
1495

1496
    """
1497
    obj = super(Cluster, cls).FromDict(val)
1498
    if not isinstance(obj.tcpudp_port_pool, set):
1499
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1500
    return obj
1501

    
1502
  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1503
    """Get the default hypervisor parameters for the cluster.
1504

1505
    @param hypervisor: the hypervisor name
1506
    @param os_name: if specified, we'll also update the defaults for this OS
1507
    @param skip_keys: if passed, list of keys not to use
1508
    @return: the defaults dict
1509

1510
    """
1511
    if skip_keys is None:
1512
      skip_keys = []
1513

    
1514
    fill_stack = [self.hvparams.get(hypervisor, {})]
1515
    if os_name is not None:
1516
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1517
      fill_stack.append(os_hvp)
1518

    
1519
    ret_dict = {}
1520
    for o_dict in fill_stack:
1521
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1522

    
1523
    return ret_dict
1524

    
1525
  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1526
    """Fill a given hvparams dict with cluster defaults.
1527

1528
    @type hv_name: string
1529
    @param hv_name: the hypervisor to use
1530
    @type os_name: string
1531
    @param os_name: the OS to use for overriding the hypervisor defaults
1532
    @type skip_globals: boolean
1533
    @param skip_globals: if True, the global hypervisor parameters will
1534
        not be filled
1535
    @rtype: dict
1536
    @return: a copy of the given hvparams with missing keys filled from
1537
        the cluster defaults
1538

1539
    """
1540
    if skip_globals:
1541
      skip_keys = constants.HVC_GLOBALS
1542
    else:
1543
      skip_keys = []
1544

    
1545
    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1546
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1547

    
1548
  def FillHV(self, instance, skip_globals=False):
1549
    """Fill an instance's hvparams dict with cluster defaults.
1550

1551
    @type instance: L{objects.Instance}
1552
    @param instance: the instance parameter to fill
1553
    @type skip_globals: boolean
1554
    @param skip_globals: if True, the global hypervisor parameters will
1555
        not be filled
1556
    @rtype: dict
1557
    @return: a copy of the instance's hvparams with missing keys filled from
1558
        the cluster defaults
1559

1560
    """
1561
    return self.SimpleFillHV(instance.hypervisor, instance.os,
1562
                             instance.hvparams, skip_globals)
1563

    
1564
  def SimpleFillBE(self, beparams):
1565
    """Fill a given beparams dict with cluster defaults.
1566

1567
    @type beparams: dict
1568
    @param beparams: the dict to fill
1569
    @rtype: dict
1570
    @return: a copy of the passed in beparams with missing keys filled
1571
        from the cluster defaults
1572

1573
    """
1574
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1575

    
1576
  def FillBE(self, instance):
1577
    """Fill an instance's beparams dict with cluster defaults.
1578

1579
    @type instance: L{objects.Instance}
1580
    @param instance: the instance parameter to fill
1581
    @rtype: dict
1582
    @return: a copy of the instance's beparams with missing keys filled from
1583
        the cluster defaults
1584

1585
    """
1586
    return self.SimpleFillBE(instance.beparams)
1587

    
1588
  def SimpleFillNIC(self, nicparams):
1589
    """Fill a given nicparams dict with cluster defaults.
1590

1591
    @type nicparams: dict
1592
    @param nicparams: the dict to fill
1593
    @rtype: dict
1594
    @return: a copy of the passed in nicparams with missing keys filled
1595
        from the cluster defaults
1596

1597
    """
1598
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1599

    
1600
  def SimpleFillOS(self, os_name, os_params):
1601
    """Fill an instance's osparams dict with cluster defaults.
1602

1603
    @type os_name: string
1604
    @param os_name: the OS name to use
1605
    @type os_params: dict
1606
    @param os_params: the dict to fill with default values
1607
    @rtype: dict
1608
    @return: a copy of the instance's osparams with missing keys filled from
1609
        the cluster defaults
1610

1611
    """
1612
    name_only = os_name.split("+", 1)[0]
1613
    # base OS
1614
    result = self.osparams.get(name_only, {})
1615
    # OS with variant
1616
    result = FillDict(result, self.osparams.get(os_name, {}))
1617
    # specified params
1618
    return FillDict(result, os_params)
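
  # An illustrative layering (OS names and parameters are made up): base-OS
  # parameters are overridden by variant-specific ones, which in turn are
  # overridden by the explicitly passed os_params.
  #
  #   >>> cl = Cluster(osparams={"debootstrap": {"x": 1, "y": 1},
  #   ...                        "debootstrap+precise": {"y": 2}})
  #   >>> filled = cl.SimpleFillOS("debootstrap+precise", {"z": 3})
  #   >>> filled == {"x": 1, "y": 2, "z": 3}
  #   True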
1619

    
1620
  @staticmethod
1621
  def SimpleFillHvState(hv_state):
1622
    """Fill an hv_state sub dict with cluster defaults.
1623

1624
    """
1625
    return FillDict(constants.HVST_DEFAULTS, hv_state)
1626

    
1627
  @staticmethod
1628
  def SimpleFillDiskState(disk_state):
1629
    """Fill an disk_state sub dict with cluster defaults.
1630

1631
    """
1632
    return FillDict(constants.DS_DEFAULTS, disk_state)
1633

    
1634
  def FillND(self, node, nodegroup):
1635
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
1636

1637
    @type node: L{objects.Node}
1638
    @param node: A Node object to fill
1639
    @type nodegroup: L{objects.NodeGroup}
1640
    @param nodegroup: The node's NodeGroup, used for the group-level defaults
1641
    @return: a copy of the node's ndparams with defaults filled
1642

1643
    """
1644
    return self.SimpleFillND(nodegroup.FillND(node))
1645

    
1646
  def SimpleFillND(self, ndparams):
1647
    """Fill a given ndparams dict with defaults.
1648

1649
    @type ndparams: dict
1650
    @param ndparams: the dict to fill
1651
    @rtype: dict
1652
    @return: a copy of the passed in ndparams with missing keys filled
1653
        from the cluster defaults
1654

1655
    """
1656
    return FillDict(self.ndparams, ndparams)
1657

    
1658
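  # Illustrative sketch, not part of the original code: FillND chains the
  # scopes narrowest-wins, i.e. the node's own ndparams override the node
  # group's, which in turn override the cluster-wide defaults.  With
  # hypothetical keys:
  #   cluster.ndparams = {"x": 1, "y": 1}, group.ndparams = {"y": 2},
  #   node.ndparams = {}  ->  cluster.FillND(node, group) == {"x": 1, "y": 2}
  # assuming NodeGroup.FillND follows the same FillDict pattern.
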
  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of the passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillDictOfDicts(self.ipolicy, ipolicy)

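  # Illustrative note, not part of the original code: unlike the other fill
  # helpers, the instance policy is a dict of dicts, so FillDictOfDicts
  # merges each sub-dict of limits independently instead of replacing a
  # partially specified category wholesale.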

    
class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryRequest(ConfigObject):
  """Object holding a query request.

  """
  __slots__ = [
    "what",
    "fields",
    "qfilter",
    ]


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]

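# Illustrative sketch, not part of the original code: the custom ToDict and
# FromDict of _QueryResponseBase (and thus QueryResponse) convert the field
# definitions, so a round trip such as
#   resp = QueryResponse(fields=[QueryFieldDefinition(name="name")], data=[])
#   QueryResponse.FromDict(resp.ToDict())
# would be expected to rebuild the QueryFieldDefinition objects from their
# dict form (keyword-argument construction is assumed from ConfigObject).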

    
class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = [
    ]


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True
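
  # Illustrative sketch, not part of the original code: per the asserts above,
  # a hypothetical SSH console only needs kind, instance, host, user and
  # command, e.g.
  #   InstanceConsole(kind=constants.CONS_SSH, instance="inst1.example.com",
  #                   host="node1.example.com", user="root",
  #                   command=["ssh", "node1.example.com"]).Validate()
  # while a CONS_MESSAGE console needs only the instance name and a message.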

    
class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
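
  # Illustrative sketch, not part of the original code: Dumps and Loads are
  # inverses over the INI text representation, e.g.
  #   cfg = SerializableConfigParser.Loads("[section]\nkey = value\n")
  #   cfg.get("section", "key")  ->  "value"
  #   SerializableConfigParser.Loads(cfg.Dumps()).get("section", "key")
  # would be expected to return the same value.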