#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable=E0203,W0201,R0902

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

# R0902: Allow instances of these objects to have more than 20 attributes

import ConfigParser
import re
import copy
import time
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants
from ganeti import netutils

from socket import AF_INET


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]


def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
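# Illustrative usage sketch (not part of the original module): FillDict layers
# the custom values over a deep copy of the defaults and can drop keys.
#
#   >>> FillDict({"a": 1, "b": 2}, {"b": 3})
#   {'a': 1, 'b': 3}
#   >>> FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"])
#   {'b': 3}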


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target


def UpgradeBeParams(target):
  """Update the be parameters dict to the new format.

  @type target: dict
  @param target: "be" parameters dict

  """
  if constants.BE_MEMORY in target:
    memory = target[constants.BE_MEMORY]
    target[constants.BE_MAXMEM] = memory
    target[constants.BE_MINMEM] = memory
    del target[constants.BE_MEMORY]
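# Illustrative usage sketch (not part of the original module): the legacy
# "memory" backend parameter is split in place into explicit maximum and
# minimum memory values.
#
#   >>> be = {constants.BE_MEMORY: 512}
#   >>> UpgradeBeParams(be)
#   >>> be == {constants.BE_MAXMEM: 512, constants.BE_MINMEM: 512}
#   True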


def UpgradeDiskParams(diskparams):
  """Upgrade the disk parameters.

  @type diskparams: dict
  @param diskparams: disk parameters to upgrade
  @rtype: dict
  @return: the upgraded disk parameters dict

  """
  result = dict()
  if diskparams is None:
    result = constants.DISK_DT_DEFAULTS.copy()
  else:
    # Update the disk parameter values for each disk template.
    # The code iterates over constants.DISK_TEMPLATES because new templates
    # might have been added.
    for template in constants.DISK_TEMPLATES:
      if template not in diskparams:
        result[template] = constants.DISK_DT_DEFAULTS[template].copy()
      else:
        result[template] = FillDict(constants.DISK_DT_DEFAULTS[template],
                                    diskparams[template])

  return result


class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    slots = self._all_slots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  @classmethod
  def _all_slots(cls):
    """Compute the list of all declared slots for a class.

    """
    slots = []
    for parent in cls.__mro__:
      slots.extend(getattr(parent, "__slots__", []))
    return slots

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self._all_slots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable=W0142
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if source is None:
      source = c_type()
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
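  # Illustrative usage sketch (not part of the original module); "_Example"
  # and its slots are hypothetical names used only in this comment.
  #
  #   class _Example(ConfigObject):
  #     __slots__ = ["alpha", "beta"]
  #
  #   obj = _Example(alpha=1)
  #   obj.ToDict()                            # {'alpha': 1}; unset slots omitted
  #   obj.beta                                # None, not an AttributeError
  #   _Example.FromDict({"alpha": 1}).alpha   # 1
  #   obj.Copy()                              # deep copy via ToDict/FromDict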


class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
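  # Illustrative usage sketch (not part of the original module): tag handling
  # on any TaggableObject subclass (Instance, Node, NodeGroup, Cluster).
  #
  #   node = Node(name="node1.example.com")
  #   node.AddTag("rack:r1")       # validated against VALID_TAG_RE
  #   node.GetTags()               # set(['rack:r1'])
  #   node.RemoveTag("rack:r1")
  #   node.AddTag("bad tag")       # raises errors.TagError (space not allowed)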


class MasterNetworkParameters(ConfigObject):
  """Network configuration parameters for the master

  @ivar name: master name
  @ivar ip: master IP
  @ivar netmask: master netmask
  @ivar netdev: master network device
  @ivar ip_family: master IP family

  """
  __slots__ = [
    "name",
    "ip",
    "netmask",
    "netdev",
    "ip_family"
    ]


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = [
    "version",
    "cluster",
    "nodes",
    "nodegroups",
    "instances",
    "serial_no",
    ] + _TIMESTAMPS

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances", "nodegroups":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if there is a disk of the given type in the configuration.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set a helper let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams:  dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
        nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode", "params"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    elif self.dev_type == constants.LD_BLOCKDEV:
      return self.logical_id[1]
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all other we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
                         constants.LD_BLOCKDEV]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def ComputeGrowth(self, amount):
    """Compute the per-VG growth requirements.

    This only works for VG-based disks.

    @type amount: integer
    @param amount: the desired increase in (user-visible) disk space
    @rtype: dict
    @return: a dictionary of volume-groups and the required size

    """
    if self.dev_type == constants.LD_LV:
      return {self.logical_id[0]: amount}
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        return self.children[0].ComputeGrowth(amount)
      else:
        return {}
    else:
      # Other disk types do not require VG space
      return {}
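  # Illustrative usage sketch (not part of the original module): the result
  # maps volume groups to the requested increase; the VG name is made up.
  #
  #   lv = Disk(dev_type=constants.LD_LV, logical_id=("xenvg", "disk0"),
  #             size=1024)
  #   lv.ComputeGrowth(512)        # {'xenvg': 512}
  #   drbd = Disk(dev_type=constants.LD_DRBD8, children=[lv])
  #   drbd.ComputeGrowth(512)      # {'xenvg': 512}, taken from the data child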

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type in (constants.LD_LV, constants.LD_FILE):
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()

    if not self.params:
      self.params = constants.DISK_LD_DEFAULTS[self.dev_type].copy()
    else:
      self.params = FillDict(constants.DISK_LD_DEFAULTS[self.dev_type],
                             self.params)
    # add here config upgrade for this disk


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "osparams",
    "admin_state",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary of
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
        volumeN is of the form "vg_name/lv_name", compatible with
        GetVolumeList()

    """
    if node == None:
      node = self.primary_node

    if lvmap is None:
      lvmap = {
        node: [],
        }
      ret = lvmap
    else:
      if not node in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret

  def FindDisk(self, idx):
    """Find a disk with a given index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except (TypeError, ValueError), err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
                                 errors.ECODE_INVAL)

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    if "admin_state" not in val:
      if val.get("admin_up", False):
        val["admin_state"] = constants.ADMINST_UP
      else:
        val["admin_state"] = constants.ADMINST_DOWN
    if "admin_up" in val:
      del val["admin_up"]
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    if self.hvparams:
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass
    if self.osparams is None:
      self.osparams = {}
    UpgradeBeParams(self.beparams)


class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the parameters supported by this OS

  @type VARIANT_DELIM: string
  @cvar VARIANT_DELIM: the variant delimiter

  """
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]

  VARIANT_DELIM = "+"

  @classmethod
  def SplitNameVariant(cls, name):
    """Splits the name into the proper name and variant.

    @param name: the OS (unprocessed) name
    @rtype: list
    @return: a list of two elements; if the original name didn't
        contain a variant, it's returned as an empty string

    """
    nv = name.split(cls.VARIANT_DELIM, 1)
    if len(nv) == 1:
      nv.append("")
    return nv

  @classmethod
  def GetName(cls, name):
    """Returns the proper name of the OS (without the variant).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[0]

  @classmethod
  def GetVariant(cls, name):
    """Returns the variant of the OS (without the base name).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[1]
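  # Illustrative usage sketch (not part of the original module): the "+"
  # delimiter separates the OS name from its variant.
  #
  #   >>> OS.SplitNameVariant("debootstrap+default")
  #   ['debootstrap', 'default']
  #   >>> OS.GetName("debootstrap+default"), OS.GetVariant("debootstrap")
  #   ('debootstrap', '')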


class NodeHvState(ConfigObject):
  """Hypervisor state on a node.

  @ivar mem_total: Total amount of memory
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
    available)
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
    rounding
  @ivar mem_inst: Memory used by instances living on node
  @ivar cpu_total: Total node CPU core count
  @ivar cpu_node: Number of CPU cores reserved for the node itself

  """
  __slots__ = [
    "mem_total",
    "mem_node",
    "mem_hv",
    "mem_inst",
    "cpu_total",
    "cpu_node",
    ] + _TIMESTAMPS


class NodeDiskState(ConfigObject):
  """Disk state on a node.

  """
  __slots__ = [
    "total",
    "reserved",
    "overhead",
    ] + _TIMESTAMPS


class Node(TaggableObject):
  """Config object representing a node.

  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
  @ivar hv_state_static: Hypervisor state overridden by user
  @ivar disk_state: Disk state (e.g. free space)
  @ivar disk_state_static: Disk state overridden by user

  """
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    "group",
    "master_capable",
    "vm_capable",
    "ndparams",
    "powered",
    "hv_state",
    "hv_state_static",
    "disk_state",
    "disk_state_static",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.master_capable is None:
      self.master_capable = True

    if self.vm_capable is None:
      self.vm_capable = True

    if self.ndparams is None:
      self.ndparams = {}

    if self.powered is None:
      self.powered = True

  def ToDict(self):
    """Custom function for serializing.

    """
    data = super(Node, self).ToDict()

    hv_state = data.get("hv_state", None)
    if hv_state is not None:
      data["hv_state"] = self._ContainerToDicts(hv_state)

    disk_state = data.get("disk_state", None)
    if disk_state is not None:
      data["disk_state"] = \
        dict((key, self._ContainerToDicts(value))
             for (key, value) in disk_state.items())

    return data

  @classmethod
  def FromDict(cls, val):
    """Custom function for deserializing.

    """
    obj = super(Node, cls).FromDict(val)

    if obj.hv_state is not None:
      obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState)

    if obj.disk_state is not None:
      obj.disk_state = \
        dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState))
             for (key, value) in obj.disk_state.items())

    return obj


class NodeGroup(TaggableObject):
  """Config object representing a node group."""
  __slots__ = [
    "name",
    "members",
    "ndparams",
    "diskparams",
    "serial_no",
    "alloc_policy",
    ] + _TIMESTAMPS + _UUID

  def ToDict(self):
    """Custom function for nodegroup.

    This discards the members object, which gets recalculated and is only kept
    in memory.

    """
    mydict = super(NodeGroup, self).ToDict()
    del mydict["members"]
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for nodegroup.

    The members slot is initialized to an empty list, upon deserialization.

    """
    obj = super(NodeGroup, cls).FromDict(val)
    obj.members = []
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.ndparams is None:
      self.ndparams = {}

    if self.serial_no is None:
      self.serial_no = 1

    if self.alloc_policy is None:
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED

    # We only update mtime, and not ctime, since we would not be able to provide
    # a correct value for creation time.
    if self.mtime is None:
      self.mtime = time.time()

    self.diskparams = UpgradeDiskParams(self.diskparams)

  def FillND(self, node):
    """Return filled out ndparams for L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(node.ndparams)

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the node group defaults

    """
    return FillDict(self.ndparams, ndparams)


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "master_netmask",
    "use_external_mip_script",
    "cluster_name",
    "file_storage_dir",
    "shared_file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "ndparams",
    "diskparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    if self.ndparams is None:
      self.ndparams = constants.NDC_DEFAULTS

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
        [hvname for hvname in self.enabled_hypervisors
         if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    self.diskparams = UpgradeDiskParams(self.diskparams)

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
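  # Illustrative usage sketch (not part of the original module): hypervisor
  # defaults are layered cluster-wide first, then per-OS overrides from
  # os_hvp, then the explicitly given hvparams; the OS name and value below
  # are made up, and the constant/parameter names are assumed.
  #
  #   cluster.SimpleFillHV(constants.HT_XEN_PVM, "debian-image",
  #                        {"kernel_path": "/boot/vmlinuz-custom"})
  #   # -> xen-pvm cluster defaults, overridden by os_hvp["debian-image"],
  #   #    overridden by the explicit kernel_path value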

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
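  # Illustrative usage sketch (not part of the original module): OS parameters
  # are filled in three layers, base OS first, then the variant, then the
  # explicitly requested values; the OS names and "dhcp" key are made up.
  #
  #   cluster.osparams = {"debootstrap": {"dhcp": "no"},
  #                       "debootstrap+secure": {"dhcp": "yes"}}
  #   cluster.SimpleFillOS("debootstrap+secure", {})   # -> {'dhcp': 'yes'}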

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryRequest(ConfigObject):
  """Object holding a query request.

  """
  __slots__ = [
    "what",
    "fields",
    "qfilter",
    ]


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = [
    ]


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
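# Illustrative usage sketch (not part of the original module): Dumps/Loads
# round-trip the parser state through a plain string; the section and option
# names are made up.
#
#   scp = SerializableConfigParser()
#   scp.add_section("node")
#   scp.set("node", "name", "node1.example.com")
#   text = scp.Dumps()
#   SerializableConfigParser.Loads(text).get("node", "name")
#   # -> 'node1.example.com'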