lib/objects.py @ 2a27dac3


1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable=E0203,W0201,R0902
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
# R0902: Allow instances of these objects to have more than 20 attributes
37

    
38
import ConfigParser
39
import re
40
import copy
41
import time
42
from cStringIO import StringIO
43

    
44
from ganeti import errors
45
from ganeti import constants
46
from ganeti import netutils
47
from ganeti import utils
48

    
49
from socket import AF_INET
50

    
51

    
52
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
53
           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
54

    
55
_TIMESTAMPS = ["ctime", "mtime"]
56
_UUID = ["uuid"]
57

    
58
# constants used to create InstancePolicy dictionary
59
TISPECS_GROUP_TYPES = {
60
  constants.ISPECS_MIN: constants.VTYPE_INT,
61
  constants.ISPECS_MAX: constants.VTYPE_INT,
62
  }
63

    
64
TISPECS_CLUSTER_TYPES = {
65
  constants.ISPECS_MIN: constants.VTYPE_INT,
66
  constants.ISPECS_MAX: constants.VTYPE_INT,
67
  constants.ISPECS_STD: constants.VTYPE_INT,
68
  }
69

    
70

    
71
def FillDict(defaults_dict, custom_dict, skip_keys=None):
72
  """Basic function to apply settings on top a default dict.
73

74
  @type defaults_dict: dict
75
  @param defaults_dict: dictionary holding the default values
76
  @type custom_dict: dict
77
  @param custom_dict: dictionary holding customized values
78
  @type skip_keys: list
79
  @param skip_keys: which keys not to fill
80
  @rtype: dict
81
  @return: dict with the 'full' values
82

83
  """
84
  ret_dict = copy.deepcopy(defaults_dict)
85
  ret_dict.update(custom_dict)
86
  if skip_keys:
87
    for k in skip_keys:
88
      try:
89
        del ret_dict[k]
90
      except KeyError:
91
        pass
92
  return ret_dict
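
# Usage sketch (example values are illustrative, not part of the module):
# FillDict() overlays the customized settings on a deep copy of the defaults
# and can drop selected keys from the result.
#
#   FillDict({"memory": 128, "vcpus": 1}, {"memory": 512})
#   -> {"memory": 512, "vcpus": 1}
#   FillDict({"memory": 128, "vcpus": 1}, {"memory": 512},
#            skip_keys=["vcpus"])
#   -> {"memory": 512}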
93

    
94

    
95
def FillIPolicy(default_ipolicy, custom_ipolicy, skip_keys=None):
96
  """Fills an instance policy with defaults.
97

98
  """
99
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
100
  ret_dict = {}
101
  for key in constants.IPOLICY_ISPECS:
102
    ret_dict[key] = FillDict(default_ipolicy[key],
103
                             custom_ipolicy.get(key, {}),
104
                             skip_keys=skip_keys)
105
  # list items
106
  for key in [constants.IPOLICY_DTS]:
107
    ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key]))
108
  # other items which we know we can directly copy (immutables)
109
  for key in constants.IPOLICY_PARAMETERS:
110
    ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key])
111

    
112
  return ret_dict
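
# Merge sketch (the ispec key names below are assumptions about the usual
# string values of the ISPECS_*/ISPEC_* constants): the min/max/std sub-dicts
# are merged via FillDict(), the disk-template list is copied, and scalar
# parameters fall back to the defaults.
#
#   filled = FillIPolicy(constants.IPOLICY_DEFAULTS,
#                        {"max": {"memory-size": 2048}})
#   # filled["max"]["memory-size"] == 2048; all other keys and bounds come
#   # from constants.IPOLICY_DEFAULTS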
113

    
114

    
115
def UpgradeGroupedParams(target, defaults):
116
  """Update all groups for the target parameter.
117

118
  @type target: dict of dicts
119
  @param target: {group: {parameter: value}}
120
  @type defaults: dict
121
  @param defaults: default parameter values
122

123
  """
124
  if target is None:
125
    target = {constants.PP_DEFAULT: defaults}
126
  else:
127
    for group in target:
128
      target[group] = FillDict(defaults, target[group])
129
  return target
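
# Upgrade sketch (group and parameter names are illustrative): a missing dict
# becomes a single default group holding the defaults, while existing groups
# are back-filled with any parameter they do not override.
#
#   UpgradeGroupedParams(None, {"link": "xen-br0"})
#   -> {constants.PP_DEFAULT: {"link": "xen-br0"}}
#   UpgradeGroupedParams({"grp1": {"link": "br1"}},
#                        {"link": "xen-br0", "mode": "bridged"})
#   -> {"grp1": {"link": "br1", "mode": "bridged"}}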
130

    
131

    
132
def UpgradeBeParams(target):
133
  """Update the be parameters dict to the new format.
134

135
  @type target: dict
136
  @param target: "be" parameters dict
137

138
  """
139
  if constants.BE_MEMORY in target:
140
    memory = target[constants.BE_MEMORY]
141
    target[constants.BE_MAXMEM] = memory
142
    target[constants.BE_MINMEM] = memory
143
    del target[constants.BE_MEMORY]
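
# Upgrade sketch (the key names assume the usual "memory"/"maxmem"/"minmem"
# string values of the BE_* constants): a pre-2.6 single memory value becomes
# equal maximum and minimum limits.
#
#   be = {"memory": 512}
#   UpgradeBeParams(be)
#   # be == {"maxmem": 512, "minmem": 512}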
144

    
145

    
146
def UpgradeDiskParams(diskparams):
147
  """Upgrade the disk parameters.
148

149
  @type diskparams: dict
150
  @param diskparams: disk parameters to upgrade
151
  @rtype: dict
152
  @return: the upgraded disk parameters dict
153

154
  """
155
  if diskparams is None:
156
    result = constants.DISK_DT_DEFAULTS.copy()
157
  else:
158
    # Update the disk parameter values for each disk template.
159
    # The code iterates over constants.DISK_TEMPLATES because new templates
160
    # might have been added.
161
    result = dict((dt, FillDict(constants.DISK_DT_DEFAULTS[dt],
162
                                diskparams.get(dt, {})))
163
                  for dt in constants.DISK_TEMPLATES)
164

    
165
  return result
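
# Merge sketch (the template and key names are illustrative assumptions):
# templates missing from the input are filled from constants.DISK_DT_DEFAULTS,
# explicitly configured values are kept.
#
#   upgraded = UpgradeDiskParams({"drbd": {"resync-rate": 2048}})
#   # upgraded["drbd"]["resync-rate"] == 2048; every other template listed in
#   # constants.DISK_TEMPLATES is present with its default parameters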
166

    
167

    
168
def UpgradeNDParams(ndparams):
169
  """Upgrade ndparams structure.
170

171
  @type ndparams: dict
172
  @param ndparams: disk parameters to upgrade
173
  @rtype: dict
174
  @return: the upgraded node parameters dict
175

176
  """
177
  if ndparams is None:
178
    ndparams = {}
179

    
180
  return FillDict(constants.NDC_DEFAULTS, ndparams)
181

    
182

    
183
def MakeEmptyIPolicy():
184
  """Create empty IPolicy dictionary.
185

186
  """
187
  return dict([
188
    (constants.ISPECS_MIN, {}),
189
    (constants.ISPECS_MAX, {}),
190
    (constants.ISPECS_STD, {}),
191
    ])
192

    
193

    
194
def CreateIPolicyFromOpts(ispecs_mem_size=None,
195
                          ispecs_cpu_count=None,
196
                          ispecs_disk_count=None,
197
                          ispecs_disk_size=None,
198
                          ispecs_nic_count=None,
199
                          ipolicy_disk_templates=None,
200
                          ipolicy_vcpu_ratio=None,
201
                          group_ipolicy=False,
202
                          allowed_values=None,
203
                          fill_all=False):
204
  """Creation of instance policy based on command line options.
205

206
  @param fill_all: whether for cluster policies we should ensure that
207
    all values are filled
208

209

210
  """
211
  # prepare ipolicy dict
212
  ipolicy_transposed = {
213
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
214
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
215
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
216
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
217
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
218
    }
219

    
220
  # first, check that the values given are correct
221
  if group_ipolicy:
222
    forced_type = TISPECS_GROUP_TYPES
223
  else:
224
    forced_type = TISPECS_CLUSTER_TYPES
225

    
226
  for specs in ipolicy_transposed.values():
227
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
228

    
229
  # then transpose
230
  ipolicy_out = MakeEmptyIPolicy()
231
  for name, specs in ipolicy_transposed.iteritems():
232
    assert name in constants.ISPECS_PARAMETERS
233
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
234
      ipolicy_out[key][name] = val
235

    
236
  # no filldict for non-dicts
237
  if not group_ipolicy and fill_all:
238
    if ipolicy_disk_templates is None:
239
      ipolicy_disk_templates = constants.DISK_TEMPLATES
240
    if ipolicy_vcpu_ratio is None:
241
      ipolicy_vcpu_ratio = \
242
        constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
243
  if ipolicy_disk_templates is not None:
244
    ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
245
  if ipolicy_vcpu_ratio is not None:
246
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
247

    
248
  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
249

    
250
  return ipolicy_out
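
# Transposition sketch (spec and key names are assumptions; the remaining
# ispecs_* arguments are omitted here, real callers pass dicts, possibly
# empty, for each of them): per-parameter {min/max/std} dicts from the
# command line become per-bound {parameter: value} dicts.
#
#   pol = CreateIPolicyFromOpts(ispecs_mem_size={"min": 128, "max": 4096}, ...)
#   # pol["min"]["memory-size"] == 128
#   # pol["max"]["memory-size"] == 4096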
251

    
252

    
253
class ConfigObject(object):
254
  """A generic config object.
255

256
  It has the following properties:
257

258
    - provides somewhat safe recursive unpickling and pickling for its classes
259
    - unset attributes which are defined in slots are always returned
260
      as None instead of raising an error
261

262
  Classes derived from this must always declare __slots__ (we use many
263
  config objects and the memory reduction is useful)
264

265
  """
266
  __slots__ = []
267

    
268
  def __init__(self, **kwargs):
269
    for k, v in kwargs.iteritems():
270
      setattr(self, k, v)
271

    
272
  def __getattr__(self, name):
273
    if name not in self._all_slots():
274
      raise AttributeError("Invalid object attribute %s.%s" %
275
                           (type(self).__name__, name))
276
    return None
277

    
278
  def __setstate__(self, state):
279
    slots = self._all_slots()
280
    for name in state:
281
      if name in slots:
282
        setattr(self, name, state[name])
283

    
284
  @classmethod
285
  def _all_slots(cls):
286
    """Compute the list of all declared slots for a class.
287

288
    """
289
    slots = []
290
    for parent in cls.__mro__:
291
      slots.extend(getattr(parent, "__slots__", []))
292
    return slots
293

    
294
  def ToDict(self):
295
    """Convert to a dict holding only standard python types.
296

297
    The generic routine just dumps all of this object's attributes in
298
    a dict. It does not work if the class has children who are
299
    ConfigObjects themselves (e.g. the nics list in an Instance), in
300
    which case the object should subclass the function in order to
301
    make sure all objects returned are only standard python types.
302

303
    """
304
    result = {}
305
    for name in self._all_slots():
306
      value = getattr(self, name, None)
307
      if value is not None:
308
        result[name] = value
309
    return result
310

    
311
  __getstate__ = ToDict
312

    
313
  @classmethod
314
  def FromDict(cls, val):
315
    """Create an object from a dictionary.
316

317
    This generic routine takes a dict, instantiates a new instance of
318
    the given class, and sets attributes based on the dict content.
319

320
    As for `ToDict`, this does not work if the class has children
321
    who are ConfigObjects themselves (e.g. the nics list in an
322
    Instance), in which case the object should subclass the function
323
    and alter the objects.
324

325
    """
326
    if not isinstance(val, dict):
327
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
328
                                      " expected dict, got %s" % type(val))
329
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
330
    obj = cls(**val_str) # pylint: disable=W0142
331
    return obj
332

    
333
  @staticmethod
334
  def _ContainerToDicts(container):
335
    """Convert the elements of a container to standard python types.
336

337
    This method converts a container with elements derived from
338
    ConfigData to standard python types. If the container is a dict,
339
    we don't touch the keys, only the values.
340

341
    """
342
    if isinstance(container, dict):
343
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
344
    elif isinstance(container, (list, tuple, set, frozenset)):
345
      ret = [elem.ToDict() for elem in container]
346
    else:
347
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
348
                      type(container))
349
    return ret
350

    
351
  @staticmethod
352
  def _ContainerFromDicts(source, c_type, e_type):
353
    """Convert a container from standard python types.
354

355
    This method converts a container with standard python types to
356
    ConfigData objects. If the container is a dict, we don't touch the
357
    keys, only the values.
358

359
    """
360
    if not isinstance(c_type, type):
361
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
362
                      " not a type" % type(c_type))
363
    if source is None:
364
      source = c_type()
365
    if c_type is dict:
366
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
367
    elif c_type in (list, tuple, set, frozenset):
368
      ret = c_type([e_type.FromDict(elem) for elem in source])
369
    else:
370
      raise TypeError("Invalid container type %s passed to"
371
                      " _ContainerFromDicts" % c_type)
372
    return ret
373

    
374
  def Copy(self):
375
    """Makes a deep copy of the current object and its children.
376

377
    """
378
    dict_form = self.ToDict()
379
    clone_obj = self.__class__.FromDict(dict_form)
380
    return clone_obj
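
  # Round-trip sketch (NIC is one of the ConfigObject subclasses defined later
  # in this module; the MAC address is illustrative): attributes are dumped
  # into a plain dict and restored from it, while unset slots read back as
  # None.
  #
  #   nic = NIC(mac="aa:00:00:11:22:33")
  #   nic.ToDict()                    -> {"mac": "aa:00:00:11:22:33"}
  #   NIC.FromDict(nic.ToDict()).ip   -> None (slot declared but never set)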
381

    
382
  def __repr__(self):
383
    """Implement __repr__ for ConfigObjects."""
384
    return repr(self.ToDict())
385

    
386
  def UpgradeConfig(self):
387
    """Fill defaults for missing configuration values.
388

389
    This method will be called at configuration load time, and its
390
    implementation will be object dependent.
391

392
    """
393
    pass
394

    
395

    
396
class TaggableObject(ConfigObject):
397
  """An generic class supporting tags.
398

399
  """
400
  __slots__ = ["tags"]
401
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
402

    
403
  @classmethod
404
  def ValidateTag(cls, tag):
405
    """Check if a tag is valid.
406

407
    If the tag is invalid, an errors.TagError will be raised. The
408
    function has no return value.
409

410
    """
411
    if not isinstance(tag, basestring):
412
      raise errors.TagError("Invalid tag type (not a string)")
413
    if len(tag) > constants.MAX_TAG_LEN:
414
      raise errors.TagError("Tag too long (>%d characters)" %
415
                            constants.MAX_TAG_LEN)
416
    if not tag:
417
      raise errors.TagError("Tags cannot be empty")
418
    if not cls.VALID_TAG_RE.match(tag):
419
      raise errors.TagError("Tag contains invalid characters")
420

    
421
  def GetTags(self):
422
    """Return the tags list.
423

424
    """
425
    tags = getattr(self, "tags", None)
426
    if tags is None:
427
      tags = self.tags = set()
428
    return tags
429

    
430
  def AddTag(self, tag):
431
    """Add a new tag.
432

433
    """
434
    self.ValidateTag(tag)
435
    tags = self.GetTags()
436
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
437
      raise errors.TagError("Too many tags")
438
    self.GetTags().add(tag)
439

    
440
  def RemoveTag(self, tag):
441
    """Remove a tag.
442

443
    """
444
    self.ValidateTag(tag)
445
    tags = self.GetTags()
446
    try:
447
      tags.remove(tag)
448
    except KeyError:
449
      raise errors.TagError("Tag not found")
450

    
451
  def ToDict(self):
452
    """Taggable-object-specific conversion to standard python types.
453

454
    This replaces the tags set with a list.
455

456
    """
457
    bo = super(TaggableObject, self).ToDict()
458

    
459
    tags = bo.get("tags", None)
460
    if isinstance(tags, set):
461
      bo["tags"] = list(tags)
462
    return bo
463

    
464
  @classmethod
465
  def FromDict(cls, val):
466
    """Custom function for instances.
467

468
    """
469
    obj = super(TaggableObject, cls).FromDict(val)
470
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
471
      obj.tags = set(obj.tags)
472
    return obj
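
  # Tag handling sketch ("node1.example.com" and the tag are illustrative):
  # tags are validated against VALID_TAG_RE and kept in a set, which
  # ToDict()/FromDict() convert to and from a list.
  #
  #   node = Node(name="node1.example.com")
  #   node.AddTag("rack:a1")
  #   node.ToDict()["tags"]                    -> ["rack:a1"]
  #   Node.FromDict(node.ToDict()).GetTags()   -> set(["rack:a1"])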
473

    
474

    
475
class MasterNetworkParameters(ConfigObject):
476
  """Network configuration parameters for the master
477

478
  @ivar name: master name
479
  @ivar ip: master IP
480
  @ivar netmask: master netmask
481
  @ivar netdev: master network device
482
  @ivar ip_family: master IP family
483

484
  """
485
  __slots__ = [
486
    "name",
487
    "ip",
488
    "netmask",
489
    "netdev",
490
    "ip_family"
491
    ]
492

    
493

    
494
class ConfigData(ConfigObject):
495
  """Top-level config object."""
496
  __slots__ = [
497
    "version",
498
    "cluster",
499
    "nodes",
500
    "nodegroups",
501
    "instances",
502
    "serial_no",
503
    ] + _TIMESTAMPS
504

    
505
  def ToDict(self):
506
    """Custom function for top-level config data.
507

508
    This just replaces the list of instances, nodes and the cluster
509
    with standard python types.
510

511
    """
512
    mydict = super(ConfigData, self).ToDict()
513
    mydict["cluster"] = mydict["cluster"].ToDict()
514
    for key in "nodes", "instances", "nodegroups":
515
      mydict[key] = self._ContainerToDicts(mydict[key])
516

    
517
    return mydict
518

    
519
  @classmethod
520
  def FromDict(cls, val):
521
    """Custom function for top-level config data
522

523
    """
524
    obj = super(ConfigData, cls).FromDict(val)
525
    obj.cluster = Cluster.FromDict(obj.cluster)
526
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
527
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
528
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
529
    return obj
530

    
531
  def HasAnyDiskOfType(self, dev_type):
532
    """Check if in there is at disk of the given type in the configuration.
533

534
    @type dev_type: L{constants.LDS_BLOCK}
535
    @param dev_type: the type to look for
536
    @rtype: boolean
537
    @return: boolean indicating if a disk of the given type was found or not
538

539
    """
540
    for instance in self.instances.values():
541
      for disk in instance.disks:
542
        if disk.IsBasedOnDiskType(dev_type):
543
          return True
544
    return False
545

    
546
  def UpgradeConfig(self):
547
    """Fill defaults for missing configuration values.
548

549
    """
550
    self.cluster.UpgradeConfig()
551
    for node in self.nodes.values():
552
      node.UpgradeConfig()
553
    for instance in self.instances.values():
554
      instance.UpgradeConfig()
555
    if self.nodegroups is None:
556
      self.nodegroups = {}
557
    for nodegroup in self.nodegroups.values():
558
      nodegroup.UpgradeConfig()
559
    if self.cluster.drbd_usermode_helper is None:
560
      # To decide if we set a helper let's check if at least one instance has
561
      # a DRBD disk. This does not cover all the possible scenarios but it
562
      # gives a good approximation.
563
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
564
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
565

    
566

    
567
class NIC(ConfigObject):
568
  """Config object representing a network card."""
569
  __slots__ = ["mac", "ip", "nicparams"]
570

    
571
  @classmethod
572
  def CheckParameterSyntax(cls, nicparams):
573
    """Check the given parameters for validity.
574

575
    @type nicparams:  dict
576
    @param nicparams: dictionary with parameter names/value
577
    @raise errors.ConfigurationError: when a parameter is not valid
578

579
    """
580
    if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
581
        nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
582
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
583
      raise errors.ConfigurationError(err)
584

    
585
    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
586
        not nicparams[constants.NIC_LINK]):
587
      err = "Missing bridged nic link"
588
      raise errors.ConfigurationError(err)
589

    
590

    
591
class Disk(ConfigObject):
592
  """Config object representing a block device."""
593
  __slots__ = ["dev_type", "logical_id", "physical_id",
594
               "children", "iv_name", "size", "mode", "params"]
595

    
596
  def CreateOnSecondary(self):
597
    """Test if this device needs to be created on a secondary node."""
598
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
599

    
600
  def AssembleOnSecondary(self):
601
    """Test if this device needs to be assembled on a secondary node."""
602
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
603

    
604
  def OpenOnSecondary(self):
605
    """Test if this device needs to be opened on a secondary node."""
606
    return self.dev_type in (constants.LD_LV,)
607

    
608
  def StaticDevPath(self):
609
    """Return the device path if this device type has a static one.
610

611
    Some devices (LVM for example) live always at the same /dev/ path,
612
    irrespective of their status. For such devices, we return this
613
    path, for others we return None.
614

615
    @warning: The path returned is not a normalized pathname; callers
616
        should check that it is a valid path.
617

618
    """
619
    if self.dev_type == constants.LD_LV:
620
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
621
    elif self.dev_type == constants.LD_BLOCKDEV:
622
      return self.logical_id[1]
623
    elif self.dev_type == constants.LD_RBD:
624
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
625
    return None
626

    
627
  def ChildrenNeeded(self):
628
    """Compute the needed number of children for activation.
629

630
    This method will return either -1 (all children) or a positive
631
    number denoting the minimum number of children needed for
632
    activation (only mirrored devices will usually return >=0).
633

634
    Currently, only DRBD8 supports diskless activation (therefore we
635
    return 0), for all other we keep the previous semantics and return
636
    -1.
637

638
    """
639
    if self.dev_type == constants.LD_DRBD8:
640
      return 0
641
    return -1
642

    
643
  def IsBasedOnDiskType(self, dev_type):
644
    """Check if the disk or its children are based on the given type.
645

646
    @type dev_type: L{constants.LDS_BLOCK}
647
    @param dev_type: the type to look for
648
    @rtype: boolean
649
    @return: boolean indicating if a device of the given type was found or not
650

651
    """
652
    if self.children:
653
      for child in self.children:
654
        if child.IsBasedOnDiskType(dev_type):
655
          return True
656
    return self.dev_type == dev_type
657

    
658
  def GetNodes(self, node):
659
    """This function returns the nodes this device lives on.
660

661
    Given the node on which the parent of the device lives (or, in
662
    case of a top-level device, the primary node of the device's
663
    instance), this function will return a list of nodes on which this
664
    device needs to (or can) be assembled.
665

666
    """
667
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
668
                         constants.LD_BLOCKDEV, constants.LD_RBD]:
669
      result = [node]
670
    elif self.dev_type in constants.LDS_DRBD:
671
      result = [self.logical_id[0], self.logical_id[1]]
672
      if node not in result:
673
        raise errors.ConfigurationError("DRBD device passed unknown node")
674
    else:
675
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
676
    return result
677

    
678
  def ComputeNodeTree(self, parent_node):
679
    """Compute the node/disk tree for this disk and its children.
680

681
    This method, given the node on which the parent disk lives, will
682
    return the list of all (node, disk) pairs which describe the disk
683
    tree in the most compact way. For example, a drbd/lvm stack
684
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
685
    which represents all the top-level devices on the nodes.
686

687
    """
688
    my_nodes = self.GetNodes(parent_node)
689
    result = [(node, self) for node in my_nodes]
690
    if not self.children:
691
      # leaf device
692
      return result
693
    for node in my_nodes:
694
      for child in self.children:
695
        child_result = child.ComputeNodeTree(node)
696
        if len(child_result) == 1:
697
          # child (and all its descendants) is simple, doesn't split
698
          # over multiple hosts, so we don't need to describe it, our
699
          # own entry for this node describes it completely
700
          continue
701
        else:
702
          # check if child nodes differ from my nodes; note that
703
          # subdisk can differ from the child itself, and be instead
704
          # one of its descendants
705
          for subnode, subdisk in child_result:
706
            if subnode not in my_nodes:
707
              result.append((subnode, subdisk))
708
            # otherwise child is under our own node, so we ignore this
709
            # entry (but probably the other results in the list will
710
            # be different)
711
    return result
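
  # Node-tree sketch (node names, VG/LV names and the DRBD logical_id layout
  # are illustrative assumptions): a DRBD disk over two LVs yields one entry
  # per node for the top-level device only, since the LV children live on the
  # same nodes as their parent.
  #
  #   lv_data = Disk(dev_type=constants.LD_LV, size=1024,
  #                  logical_id=("xenvg", "data"))
  #   lv_meta = Disk(dev_type=constants.LD_LV, size=128,
  #                  logical_id=("xenvg", "meta"))
  #   drbd = Disk(dev_type=constants.LD_DRBD8, size=1024,
  #               logical_id=("node1", "node2", 11000, 0, 0, "secret"),
  #               children=[lv_data, lv_meta])
  #   drbd.ComputeNodeTree("node1")  -> [("node1", drbd), ("node2", drbd)]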
712

    
713
  def ComputeGrowth(self, amount):
714
    """Compute the per-VG growth requirements.
715

716
    This only works for VG-based disks.
717

718
    @type amount: integer
719
    @param amount: the desired increase in (user-visible) disk space
720
    @rtype: dict
721
    @return: a dictionary of volume-groups and the required size
722

723
    """
724
    if self.dev_type == constants.LD_LV:
725
      return {self.logical_id[0]: amount}
726
    elif self.dev_type == constants.LD_DRBD8:
727
      if self.children:
728
        return self.children[0].ComputeGrowth(amount)
729
      else:
730
        return {}
731
    else:
732
      # Other disk types do not require VG space
733
      return {}
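
  # Growth sketch, reusing the drbd-over-LVs layout from the ComputeNodeTree
  # comment above: only the volume group backing the data LV needs the space.
  #
  #   drbd.ComputeGrowth(2048)     -> {"xenvg": 2048}
  #   lv_meta.ComputeGrowth(2048)  -> {"xenvg": 2048}   (plain LV case)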
734

    
735
  def RecordGrow(self, amount):
736
    """Update the size of this disk after growth.
737

738
    This method recurses over the disk's children and updates their
739
    size correspondingly. The method needs to be kept in sync with the
740
    actual algorithms from bdev.
741

742
    """
743
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
744
                         constants.LD_RBD):
745
      self.size += amount
746
    elif self.dev_type == constants.LD_DRBD8:
747
      if self.children:
748
        self.children[0].RecordGrow(amount)
749
      self.size += amount
750
    else:
751
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
752
                                   " disk type %s" % self.dev_type)
753

    
754
  def Update(self, size=None, mode=None):
755
    """Apply changes to size and mode.
756

757
    """
758
    if self.dev_type == constants.LD_DRBD8:
759
      if self.children:
760
        self.children[0].Update(size=size, mode=mode)
761
    else:
762
      assert not self.children
763

    
764
    if size is not None:
765
      self.size = size
766
    if mode is not None:
767
      self.mode = mode
768

    
769
  def UnsetSize(self):
770
    """Sets recursively the size to zero for the disk and its children.
771

772
    """
773
    if self.children:
774
      for child in self.children:
775
        child.UnsetSize()
776
    self.size = 0
777

    
778
  def SetPhysicalID(self, target_node, nodes_ip):
779
    """Convert the logical ID to the physical ID.
780

781
    This is used only for drbd, which needs ip/port configuration.
782

783
    The routine descends down and also updates its children, because
784
    this helps when only the top device is passed to the remote
785
    node.
786

787
    Arguments:
788
      - target_node: the node we wish to configure for
789
      - nodes_ip: a mapping of node name to ip
790

791
    The target_node must exist in nodes_ip, and must be one of the
792
    nodes in the logical ID for each of the DRBD devices encountered
793
    in the disk tree.
794

795
    """
796
    if self.children:
797
      for child in self.children:
798
        child.SetPhysicalID(target_node, nodes_ip)
799

    
800
    if self.logical_id is None and self.physical_id is not None:
801
      return
802
    if self.dev_type in constants.LDS_DRBD:
803
      pnode, snode, port, pminor, sminor, secret = self.logical_id
804
      if target_node not in (pnode, snode):
805
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
806
                                        target_node)
807
      pnode_ip = nodes_ip.get(pnode, None)
808
      snode_ip = nodes_ip.get(snode, None)
809
      if pnode_ip is None or snode_ip is None:
810
        raise errors.ConfigurationError("Can't find primary or secondary node"
811
                                        " for %s" % str(self))
812
      p_data = (pnode_ip, port)
813
      s_data = (snode_ip, port)
814
      if pnode == target_node:
815
        self.physical_id = p_data + s_data + (pminor, secret)
816
      else: # it must be secondary, we tested above
817
        self.physical_id = s_data + p_data + (sminor, secret)
818
    else:
819
      self.physical_id = self.logical_id
820
    return
821

    
822
  def ToDict(self):
823
    """Disk-specific conversion to standard python types.
824

825
    This replaces the children lists of objects with lists of
826
    standard python types.
827

828
    """
829
    bo = super(Disk, self).ToDict()
830

    
831
    for attr in ("children",):
832
      alist = bo.get(attr, None)
833
      if alist:
834
        bo[attr] = self._ContainerToDicts(alist)
835
    return bo
836

    
837
  @classmethod
838
  def FromDict(cls, val):
839
    """Custom function for Disks
840

841
    """
842
    obj = super(Disk, cls).FromDict(val)
843
    if obj.children:
844
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
845
    if obj.logical_id and isinstance(obj.logical_id, list):
846
      obj.logical_id = tuple(obj.logical_id)
847
    if obj.physical_id and isinstance(obj.physical_id, list):
848
      obj.physical_id = tuple(obj.physical_id)
849
    if obj.dev_type in constants.LDS_DRBD:
850
      # we need a tuple of length six here
851
      if len(obj.logical_id) < 6:
852
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
853
    return obj
854

    
855
  def __str__(self):
856
    """Custom str() formatter for disks.
857

858
    """
859
    if self.dev_type == constants.LD_LV:
860
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
861
    elif self.dev_type in constants.LDS_DRBD:
862
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
863
      val = "<DRBD8("
864
      if self.physical_id is None:
865
        phy = "unconfigured"
866
      else:
867
        phy = ("configured as %s:%s %s:%s" %
868
               (self.physical_id[0], self.physical_id[1],
869
                self.physical_id[2], self.physical_id[3]))
870

    
871
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
872
              (node_a, minor_a, node_b, minor_b, port, phy))
873
      if self.children and self.children.count(None) == 0:
874
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
875
      else:
876
        val += "no local storage"
877
    else:
878
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
879
             (self.dev_type, self.logical_id, self.physical_id, self.children))
880
    if self.iv_name is None:
881
      val += ", not visible"
882
    else:
883
      val += ", visible as /dev/%s" % self.iv_name
884
    if isinstance(self.size, int):
885
      val += ", size=%dm)>" % self.size
886
    else:
887
      val += ", size='%s')>" % (self.size,)
888
    return val
889

    
890
  def Verify(self):
891
    """Checks that this disk is correctly configured.
892

893
    """
894
    all_errors = []
895
    if self.mode not in constants.DISK_ACCESS_SET:
896
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
897
    return all_errors
898

    
899
  def UpgradeConfig(self):
900
    """Fill defaults for missing configuration values.
901

902
    """
903
    if self.children:
904
      for child in self.children:
905
        child.UpgradeConfig()
906

    
907
    if not self.params:
908
      self.params = constants.DISK_LD_DEFAULTS[self.dev_type].copy()
909
    else:
910
      self.params = FillDict(constants.DISK_LD_DEFAULTS[self.dev_type],
911
                             self.params)
912
    # add here config upgrade for this disk
913

    
914

    
915
class InstancePolicy(ConfigObject):
916
  """Config object representing instance policy limits dictionary.
917

918

919
  Note that this object is not actually used in the config, it's just
920
  used as a placeholder for a few functions.
921

922
  """
923
  @classmethod
924
  def CheckParameterSyntax(cls, ipolicy):
925
    """ Check the instance policy for validity.
926

927
    """
928
    for param in constants.ISPECS_PARAMETERS:
929
      InstancePolicy.CheckISpecSyntax(ipolicy, param)
930
    if constants.IPOLICY_DTS in ipolicy:
931
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
932
    for key in constants.IPOLICY_PARAMETERS:
933
      if key in ipolicy:
934
        InstancePolicy.CheckParameter(key, ipolicy[key])
935
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
936
    if wrong_keys:
937
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
938
                                      utils.CommaJoin(wrong_keys))
939

    
940
  @classmethod
941
  def CheckISpecSyntax(cls, ipolicy, name):
942
    """Check the instance policy for validity on a given key.
943

944
    We check if the instance policy makes sense for a given key, that is
945
    if ipolicy[min][name] <= ipolicy[std][name] <= ipolicy[max][name].
946

947
    @type ipolicy: dict
948
    @param ipolicy: dictionary with min, max, std specs
949
    @type name: string
950
    @param name: the name of the parameter whose limits are checked
951
    @raise errors.ConfigurationError: when specs for given name are not valid
952

953
    """
954
    min_v = ipolicy[constants.ISPECS_MIN].get(name, 0)
955
    std_v = ipolicy[constants.ISPECS_STD].get(name, min_v)
956
    max_v = ipolicy[constants.ISPECS_MAX].get(name, std_v)
957
    err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
958
           (name,
959
            ipolicy[constants.ISPECS_MIN].get(name, "-"),
960
            ipolicy[constants.ISPECS_MAX].get(name, "-"),
961
            ipolicy[constants.ISPECS_STD].get(name, "-")))
962
    if min_v > std_v or std_v > max_v:
963
      raise errors.ConfigurationError(err)
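
  # Validity sketch (assuming the usual "min"/"std"/"max" values of the
  # ISPECS_* constants and "disk-size" for ISPEC_DISK_SIZE): missing bounds
  # default towards each other, so only an explicit min <= std <= max
  # violation fails.
  #
  #   pol = {"min": {"disk-size": 1024}, "std": {}, "max": {"disk-size": 512}}
  #   InstancePolicy.CheckISpecSyntax(pol, "disk-size")
  #   -> raises errors.ConfigurationError (min 1024 > max 512)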
964

    
965
  @classmethod
966
  def CheckDiskTemplates(cls, disk_templates):
967
    """Checks the disk templates for validity.
968

969
    """
970
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
971
    if wrong:
972
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
973
                                      utils.CommaJoin(wrong))
974

    
975
  @classmethod
976
  def CheckParameter(cls, key, value):
977
    """Checks a parameter.
978

979
    Currently we expect all parameters to be float values.
980

981
    """
982
    try:
983
      float(value)
984
    except (TypeError, ValueError), err:
985
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
986
                                      " '%s', error: %s" % (key, value, err))
987

    
988

    
989
class Instance(TaggableObject):
990
  """Config object representing an instance."""
991
  __slots__ = [
992
    "name",
993
    "primary_node",
994
    "os",
995
    "hypervisor",
996
    "hvparams",
997
    "beparams",
998
    "osparams",
999
    "admin_state",
1000
    "nics",
1001
    "disks",
1002
    "disk_template",
1003
    "network_port",
1004
    "serial_no",
1005
    ] + _TIMESTAMPS + _UUID
1006

    
1007
  def _ComputeSecondaryNodes(self):
1008
    """Compute the list of secondary nodes.
1009

1010
    This is a simple wrapper over _ComputeAllNodes.
1011

1012
    """
1013
    all_nodes = set(self._ComputeAllNodes())
1014
    all_nodes.discard(self.primary_node)
1015
    return tuple(all_nodes)
1016

    
1017
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1018
                             "List of secondary nodes")
1019

    
1020
  def _ComputeAllNodes(self):
1021
    """Compute the list of all nodes.
1022

1023
    Since the data is already there (in the drbd disks), keeping it as
1024
    a separate normal attribute is redundant and if not properly
1025
    synchronised can cause problems. Thus it's better to compute it
1026
    dynamically.
1027

1028
    """
1029
    def _Helper(nodes, device):
1030
      """Recursively computes nodes given a top device."""
1031
      if device.dev_type in constants.LDS_DRBD:
1032
        nodea, nodeb = device.logical_id[:2]
1033
        nodes.add(nodea)
1034
        nodes.add(nodeb)
1035
      if device.children:
1036
        for child in device.children:
1037
          _Helper(nodes, child)
1038

    
1039
    all_nodes = set()
1040
    all_nodes.add(self.primary_node)
1041
    for device in self.disks:
1042
      _Helper(all_nodes, device)
1043
    return tuple(all_nodes)
1044

    
1045
  all_nodes = property(_ComputeAllNodes, None, None,
1046
                       "List of all nodes of the instance")
1047

    
1048
  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
1049
    """Provide a mapping of nodes to LVs this instance owns.
1050

1051
    This function figures out what logical volumes should belong on
1052
    which nodes, recursing through a device tree.
1053

1054
    @param lvmap: optional dictionary to receive the
1055
        'node' : ['lv', ...] data.
1056

1057
    @return: None if lvmap arg is given, otherwise, a dictionary of
1058
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
1059
        volumeN is of the form "vg_name/lv_name", compatible with
1060
        GetVolumeList()
1061

1062
    """
1063
    if node is None:
1064
      node = self.primary_node
1065

    
1066
    if lvmap is None:
1067
      lvmap = {
1068
        node: [],
1069
        }
1070
      ret = lvmap
1071
    else:
1072
      if node not in lvmap:
1073
        lvmap[node] = []
1074
      ret = None
1075

    
1076
    if not devs:
1077
      devs = self.disks
1078

    
1079
    for dev in devs:
1080
      if dev.dev_type == constants.LD_LV:
1081
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1082

    
1083
      elif dev.dev_type in constants.LDS_DRBD:
1084
        if dev.children:
1085
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1086
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1087

    
1088
      elif dev.children:
1089
        self.MapLVsByNode(lvmap, dev.children, node)
1090

    
1091
    return ret
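
  # Mapping sketch (assuming an instance whose primary node is "node1" and
  # whose single disk is the drbd-over-LVs layout used in the Disk examples
  # above): each node ends up owning the "vg/lv" names of its local volumes.
  #
  #   inst.MapLVsByNode()
  #   -> {"node1": ["xenvg/data", "xenvg/meta"],
  #       "node2": ["xenvg/data", "xenvg/meta"]}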
1092

    
1093
  def FindDisk(self, idx):
1094
    """Find a disk given having a specified index.
1095

1096
    This is just a wrapper that does validation of the index.
1097

1098
    @type idx: int
1099
    @param idx: the disk index
1100
    @rtype: L{Disk}
1101
    @return: the corresponding disk
1102
    @raise errors.OpPrereqError: when the given index is not valid
1103

1104
    """
1105
    try:
1106
      idx = int(idx)
1107
      return self.disks[idx]
1108
    except (TypeError, ValueError), err:
1109
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1110
                                 errors.ECODE_INVAL)
1111
    except IndexError:
1112
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1113
                                 " 0 to %d" % (idx, len(self.disks) - 1),
1114
                                 errors.ECODE_INVAL)
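
  # Index-validation sketch (assuming an instance with at least two disks):
  # both non-integer and out-of-range indices are reported as OpPrereqError
  # instead of raw Python exceptions.
  #
  #   inst.FindDisk(0)      -> inst.disks[0]
  #   inst.FindDisk("1")    -> inst.disks[1]   (string indices are accepted)
  #   inst.FindDisk("foo")  -> raises errors.OpPrereqError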
1115

    
1116
  def ToDict(self):
1117
    """Instance-specific conversion to standard python types.
1118

1119
    This replaces the children lists of objects with lists of standard
1120
    python types.
1121

1122
    """
1123
    bo = super(Instance, self).ToDict()
1124

    
1125
    for attr in "nics", "disks":
1126
      alist = bo.get(attr, None)
1127
      if alist:
1128
        nlist = self._ContainerToDicts(alist)
1129
      else:
1130
        nlist = []
1131
      bo[attr] = nlist
1132
    return bo
1133

    
1134
  @classmethod
1135
  def FromDict(cls, val):
1136
    """Custom function for instances.
1137

1138
    """
1139
    if "admin_state" not in val:
1140
      if val.get("admin_up", False):
1141
        val["admin_state"] = constants.ADMINST_UP
1142
      else:
1143
        val["admin_state"] = constants.ADMINST_DOWN
1144
    if "admin_up" in val:
1145
      del val["admin_up"]
1146
    obj = super(Instance, cls).FromDict(val)
1147
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
1148
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
1149
    return obj
1150

    
1151
  def UpgradeConfig(self):
1152
    """Fill defaults for missing configuration values.
1153

1154
    """
1155
    for nic in self.nics:
1156
      nic.UpgradeConfig()
1157
    for disk in self.disks:
1158
      disk.UpgradeConfig()
1159
    if self.hvparams:
1160
      for key in constants.HVC_GLOBALS:
1161
        try:
1162
          del self.hvparams[key]
1163
        except KeyError:
1164
          pass
1165
    if self.osparams is None:
1166
      self.osparams = {}
1167
    UpgradeBeParams(self.beparams)
1168

    
1169

    
1170
class OS(ConfigObject):
1171
  """Config object representing an operating system.
1172

1173
  @type supported_parameters: list
1174
  @ivar supported_parameters: a list of tuples, name and description,
1175
      containing the supported parameters by this OS
1176

1177
  @type VARIANT_DELIM: string
1178
  @cvar VARIANT_DELIM: the variant delimiter
1179

1180
  """
1181
  __slots__ = [
1182
    "name",
1183
    "path",
1184
    "api_versions",
1185
    "create_script",
1186
    "export_script",
1187
    "import_script",
1188
    "rename_script",
1189
    "verify_script",
1190
    "supported_variants",
1191
    "supported_parameters",
1192
    ]
1193

    
1194
  VARIANT_DELIM = "+"
1195

    
1196
  @classmethod
1197
  def SplitNameVariant(cls, name):
1198
    """Splits the name into the proper name and variant.
1199

1200
    @param name: the OS (unprocessed) name
1201
    @rtype: list
1202
    @return: a list of two elements; if the original name didn't
1203
        contain a variant, the variant is returned as an empty string
1204

1205
    """
1206
    nv = name.split(cls.VARIANT_DELIM, 1)
1207
    if len(nv) == 1:
1208
      nv.append("")
1209
    return nv
1210

    
1211
  @classmethod
1212
  def GetName(cls, name):
1213
    """Returns the proper name of the os (without the variant).
1214

1215
    @param name: the OS (unprocessed) name
1216

1217
    """
1218
    return cls.SplitNameVariant(name)[0]
1219

    
1220
  @classmethod
1221
  def GetVariant(cls, name):
1222
    """Returns the variant the os (without the base name).
1223

1224
    @param name: the OS (unprocessed) name
1225

1226
    """
1227
    return cls.SplitNameVariant(name)[1]
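
  # Name/variant sketch ("debian+squeeze" is an illustrative OS name):
  #
  #   OS.SplitNameVariant("debian+squeeze")  -> ["debian", "squeeze"]
  #   OS.SplitNameVariant("debian")          -> ["debian", ""]
  #   OS.GetName("debian+squeeze")           -> "debian"
  #   OS.GetVariant("debian")                -> ""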
1228

    
1229

    
1230
class NodeHvState(ConfigObject):
1231
  """Hypvervisor state on a node.
1232

1233
  @ivar mem_total: Total amount of memory
1234
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1235
    available)
1236
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1237
    rounding
1238
  @ivar mem_inst: Memory used by instances living on node
1239
  @ivar cpu_total: Total node CPU core count
1240
  @ivar cpu_node: Number of CPU cores reserved for the node itself
1241

1242
  """
1243
  __slots__ = [
1244
    "mem_total",
1245
    "mem_node",
1246
    "mem_hv",
1247
    "mem_inst",
1248
    "cpu_total",
1249
    "cpu_node",
1250
    ] + _TIMESTAMPS
1251

    
1252

    
1253
class NodeDiskState(ConfigObject):
1254
  """Disk state on a node.
1255

1256
  """
1257
  __slots__ = [
1258
    "total",
1259
    "reserved",
1260
    "overhead",
1261
    ] + _TIMESTAMPS
1262

    
1263

    
1264
class Node(TaggableObject):
1265
  """Config object representing a node.
1266

1267
  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1268
  @ivar hv_state_static: Hypervisor state overridden by user
1269
  @ivar disk_state: Disk state (e.g. free space)
1270
  @ivar disk_state_static: Disk state overridden by user
1271

1272
  """
1273
  __slots__ = [
1274
    "name",
1275
    "primary_ip",
1276
    "secondary_ip",
1277
    "serial_no",
1278
    "master_candidate",
1279
    "offline",
1280
    "drained",
1281
    "group",
1282
    "master_capable",
1283
    "vm_capable",
1284
    "ndparams",
1285
    "powered",
1286
    "hv_state",
1287
    "hv_state_static",
1288
    "disk_state",
1289
    "disk_state_static",
1290
    ] + _TIMESTAMPS + _UUID
1291

    
1292
  def UpgradeConfig(self):
1293
    """Fill defaults for missing configuration values.
1294

1295
    """
1296
    # pylint: disable=E0203
1297
    # because these are "defined" via slots, not manually
1298
    if self.master_capable is None:
1299
      self.master_capable = True
1300

    
1301
    if self.vm_capable is None:
1302
      self.vm_capable = True
1303

    
1304
    if self.ndparams is None:
1305
      self.ndparams = {}
1306

    
1307
    if self.powered is None:
1308
      self.powered = True
1309

    
1310
  def ToDict(self):
1311
    """Custom function for serializing.
1312

1313
    """
1314
    data = super(Node, self).ToDict()
1315

    
1316
    hv_state = data.get("hv_state", None)
1317
    if hv_state is not None:
1318
      data["hv_state"] = self._ContainerToDicts(hv_state)
1319

    
1320
    disk_state = data.get("disk_state", None)
1321
    if disk_state is not None:
1322
      data["disk_state"] = \
1323
        dict((key, self._ContainerToDicts(value))
1324
             for (key, value) in disk_state.items())
1325

    
1326
    return data
1327

    
1328
  @classmethod
1329
  def FromDict(cls, val):
1330
    """Custom function for deserializing.
1331

1332
    """
1333
    obj = super(Node, cls).FromDict(val)
1334

    
1335
    if obj.hv_state is not None:
1336
      obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1337

    
1338
    if obj.disk_state is not None:
1339
      obj.disk_state = \
1340
        dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState))
1341
             for (key, value) in obj.disk_state.items())
1342

    
1343
    return obj
1344

    
1345

    
1346
class NodeGroup(TaggableObject):
1347
  """Config object representing a node group."""
1348
  __slots__ = [
1349
    "name",
1350
    "members",
1351
    "ndparams",
1352
    "diskparams",
1353
    "ipolicy",
1354
    "serial_no",
1355
    "hv_state_static",
1356
    "disk_state_static",
1357
    "alloc_policy",
1358
    ] + _TIMESTAMPS + _UUID
1359

    
1360
  def ToDict(self):
1361
    """Custom function for nodegroup.
1362

1363
    This discards the members object, which gets recalculated and is only kept
1364
    in memory.
1365

1366
    """
1367
    mydict = super(NodeGroup, self).ToDict()
1368
    del mydict["members"]
1369
    return mydict
1370

    
1371
  @classmethod
1372
  def FromDict(cls, val):
1373
    """Custom function for nodegroup.
1374

1375
    The members slot is initialized to an empty list, upon deserialization.
1376

1377
    """
1378
    obj = super(NodeGroup, cls).FromDict(val)
1379
    obj.members = []
1380
    return obj
1381

    
1382
  def UpgradeConfig(self):
1383
    """Fill defaults for missing configuration values.
1384

1385
    """
1386
    if self.ndparams is None:
1387
      self.ndparams = {}
1388

    
1389
    if self.serial_no is None:
1390
      self.serial_no = 1
1391

    
1392
    if self.alloc_policy is None:
1393
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1394

    
1395
    # We only update mtime, and not ctime, since we would not be able
1396
    # to provide a correct value for creation time.
1397
    if self.mtime is None:
1398
      self.mtime = time.time()
1399

    
1400
    self.diskparams = UpgradeDiskParams(self.diskparams)
1401
    if self.ipolicy is None:
1402
      self.ipolicy = MakeEmptyIPolicy()
1403

    
1404
  def FillND(self, node):
1405
    """Return filled out ndparams for L{objects.Node}
1406

1407
    @type node: L{objects.Node}
1408
    @param node: A Node object to fill
1409
    @return: a copy of the node's ndparams with defaults filled
1410

1411
    """
1412
    return self.SimpleFillND(node.ndparams)
1413

    
1414
  def SimpleFillND(self, ndparams):
1415
    """Fill a given ndparams dict with defaults.
1416

1417
    @type ndparams: dict
1418
    @param ndparams: the dict to fill
1419
    @rtype: dict
1420
    @return: a copy of the passed in ndparams with missing keys filled
1421
        from the node group defaults
1422

1423
    """
1424
    return FillDict(self.ndparams, ndparams)
1425

    
1426

    
1427
class Cluster(TaggableObject):
1428
  """Config object representing the cluster."""
1429
  __slots__ = [
1430
    "serial_no",
1431
    "rsahostkeypub",
1432
    "highest_used_port",
1433
    "tcpudp_port_pool",
1434
    "mac_prefix",
1435
    "volume_group_name",
1436
    "reserved_lvs",
1437
    "drbd_usermode_helper",
1438
    "default_bridge",
1439
    "default_hypervisor",
1440
    "master_node",
1441
    "master_ip",
1442
    "master_netdev",
1443
    "master_netmask",
1444
    "use_external_mip_script",
1445
    "cluster_name",
1446
    "file_storage_dir",
1447
    "shared_file_storage_dir",
1448
    "enabled_hypervisors",
1449
    "hvparams",
1450
    "ipolicy",
1451
    "os_hvp",
1452
    "beparams",
1453
    "osparams",
1454
    "nicparams",
1455
    "ndparams",
1456
    "diskparams",
1457
    "candidate_pool_size",
1458
    "modify_etc_hosts",
1459
    "modify_ssh_setup",
1460
    "maintain_node_health",
1461
    "uid_pool",
1462
    "default_iallocator",
1463
    "hidden_os",
1464
    "blacklisted_os",
1465
    "primary_ip_family",
1466
    "prealloc_wipe_disks",
1467
    "hv_state_static",
1468
    "disk_state_static",
1469
    ] + _TIMESTAMPS + _UUID
1470

    
1471
  def UpgradeConfig(self):
1472
    """Fill defaults for missing configuration values.
1473

1474
    """
1475
    # pylint: disable=E0203
1476
    # because these are "defined" via slots, not manually
1477
    if self.hvparams is None:
1478
      self.hvparams = constants.HVC_DEFAULTS
1479
    else:
1480
      for hypervisor in self.hvparams:
1481
        self.hvparams[hypervisor] = FillDict(
1482
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1483

    
1484
    if self.os_hvp is None:
1485
      self.os_hvp = {}
1486

    
1487
    # osparams added before 2.2
1488
    if self.osparams is None:
1489
      self.osparams = {}
1490

    
1491
    self.ndparams = UpgradeNDParams(self.ndparams)
1492

    
1493
    self.beparams = UpgradeGroupedParams(self.beparams,
1494
                                         constants.BEC_DEFAULTS)
1495
    for beparams_group in self.beparams:
1496
      UpgradeBeParams(self.beparams[beparams_group])
1497

    
1498
    migrate_default_bridge = not self.nicparams
1499
    self.nicparams = UpgradeGroupedParams(self.nicparams,
1500
                                          constants.NICC_DEFAULTS)
1501
    if migrate_default_bridge:
1502
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1503
        self.default_bridge
1504

    
1505
    if self.modify_etc_hosts is None:
1506
      self.modify_etc_hosts = True
1507

    
1508
    if self.modify_ssh_setup is None:
1509
      self.modify_ssh_setup = True
1510

    
1511
    # default_bridge is no longer used in 2.1. The slot is left there to
1512
    # support auto-upgrading. It can be removed once we decide to deprecate
1513
    # upgrading straight from 2.0.
1514
    if self.default_bridge is not None:
1515
      self.default_bridge = None
1516

    
1517
    # default_hypervisor is just the first enabled one in 2.1. This slot and
1518
    # code can be removed once upgrading straight from 2.0 is deprecated.
1519
    if self.default_hypervisor is not None:
1520
      self.enabled_hypervisors = ([self.default_hypervisor] +
1521
        [hvname for hvname in self.enabled_hypervisors
1522
         if hvname != self.default_hypervisor])
1523
      self.default_hypervisor = None
1524

    
1525
    # maintain_node_health added after 2.1.1
1526
    if self.maintain_node_health is None:
1527
      self.maintain_node_health = False
1528

    
1529
    if self.uid_pool is None:
1530
      self.uid_pool = []
1531

    
1532
    if self.default_iallocator is None:
1533
      self.default_iallocator = ""
1534

    
1535
    # reserved_lvs added before 2.2
1536
    if self.reserved_lvs is None:
1537
      self.reserved_lvs = []
1538

    
1539
    # hidden and blacklisted operating systems added before 2.2.1
1540
    if self.hidden_os is None:
1541
      self.hidden_os = []
1542

    
1543
    if self.blacklisted_os is None:
1544
      self.blacklisted_os = []
1545

    
1546
    # primary_ip_family added before 2.3
1547
    if self.primary_ip_family is None:
1548
      self.primary_ip_family = AF_INET
1549

    
1550
    if self.master_netmask is None:
1551
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
1552
      self.master_netmask = ipcls.iplen
1553

    
1554
    if self.prealloc_wipe_disks is None:
1555
      self.prealloc_wipe_disks = False
1556

    
1557
    # shared_file_storage_dir added before 2.5
1558
    if self.shared_file_storage_dir is None:
1559
      self.shared_file_storage_dir = ""
1560

    
1561
    if self.use_external_mip_script is None:
1562
      self.use_external_mip_script = False
1563

    
1564
    self.diskparams = UpgradeDiskParams(self.diskparams)
1565

    
1566
    # instance policy added before 2.6
1567
    if self.ipolicy is None:
1568
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
1569
    else:
1570
      # we can either make sure to upgrade the ipolicy always, or only
1571
      # do it in some corner cases (e.g. missing keys); note that this
1572
      # will break any removal of keys from the ipolicy dict
1573
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
1574

    
1575
  @property
1576
  def primary_hypervisor(self):
1577
    """The first hypervisor is the primary.
1578

1579
    Useful, for example, for L{Node}'s hv/disk state.
1580

1581
    """
1582
    return self.enabled_hypervisors[0]
1583

    
1584
  def ToDict(self):
1585
    """Custom function for cluster.
1586

1587
    """
1588
    mydict = super(Cluster, self).ToDict()
1589
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
1590
    return mydict
1591

    
1592
  @classmethod
1593
  def FromDict(cls, val):
1594
    """Custom function for cluster.
1595

1596
    """
1597
    obj = super(Cluster, cls).FromDict(val)
1598
    if not isinstance(obj.tcpudp_port_pool, set):
1599
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1600
    return obj
1601

    
1602
  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1603
    """Get the default hypervisor parameters for the cluster.
1604

1605
    @param hypervisor: the hypervisor name
1606
    @param os_name: if specified, we'll also update the defaults for this OS
1607
    @param skip_keys: if passed, list of keys not to use
1608
    @return: the defaults dict
1609

1610
    """
1611
    if skip_keys is None:
1612
      skip_keys = []
1613

    
1614
    fill_stack = [self.hvparams.get(hypervisor, {})]
1615
    if os_name is not None:
1616
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1617
      fill_stack.append(os_hvp)
1618

    
1619
    ret_dict = {}
1620
    for o_dict in fill_stack:
1621
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1622

    
1623
    return ret_dict
1624

    
1625
  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1626
    """Fill a given hvparams dict with cluster defaults.
1627

1628
    @type hv_name: string
1629
    @param hv_name: the hypervisor to use
1630
    @type os_name: string
1631
    @param os_name: the OS to use for overriding the hypervisor defaults
1632
    @type skip_globals: boolean
1633
    @param skip_globals: if True, the global hypervisor parameters will
1634
        not be filled
1635
    @rtype: dict
1636
    @return: a copy of the given hvparams with missing keys filled from
1637
        the cluster defaults
1638

1639
    """
1640
    if skip_globals:
1641
      skip_keys = constants.HVC_GLOBALS
1642
    else:
1643
      skip_keys = []
1644

    
1645
    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1646
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
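  # Illustrative sketch, not part of the original module, using hypothetical
  # parameter values.  GetHVDefaults/SimpleFillHV layer the settings from the
  # least to the most specific source: cluster-wide hvparams, then per-OS
  # overrides from os_hvp, then the caller-supplied dict.
  #
  #   cluster.hvparams = {"kvm": {"acpi": True, "kernel_path": "/vmlinuz"}}
  #   cluster.os_hvp = {"debian": {"kvm": {"acpi": False}}}
  #   cluster.SimpleFillHV("kvm", "debian", {"kernel_path": ""})
  #   # -> {"acpi": False, "kernel_path": ""}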
1647

    
1648
  def FillHV(self, instance, skip_globals=False):
1649
    """Fill an instance's hvparams dict with cluster defaults.
1650

1651
    @type instance: L{objects.Instance}
1652
    @param instance: the instance parameter to fill
1653
    @type skip_globals: boolean
1654
    @param skip_globals: if True, the global hypervisor parameters will
1655
        not be filled
1656
    @rtype: dict
1657
    @return: a copy of the instance's hvparams with missing keys filled from
1658
        the cluster defaults
1659

1660
    """
1661
    return self.SimpleFillHV(instance.hypervisor, instance.os,
1662
                             instance.hvparams, skip_globals)
1663

    
1664
  def SimpleFillBE(self, beparams):
1665
    """Fill a given beparams dict with cluster defaults.
1666

1667
    @type beparams: dict
1668
    @param beparams: the dict to fill
1669
    @rtype: dict
1670
    @return: a copy of the passed in beparams with missing keys filled
1671
        from the cluster defaults
1672

1673
    """
1674
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
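  # Illustrative sketch, not part of the original module: cluster-level
  # beparams (and nicparams below) are stored under the constants.PP_DEFAULT
  # profile, as the call above shows.  With hypothetical keys:
  #
  #   cluster.beparams = {constants.PP_DEFAULT: {"memory": 128, "vcpus": 1}}
  #   cluster.SimpleFillBE({"memory": 512})
  #   # -> {"memory": 512, "vcpus": 1}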
1675

    
1676
  def FillBE(self, instance):
1677
    """Fill an instance's beparams dict with cluster defaults.
1678

1679
    @type instance: L{objects.Instance}
1680
    @param instance: the instance parameter to fill
1681
    @rtype: dict
1682
    @return: a copy of the instance's beparams with missing keys filled from
1683
        the cluster defaults
1684

1685
    """
1686
    return self.SimpleFillBE(instance.beparams)
1687

    
1688
  def SimpleFillNIC(self, nicparams):
1689
    """Fill a given nicparams dict with cluster defaults.
1690

1691
    @type nicparams: dict
1692
    @param nicparams: the dict to fill
1693
    @rtype: dict
1694
    @return: a copy of the passed in nicparams with missing keys filled
1695
        from the cluster defaults
1696

1697
    """
1698
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1699

    
1700
  def SimpleFillOS(self, os_name, os_params):
1701
    """Fill an instance's osparams dict with cluster defaults.
1702

1703
    @type os_name: string
1704
    @param os_name: the OS name to use
1705
    @type os_params: dict
1706
    @param os_params: the dict to fill with default values
1707
    @rtype: dict
1708
    @return: a copy of the passed in os_params with missing keys filled from
1709
        the cluster defaults
1710

1711
    """
1712
    name_only = os_name.split("+", 1)[0]
1713
    # base OS
1714
    result = self.osparams.get(name_only, {})
1715
    # OS with variant
1716
    result = FillDict(result, self.osparams.get(os_name, {}))
1717
    # specified params
1718
    return FillDict(result, os_params)
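  # Illustrative sketch, not part of the original module, using hypothetical
  # OS parameter names.  For an OS variant such as "debootstrap+minimal" the
  # fill order is: base OS name, then the full variant name, then the
  # caller-supplied dict.
  #
  #   cluster.osparams = {
  #     "debootstrap": {"arch": "amd64", "mirror": "http://a.example.com"},
  #     "debootstrap+minimal": {"mirror": "http://b.example.com"},
  #     }
  #   cluster.SimpleFillOS("debootstrap+minimal", {"arch": "i386"})
  #   # -> {"arch": "i386", "mirror": "http://b.example.com"}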
1719

    
1720
  @staticmethod
1721
  def SimpleFillHvState(hv_state):
1722
    """Fill an hv_state sub dict with cluster defaults.
1723

1724
    """
1725
    return FillDict(constants.HVST_DEFAULTS, hv_state)
1726

    
1727
  @staticmethod
1728
  def SimpleFillDiskState(disk_state):
1729
    """Fill an disk_state sub dict with cluster defaults.
1730

1731
    """
1732
    return FillDict(constants.DS_DEFAULTS, disk_state)
1733

    
1734
  def FillND(self, node, nodegroup):
1735
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
1736

1737
    @type node: L{objects.Node}
1738
    @param node: A Node object to fill
1739
    @type nodegroup: L{objects.NodeGroup}
1740
    @param nodegroup: A NodeGroup object to fill
1740
    @return: a copy of the node's ndparams with defaults filled
1742

1743
    """
1744
    return self.SimpleFillND(nodegroup.FillND(node))
1745

    
1746
  def SimpleFillND(self, ndparams):
1747
    """Fill a given ndparams dict with defaults.
1748

1749
    @type ndparams: dict
1750
    @param ndparams: the dict to fill
1751
    @rtype: dict
1752
    @return: a copy of the passed in ndparams with missing keys filled
1753
        from the cluster defaults
1754

1755
    """
1756
    return FillDict(self.ndparams, ndparams)
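  # Illustrative sketch, not part of the original module, using hypothetical
  # ndparams values and assuming (as elsewhere in this module) that
  # NodeGroup.FillND applies the group's ndparams underneath the node's own.
  # FillND above thus layers the three levels, node values winning over group
  # values, which win over cluster defaults:
  #
  #   cluster.ndparams = {"oob_program": "", "spindle_count": 1}
  #   nodegroup.ndparams = {"spindle_count": 2}
  #   node.ndparams = {"oob_program": "/usr/bin/oob"}
  #   cluster.FillND(node, nodegroup)
  #   # -> {"oob_program": "/usr/bin/oob", "spindle_count": 2}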
1757

    
1758
  def SimpleFillIPolicy(self, ipolicy):
1759
    """ Fill instance policy dict with defaults.
1760

1761
    @type ipolicy: dict
1762
    @param ipolicy: the dict to fill
1763
    @rtype: dict
1764
    @return: a copy of passed ipolicy with missing keys filled from
1765
      the cluster defaults
1766

1767
    """
1768
    return FillIPolicy(self.ipolicy, ipolicy)
1769

    
1770

    
1771
class BlockDevStatus(ConfigObject):
1772
  """Config object representing the status of a block device."""
1773
  __slots__ = [
1774
    "dev_path",
1775
    "major",
1776
    "minor",
1777
    "sync_percent",
1778
    "estimated_time",
1779
    "is_degraded",
1780
    "ldisk_status",
1781
    ]
1782

    
1783

    
1784
class ImportExportStatus(ConfigObject):
1785
  """Config object representing the status of an import or export."""
1786
  __slots__ = [
1787
    "recent_output",
1788
    "listen_port",
1789
    "connected",
1790
    "progress_mbytes",
1791
    "progress_throughput",
1792
    "progress_eta",
1793
    "progress_percent",
1794
    "exit_status",
1795
    "error_message",
1796
    ] + _TIMESTAMPS
1797

    
1798

    
1799
class ImportExportOptions(ConfigObject):
1800
  """Options for import/export daemon
1801

1802
  @ivar key_name: X509 key name (None for cluster certificate)
1803
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
1804
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
1805
  @ivar magic: Used to ensure the connection goes to the right disk
1806
  @ivar ipv6: Whether to use IPv6
1807
  @ivar connect_timeout: Number of seconds for establishing connection
1808

1809
  """
1810
  __slots__ = [
1811
    "key_name",
1812
    "ca_pem",
1813
    "compress",
1814
    "magic",
1815
    "ipv6",
1816
    "connect_timeout",
1817
    ]
1818

    
1819

    
1820
class ConfdRequest(ConfigObject):
1821
  """Object holding a confd request.
1822

1823
  @ivar protocol: confd protocol version
1824
  @ivar type: confd query type
1825
  @ivar query: query request
1826
  @ivar rsalt: requested reply salt
1827

1828
  """
1829
  __slots__ = [
1830
    "protocol",
1831
    "type",
1832
    "query",
1833
    "rsalt",
1834
    ]
1835

    
1836

    
1837
class ConfdReply(ConfigObject):
1838
  """Object holding a confd reply.
1839

1840
  @ivar protocol: confd protocol version
1841
  @ivar status: reply status code (ok, error)
1842
  @ivar answer: confd query reply
1843
  @ivar serial: configuration serial number
1844

1845
  """
1846
  __slots__ = [
1847
    "protocol",
1848
    "status",
1849
    "answer",
1850
    "serial",
1851
    ]
1852

    
1853

    
1854
class QueryFieldDefinition(ConfigObject):
1855
  """Object holding a query field definition.
1856

1857
  @ivar name: Field name
1858
  @ivar title: Human-readable title
1859
  @ivar kind: Field type
1860
  @ivar doc: Human-readable description
1861

1862
  """
1863
  __slots__ = [
1864
    "name",
1865
    "title",
1866
    "kind",
1867
    "doc",
1868
    ]
1869

    
1870

    
1871
class _QueryResponseBase(ConfigObject):
1872
  __slots__ = [
1873
    "fields",
1874
    ]
1875

    
1876
  def ToDict(self):
1877
    """Custom function for serializing.
1878

1879
    """
1880
    mydict = super(_QueryResponseBase, self).ToDict()
1881
    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
1882
    return mydict
1883

    
1884
  @classmethod
1885
  def FromDict(cls, val):
1886
    """Custom function for de-serializing.
1887

1888
    """
1889
    obj = super(_QueryResponseBase, cls).FromDict(val)
1890
    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
1891
    return obj
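  # Illustrative sketch, not part of the original module: ``fields`` holds
  # QueryFieldDefinition objects, so serialization has to convert that
  # container explicitly in both directions, as done above.  For example:
  #
  #   resp = QueryResponse(fields=[QueryFieldDefinition(name="name")], data=[])
  #   QueryResponse.FromDict(resp.ToDict()).fields[0].name  # -> "name"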
1892

    
1893

    
1894
class QueryRequest(ConfigObject):
1895
  """Object holding a query request.
1896

1897
  """
1898
  __slots__ = [
1899
    "what",
1900
    "fields",
1901
    "qfilter",
1902
    ]
1903

    
1904

    
1905
class QueryResponse(_QueryResponseBase):
1906
  """Object holding the response to a query.
1907

1908
  @ivar fields: List of L{QueryFieldDefinition} objects
1909
  @ivar data: Requested data
1910

1911
  """
1912
  __slots__ = [
1913
    "data",
1914
    ]
1915

    
1916

    
1917
class QueryFieldsRequest(ConfigObject):
1918
  """Object holding a request for querying available fields.
1919

1920
  """
1921
  __slots__ = [
1922
    "what",
1923
    "fields",
1924
    ]
1925

    
1926

    
1927
class QueryFieldsResponse(_QueryResponseBase):
1928
  """Object holding the response to a query for fields.
1929

1930
  @ivar fields: List of L{QueryFieldDefinition} objects
1931

1932
  """
1933
  __slots__ = [
1934
    ]
1935

    
1936

    
1937
class MigrationStatus(ConfigObject):
1938
  """Object holding the status of a migration.
1939

1940
  """
1941
  __slots__ = [
1942
    "status",
1943
    "transferred_ram",
1944
    "total_ram",
1945
    ]
1946

    
1947

    
1948
class InstanceConsole(ConfigObject):
1949
  """Object describing how to access the console of an instance.
1950

1951
  """
1952
  __slots__ = [
1953
    "instance",
1954
    "kind",
1955
    "message",
1956
    "host",
1957
    "port",
1958
    "user",
1959
    "command",
1960
    "display",
1961
    ]
1962

    
1963
  def Validate(self):
1964
    """Validates contents of this object.
1965

1966
    """
1967
    assert self.kind in constants.CONS_ALL, "Unknown console type"
1968
    assert self.instance, "Missing instance name"
1969
    assert self.message or self.kind in [constants.CONS_SSH,
1970
                                         constants.CONS_SPICE,
1971
                                         constants.CONS_VNC]
1972
    assert self.host or self.kind == constants.CONS_MESSAGE
1973
    assert self.port or self.kind in [constants.CONS_MESSAGE,
1974
                                      constants.CONS_SSH]
1975
    assert self.user or self.kind in [constants.CONS_MESSAGE,
1976
                                      constants.CONS_SPICE,
1977
                                      constants.CONS_VNC]
1978
    assert self.command or self.kind in [constants.CONS_MESSAGE,
1979
                                         constants.CONS_SPICE,
1980
                                         constants.CONS_VNC]
1981
    assert self.display or self.kind in [constants.CONS_MESSAGE,
1982
                                         constants.CONS_SPICE,
1983
                                         constants.CONS_SSH]
1984
    return True
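  # Illustrative sketch, not part of the original module, with hypothetical
  # values.  The assertions above require different attributes depending on
  # the console kind; an SSH console, for example, needs a host, user and
  # command, but neither port, message nor display:
  #
  #   con = InstanceConsole(instance="inst1.example.com",
  #                         kind=constants.CONS_SSH,
  #                         host="node1.example.com", user="root",
  #                         command=["ssh", "node1.example.com"])
  #   con.Validate()  # passes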
1985

    
1986

    
1987
class SerializableConfigParser(ConfigParser.SafeConfigParser):
1988
  """Simple wrapper over ConfigParse that allows serialization.
1989

1990
  This class is basically ConfigParser.SafeConfigParser with two
1991
  additional methods that allow it to serialize/deserialize to/from a
1992
  buffer.
1993

1994
  """
1995
  def Dumps(self):
1996
    """Dump this instance and return the string representation."""
1997
    buf = StringIO()
1998
    self.write(buf)
1999
    return buf.getvalue()
2000

    
2001
  @classmethod
2002
  def Loads(cls, data):
2003
    """Load data from a string."""
2004
    buf = StringIO(data)
2005
    cfp = cls()
2006
    cfp.readfp(buf)
2007
    return cfp
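  # Illustrative sketch, not part of the original module: Dumps/Loads
  # round-trip a parser through a plain string, e.g. for shipping
  # configuration data between processes.
  #
  #   cfp = SerializableConfigParser()
  #   cfp.add_section("hypothetical")
  #   cfp.set("hypothetical", "key", "value")
  #   data = cfp.Dumps()
  #   SerializableConfigParser.Loads(data).get("hypothetical", "key")  # -> "value"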