#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable=E0203,W0201,R0902

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

# R0902: Allow instances of these objects to have more than 20 attributes

import ConfigParser
import re
import copy
import logging
import time
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants
from ganeti import netutils
from ganeti import outils
from ganeti import utils

from socket import AF_INET


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]


def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
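
# Illustrative example (editor's note, not part of the original module):
# FillDict() overlays the custom values on a deep copy of the defaults and
# then drops any skipped keys, e.g.:
#   FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"]) == {"b": 3}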


def FillIPolicy(default_ipolicy, custom_ipolicy):
  """Fills an instance policy with defaults.

  """
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
  ret_dict = copy.deepcopy(custom_ipolicy)
  for key in default_ipolicy:
    if key not in ret_dict:
      ret_dict[key] = copy.deepcopy(default_ipolicy[key])
    elif key == constants.ISPECS_STD:
      ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
  return ret_dict


def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
  """Fills the disk parameter defaults.

  @see: L{FillDict} for parameters and return value

  """
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES

  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
                             skip_keys=skip_keys))
              for dt in constants.DISK_TEMPLATES)


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target


def UpgradeBeParams(target):
  """Update the be parameters dict to the new format.

  @type target: dict
  @param target: "be" parameters dict

  """
  if constants.BE_MEMORY in target:
    memory = target[constants.BE_MEMORY]
    target[constants.BE_MAXMEM] = memory
    target[constants.BE_MINMEM] = memory
    del target[constants.BE_MEMORY]

    
142

    
143
def UpgradeDiskParams(diskparams):
144
  """Upgrade the disk parameters.
145

146
  @type diskparams: dict
147
  @param diskparams: disk parameters to upgrade
148
  @rtype: dict
149
  @return: the upgraded disk parameters dict
150

151
  """
152
  if not diskparams:
153
    result = {}
154
  else:
155
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
156

    
157
  return result
158

    
159

    
160
def UpgradeNDParams(ndparams):
161
  """Upgrade ndparams structure.
162

163
  @type ndparams: dict
164
  @param ndparams: disk parameters to upgrade
165
  @rtype: dict
166
  @return: the upgraded node parameters dict
167

168
  """
169
  if ndparams is None:
170
    ndparams = {}
171

    
172
  if (constants.ND_OOB_PROGRAM in ndparams and
173
      ndparams[constants.ND_OOB_PROGRAM] is None):
174
    # will be reset by the line below
175
    del ndparams[constants.ND_OOB_PROGRAM]
176
  return FillDict(constants.NDC_DEFAULTS, ndparams)
177

    
178

    
179
def MakeEmptyIPolicy():
180
  """Create empty IPolicy dictionary.
181

182
  """
183
  return {}
184

    
185

    
186
class ConfigObject(outils.ValidatedSlots):
187
  """A generic config object.
188

189
  It has the following properties:
190

191
    - provides somewhat safe recursive unpickling and pickling for its classes
192
    - unset attributes which are defined in slots are always returned
193
      as None instead of raising an error
194

195
  Classes derived from this must always declare __slots__ (we use many
196
  config objects and the memory reduction is useful)
197

198
  """
199
  __slots__ = []
200

    
201
  def __getattr__(self, name):
202
    if name not in self.GetAllSlots():
203
      raise AttributeError("Invalid object attribute %s.%s" %
204
                           (type(self).__name__, name))
205
    return None
206

    
207
  def __setstate__(self, state):
208
    slots = self.GetAllSlots()
209
    for name in state:
210
      if name in slots:
211
        setattr(self, name, state[name])
212

    
213
  def Validate(self):
214
    """Validates the slots.
215

216
    """
217

    
218
  def ToDict(self):
219
    """Convert to a dict holding only standard python types.
220

221
    The generic routine just dumps all of this object's attributes in
222
    a dict. It does not work if the class has children who are
223
    ConfigObjects themselves (e.g. the nics list in an Instance), in
224
    which case the object should subclass the function in order to
225
    make sure all objects returned are only standard python types.
226

227
    """
228
    result = {}
229
    for name in self.GetAllSlots():
230
      value = getattr(self, name, None)
231
      if value is not None:
232
        result[name] = value
233
    return result
234

    
235
  __getstate__ = ToDict
236

    
237
  @classmethod
238
  def FromDict(cls, val):
239
    """Create an object from a dictionary.
240

241
    This generic routine takes a dict, instantiates a new instance of
242
    the given class, and sets attributes based on the dict content.
243

244
    As for `ToDict`, this does not work if the class has children
245
    who are ConfigObjects themselves (e.g. the nics list in an
246
    Instance), in which case the object should subclass the function
247
    and alter the objects.
248

249
    """
250
    if not isinstance(val, dict):
251
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
252
                                      " expected dict, got %s" % type(val))
253
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
254
    obj = cls(**val_str) # pylint: disable=W0142
255
    return obj
256

    
257
  def Copy(self):
258
    """Makes a deep copy of the current object and its children.
259

260
    """
261
    dict_form = self.ToDict()
262
    clone_obj = self.__class__.FromDict(dict_form)
263
    return clone_obj
264

    
265
  def __repr__(self):
266
    """Implement __repr__ for ConfigObjects."""
267
    return repr(self.ToDict())
268

    
269
  def UpgradeConfig(self):
270
    """Fill defaults for missing configuration values.
271

272
    This method will be called at configuration load time, and its
273
    implementation will be object dependent.
274

275
    """
276
    pass
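
# Illustrative round-trip (editor's note, not part of the original module):
# any ConfigObject subclass can be serialised and rebuilt via ToDict/FromDict,
# and Copy() is simply the composition of the two, e.g.:
#   nic = NIC(mac="aa:00:00:00:00:01", ip=None)
#   NIC.FromDict(nic.ToDict()).mac == nic.mac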


class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
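
# Illustrative tag handling (editor's note): tags live in a set at runtime but
# are serialised as a list, e.g. for a hypothetical taggable object `obj`:
#   obj.AddTag("env:prod")        # validated against VALID_TAG_RE
#   obj.ToDict()["tags"]          # -> a plain list such as ["env:prod"]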


class MasterNetworkParameters(ConfigObject):
  """Network configuration parameters for the master.

  @ivar uuid: master node's UUID
  @ivar ip: master IP
  @ivar netmask: master netmask
  @ivar netdev: master network device
  @ivar ip_family: master IP family

  """
  __slots__ = [
    "uuid",
    "ip",
    "netmask",
    "netdev",
    "ip_family",
    ]


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = [
    "version",
    "cluster",
    "nodes",
    "nodegroups",
    "instances",
    "networks",
    "serial_no",
    ] + _TIMESTAMPS

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances", "nodegroups", "networks":
      mydict[key] = outils.ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data.

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = \
      outils.ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = \
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check whether any disk of the given type exists in the configuration.

    @type dev_type: L{constants.DTS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    self._UpgradeEnabledDiskTemplates()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
      InstancePolicy.UpgradeDiskTemplates(
        nodegroup.ipolicy, self.cluster.enabled_disk_templates)
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set a helper let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.DT_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
    if self.networks is None:
      self.networks = {}
    for network in self.networks.values():
      network.UpgradeConfig()

  def _UpgradeEnabledDiskTemplates(self):
    """Upgrade the cluster's enabled disk templates by inspecting the currently
       enabled and/or used disk templates.

    """
    # enabled_disk_templates in the cluster config were introduced in 2.8.
    # Remove this code once upgrading from earlier versions is deprecated.
    if not self.cluster.enabled_disk_templates:
      template_set = \
        set([inst.disk_template for inst in self.instances.values()])
      # Add drbd and plain, if lvm is enabled (by specifying a volume group)
      if self.cluster.volume_group_name:
        template_set.add(constants.DT_DRBD8)
        template_set.add(constants.DT_PLAIN)
      # Set enabled_disk_templates to the inferred disk templates. Order them
      # according to a preference list that is based on Ganeti's history of
      # supported disk templates.
      self.cluster.enabled_disk_templates = []
      for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
        if preferred_template in template_set:
          self.cluster.enabled_disk_templates.append(preferred_template)
          template_set.remove(preferred_template)
      self.cluster.enabled_disk_templates.extend(list(template_set))
    InstancePolicy.UpgradeDiskTemplates(
      self.cluster.ipolicy, self.cluster.enabled_disk_templates)

class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["name", "mac", "ip", "network", "nicparams", "netinfo"] + _UUID

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams:  dict
    @param nicparams: dictionary with parameter names/values
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    mode = nicparams[constants.NIC_MODE]
    if (mode not in constants.NIC_VALID_MODES and
        mode != constants.VALUE_AUTO):
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)

    if (mode == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      raise errors.ConfigurationError("Missing bridged NIC link")


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = (["name", "dev_type", "logical_id", "physical_id",
                "children", "iv_name", "size", "mode", "params", "spindles"] +
               _UUID)

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.DT_PLAIN,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) always live at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.DT_PLAIN:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    elif self.dev_type == constants.DT_BLOCK:
      return self.logical_id[1]
    elif self.dev_type == constants.DT_RBD:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.DT_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.DTS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node_uuid):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.DT_PLAIN, constants.DT_FILE,
                         constants.DT_BLOCK, constants.DT_RBD,
                         constants.DT_EXT, constants.DT_SHARED_FILE]:
      result = [node_uuid]
    elif self.dev_type in constants.DTS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node_uuid not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node_uuid):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node UUID, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node_uuid)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def ComputeGrowth(self, amount):
    """Compute the per-VG growth requirements.

    This only works for VG-based disks.

    @type amount: integer
    @param amount: the desired increase in (user-visible) disk space
    @rtype: dict
    @return: a dictionary of volume-groups and the required size

    """
    if self.dev_type == constants.DT_PLAIN:
      return {self.logical_id[0]: amount}
    elif self.dev_type == constants.DT_DRBD8:
      if self.children:
        return self.children[0].ComputeGrowth(amount)
      else:
        return {}
    else:
      # Other disk types do not require VG space
      return {}

    
656
  def RecordGrow(self, amount):
657
    """Update the size of this disk after growth.
658

659
    This method recurses over the disks's children and updates their
660
    size correspondigly. The method needs to be kept in sync with the
661
    actual algorithms from bdev.
662

663
    """
664
    if self.dev_type in (constants.DT_PLAIN, constants.DT_FILE,
665
                         constants.DT_RBD, constants.DT_EXT,
666
                         constants.DT_SHARED_FILE):
667
      self.size += amount
668
    elif self.dev_type == constants.DT_DRBD8:
669
      if self.children:
670
        self.children[0].RecordGrow(amount)
671
      self.size += amount
672
    else:
673
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
674
                                   " disk type %s" % self.dev_type)
675

    
676
  def Update(self, size=None, mode=None, spindles=None):
677
    """Apply changes to size, spindles and mode.
678

679
    """
680
    if self.dev_type == constants.DT_DRBD8:
681
      if self.children:
682
        self.children[0].Update(size=size, mode=mode)
683
    else:
684
      assert not self.children
685

    
686
    if size is not None:
687
      self.size = size
688
    if mode is not None:
689
      self.mode = mode
690
    if spindles is not None:
691
      self.spindles = spindles
692

    
693
  def UnsetSize(self):
694
    """Sets recursively the size to zero for the disk and its children.
695

696
    """
697
    if self.children:
698
      for child in self.children:
699
        child.UnsetSize()
700
    self.size = 0
701

    
702
  def SetPhysicalID(self, target_node_uuid, nodes_ip):
703
    """Convert the logical ID to the physical ID.
704

705
    This is used only for drbd, which needs ip/port configuration.
706

707
    The routine descends down and updates its children also, because
708
    this helps when the only the top device is passed to the remote
709
    node.
710

711
    Arguments:
712
      - target_node_uuid: the node UUID we wish to configure for
713
      - nodes_ip: a mapping of node name to ip
714

715
    The target_node must exist in in nodes_ip, and must be one of the
716
    nodes in the logical ID for each of the DRBD devices encountered
717
    in the disk tree.
718

719
    """
720
    if self.children:
721
      for child in self.children:
722
        child.SetPhysicalID(target_node_uuid, nodes_ip)
723

    
724
    if self.logical_id is None and self.physical_id is not None:
725
      return
726
    if self.dev_type in constants.DTS_DRBD:
727
      pnode_uuid, snode_uuid, port, pminor, sminor, secret = self.logical_id
728
      if target_node_uuid not in (pnode_uuid, snode_uuid):
729
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
730
                                        target_node_uuid)
731
      pnode_ip = nodes_ip.get(pnode_uuid, None)
732
      snode_ip = nodes_ip.get(snode_uuid, None)
733
      if pnode_ip is None or snode_ip is None:
734
        raise errors.ConfigurationError("Can't find primary or secondary node"
735
                                        " for %s" % str(self))
736
      p_data = (pnode_ip, port)
737
      s_data = (snode_ip, port)
738
      if pnode_uuid == target_node_uuid:
739
        self.physical_id = p_data + s_data + (pminor, secret)
740
      else: # it must be secondary, we tested above
741
        self.physical_id = s_data + p_data + (sminor, secret)
742
    else:
743
      self.physical_id = self.logical_id
744
    return
745

    
746
  def ToDict(self):
747
    """Disk-specific conversion to standard python types.
748

749
    This replaces the children lists of objects with lists of
750
    standard python types.
751

752
    """
753
    bo = super(Disk, self).ToDict()
754

    
755
    for attr in ("children",):
756
      alist = bo.get(attr, None)
757
      if alist:
758
        bo[attr] = outils.ContainerToDicts(alist)
759
    return bo
760

    
761
  @classmethod
762
  def FromDict(cls, val):
763
    """Custom function for Disks
764

765
    """
766
    obj = super(Disk, cls).FromDict(val)
767
    if obj.children:
768
      obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
769
    if obj.logical_id and isinstance(obj.logical_id, list):
770
      obj.logical_id = tuple(obj.logical_id)
771
    if obj.physical_id and isinstance(obj.physical_id, list):
772
      obj.physical_id = tuple(obj.physical_id)
773
    if obj.dev_type in constants.DTS_DRBD:
774
      # we need a tuple of length six here
775
      if len(obj.logical_id) < 6:
776
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
777
    return obj
778

    
779
  def __str__(self):
780
    """Custom str() formatter for disks.
781

782
    """
783
    if self.dev_type == constants.DT_PLAIN:
784
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
785
    elif self.dev_type in constants.DTS_DRBD:
786
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
787
      val = "<DRBD8("
788
      if self.physical_id is None:
789
        phy = "unconfigured"
790
      else:
791
        phy = ("configured as %s:%s %s:%s" %
792
               (self.physical_id[0], self.physical_id[1],
793
                self.physical_id[2], self.physical_id[3]))
794

    
795
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
796
              (node_a, minor_a, node_b, minor_b, port, phy))
797
      if self.children and self.children.count(None) == 0:
798
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
799
      else:
800
        val += "no local storage"
801
    else:
802
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
803
             (self.dev_type, self.logical_id, self.physical_id, self.children))
804
    if self.iv_name is None:
805
      val += ", not visible"
806
    else:
807
      val += ", visible as /dev/%s" % self.iv_name
808
    if self.spindles is not None:
809
      val += ", spindles=%s" % self.spindles
810
    if isinstance(self.size, int):
811
      val += ", size=%dm)>" % self.size
812
    else:
813
      val += ", size='%s')>" % (self.size,)
814
    return val
815

    
816
  def Verify(self):
817
    """Checks that this disk is correctly configured.
818

819
    """
820
    all_errors = []
821
    if self.mode not in constants.DISK_ACCESS_SET:
822
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
823
    return all_errors
824

    
825
  def UpgradeConfig(self):
826
    """Fill defaults for missing configuration values.
827

828
    """
829
    if self.children:
830
      for child in self.children:
831
        child.UpgradeConfig()
832

    
833
    # FIXME: Make this configurable in Ganeti 2.7
834
    self.params = {}
835
    # add here config upgrade for this disk
836

    
837
    # map of legacy device types (mapping differing LD constants to new
838
    # DT constants)
839
    LEG_DEV_TYPE_MAP = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
840
    if self.dev_type in LEG_DEV_TYPE_MAP:
841
      self.dev_type = LEG_DEV_TYPE_MAP[self.dev_type]
842

    
843
  @staticmethod
844
  def ComputeLDParams(disk_template, disk_params):
845
    """Computes Logical Disk parameters from Disk Template parameters.
846

847
    @type disk_template: string
848
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
849
    @type disk_params: dict
850
    @param disk_params: disk template parameters;
851
                        dict(template_name -> parameters
852
    @rtype: list(dict)
853
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
854
      contains the LD parameters of the node. The tree is flattened in-order.
855

856
    """
857
    if disk_template not in constants.DISK_TEMPLATES:
858
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
859

    
860
    assert disk_template in disk_params
861

    
862
    result = list()
863
    dt_params = disk_params[disk_template]
864
    if disk_template == constants.DT_DRBD8:
865
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_DRBD8], {
866
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
867
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
868
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
869
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
870
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
871
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
872
        constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL],
873
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
874
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
875
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
876
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
877
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
878
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
879
        }))
880

    
881
      # data LV
882
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
883
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
884
        }))
885

    
886
      # metadata LV
887
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
888
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
889
        }))
890

    
891
    elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
892
      result.append(constants.DISK_LD_DEFAULTS[disk_template])
893

    
894
    elif disk_template == constants.DT_PLAIN:
895
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
896
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
897
        }))
898

    
899
    elif disk_template == constants.DT_BLOCK:
900
      result.append(constants.DISK_LD_DEFAULTS[constants.DT_BLOCK])
901

    
902
    elif disk_template == constants.DT_RBD:
903
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_RBD], {
904
        constants.LDP_POOL: dt_params[constants.RBD_POOL],
905
        }))
906

    
907
    elif disk_template == constants.DT_EXT:
908
      result.append(constants.DISK_LD_DEFAULTS[constants.DT_EXT])
909

    
910
    return result
911

    
912

    
class InstancePolicy(ConfigObject):
  """Config object representing instance policy limits dictionary.

  Note that this object is not actually used in the config, it's just
  used as a placeholder for a few functions.

  """
  @classmethod
  def UpgradeDiskTemplates(cls, ipolicy, enabled_disk_templates):
    """Upgrades the ipolicy configuration."""
    if constants.IPOLICY_DTS in ipolicy:
      if not set(ipolicy[constants.IPOLICY_DTS]).issubset(
        set(enabled_disk_templates)):
        ipolicy[constants.IPOLICY_DTS] = list(
          set(ipolicy[constants.IPOLICY_DTS]) & set(enabled_disk_templates))

  @classmethod
  def CheckParameterSyntax(cls, ipolicy, check_std):
    """Check the instance policy for validity.

    @type ipolicy: dict
    @param ipolicy: dictionary with min/max/std specs and policies
    @type check_std: bool
    @param check_std: Whether to check std value or just assume compliance
    @raise errors.ConfigurationError: when the policy is not legal

    """
    InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
    if constants.IPOLICY_DTS in ipolicy:
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
    for key in constants.IPOLICY_PARAMETERS:
      if key in ipolicy:
        InstancePolicy.CheckParameter(key, ipolicy[key])
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
    if wrong_keys:
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
                                      utils.CommaJoin(wrong_keys))

  @classmethod
  def _CheckIncompleteSpec(cls, spec, keyname):
    missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
    if missing_params:
      msg = ("Missing instance specs parameters for %s: %s" %
             (keyname, utils.CommaJoin(missing_params)))
      raise errors.ConfigurationError(msg)

  @classmethod
  def CheckISpecSyntax(cls, ipolicy, check_std):
    """Check the instance policy specs for validity.

    @type ipolicy: dict
    @param ipolicy: dictionary with min/max/std specs
    @type check_std: bool
    @param check_std: Whether to check std value or just assume compliance
    @raise errors.ConfigurationError: when specs are not valid

    """
    if constants.ISPECS_MINMAX not in ipolicy:
      # Nothing to check
      return

    if check_std and constants.ISPECS_STD not in ipolicy:
      msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
      raise errors.ConfigurationError(msg)
    stdspec = ipolicy.get(constants.ISPECS_STD)
    if check_std:
      InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)

    if not ipolicy[constants.ISPECS_MINMAX]:
      raise errors.ConfigurationError("Empty minmax specifications")
    std_is_good = False
    for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
      missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
      if missing:
        msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
        raise errors.ConfigurationError(msg)
      for (key, spec) in minmaxspecs.items():
        InstancePolicy._CheckIncompleteSpec(spec, key)

      spec_std_ok = True
      for param in constants.ISPECS_PARAMETERS:
        par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
                                                           param, check_std)
        spec_std_ok = spec_std_ok and par_std_ok
      std_is_good = std_is_good or spec_std_ok
    if not std_is_good:
      raise errors.ConfigurationError("Invalid std specifications")

  @classmethod
  def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
    """Check the instance policy specs for validity on a given key.

    We check if the instance specs make sense for a given key, that is
    if minmaxspecs[min][name] <= stdspec[name] <= minmaxspecs[max][name].

    @type minmaxspecs: dict
    @param minmaxspecs: dictionary with min and max instance spec
    @type stdspec: dict
    @param stdspec: dictionary with standard instance spec
    @type name: string
    @param name: what the limits are for
    @type check_std: bool
    @param check_std: Whether to check std value or just assume compliance
    @rtype: bool
    @return: C{True} when specs are valid, C{False} when standard spec for the
        given name is not valid
    @raise errors.ConfigurationError: when min/max specs for the given name
        are not valid

    """
    minspec = minmaxspecs[constants.ISPECS_MIN]
    maxspec = minmaxspecs[constants.ISPECS_MAX]
    min_v = minspec[name]
    max_v = maxspec[name]

    if min_v > max_v:
      err = ("Invalid specification of min/max values for %s: %s/%s" %
             (name, min_v, max_v))
      raise errors.ConfigurationError(err)
    elif check_std:
      std_v = stdspec.get(name, min_v)
      return std_v >= min_v and std_v <= max_v
    else:
      return True
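
  # Worked example (editor's note): with a minimum of 1, a maximum of 8 and a
  # std value of 4 for some parameter, the check above passes (1 <= 4 <= 8);
  # a std value of 16 would make this minmax group report a non-compliant
  # std spec.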

  @classmethod
  def CheckDiskTemplates(cls, disk_templates):
    """Checks the disk templates for validity.

    """
    if not disk_templates:
      raise errors.ConfigurationError("Instance policy must contain" +
                                      " at least one disk template")
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
    if wrong:
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
                                      utils.CommaJoin(wrong))

  @classmethod
  def CheckParameter(cls, key, value):
    """Checks a parameter.

    Currently we expect all parameters to be float values.

    """
    try:
      float(value)
    except (TypeError, ValueError), err:
      raise errors.ConfigurationError("Invalid value for key '%s':"
                                      " '%s', error: %s" % (key, value, err))


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "osparams",
    "admin_state",
    "nics",
    "disks",
    "disk_template",
    "disks_active",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of names of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.DTS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of names of all the nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node_uuid=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @type lvmap: dict
    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.
    @type devs: list of L{Disk}
    @param devs: disks to get the LV name for. If None, all disks of this
        instance are used.
    @type node_uuid: string
    @param node_uuid: UUID of the node to get the LV names for. If None, the
        primary node of this instance is used.
    @return: None if lvmap arg is given, otherwise, a dictionary of
        the form { 'node_uuid' : ['volume1', 'volume2', ...], ... };
        volumeN is of the form "vg_name/lv_name", compatible with
        GetVolumeList()

    """
    if node_uuid is None:
      node_uuid = self.primary_node

    if lvmap is None:
      lvmap = {
        node_uuid: [],
        }
      ret = lvmap
    else:
      if not node_uuid in lvmap:
        lvmap[node_uuid] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.DT_PLAIN:
        lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1])

      elif dev.dev_type in constants.DTS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node_uuid)

    return ret

  def FindDisk(self, idx):
    """Find a disk with the specified index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except (TypeError, ValueError), err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
                                 errors.ECODE_INVAL)

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = outils.ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    if "admin_state" not in val:
      if val.get("admin_up", False):
        val["admin_state"] = constants.ADMINST_UP
      else:
        val["admin_state"] = constants.ADMINST_DOWN
    if "admin_up" in val:
      del val["admin_up"]
    obj = super(Instance, cls).FromDict(val)
    obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    if self.hvparams:
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass
    if self.osparams is None:
      self.osparams = {}
    UpgradeBeParams(self.beparams)
    if self.disks_active is None:
      self.disks_active = self.admin_state == constants.ADMINST_UP


class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the parameters supported by this OS

  @type VARIANT_DELIM: string
  @cvar VARIANT_DELIM: the variant delimiter

  """
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]

  VARIANT_DELIM = "+"

  @classmethod
  def SplitNameVariant(cls, name):
    """Splits the name into the proper name and variant.

    @param name: the OS (unprocessed) name
    @rtype: list
    @return: a list of two elements; if the original name didn't
        contain a variant, it's returned as an empty string

    """
    nv = name.split(cls.VARIANT_DELIM, 1)
    if len(nv) == 1:
      nv.append("")
    return nv
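
  # Illustrative split (editor's note): "debian+wheezy" yields
  # ["debian", "wheezy"], while a plain "debian" yields ["debian", ""], so
  # GetName()/GetVariant() below can always index the result safely.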

  @classmethod
  def GetName(cls, name):
    """Returns the proper name of the os (without the variant).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[0]

  @classmethod
  def GetVariant(cls, name):
    """Returns the variant of the OS (without the base name).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[1]


class ExtStorage(ConfigObject):
  """Config object representing an External Storage Provider.

  """
  __slots__ = [
    "name",
    "path",
    "create_script",
    "remove_script",
    "grow_script",
    "attach_script",
    "detach_script",
    "setinfo_script",
    "verify_script",
    "supported_parameters",
    ]


class NodeHvState(ConfigObject):
  """Hypervisor state on a node.

  @ivar mem_total: Total amount of memory
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
    available)
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
    rounding
  @ivar mem_inst: Memory used by instances living on node
  @ivar cpu_total: Total node CPU core count
  @ivar cpu_node: Number of CPU cores reserved for the node itself

  """
  __slots__ = [
    "mem_total",
    "mem_node",
    "mem_hv",
    "mem_inst",
    "cpu_total",
    "cpu_node",
    ] + _TIMESTAMPS


class NodeDiskState(ConfigObject):
  """Disk state on a node.

  """
  __slots__ = [
    "total",
    "reserved",
    "overhead",
    ] + _TIMESTAMPS


class Node(TaggableObject):
  """Config object representing a node.

  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
  @ivar hv_state_static: Hypervisor state overridden by user
  @ivar disk_state: Disk state (e.g. free space)
  @ivar disk_state_static: Disk state overridden by user

  """
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    "group",
    "master_capable",
    "vm_capable",
    "ndparams",
    "powered",
    "hv_state",
    "hv_state_static",
    "disk_state",
    "disk_state_static",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.master_capable is None:
      self.master_capable = True

    if self.vm_capable is None:
      self.vm_capable = True

    if self.ndparams is None:
      self.ndparams = {}
    # And remove any global parameter
    for key in constants.NDC_GLOBALS:
      if key in self.ndparams:
        logging.warning("Ignoring %s node parameter for node %s",
                        key, self.name)
        del self.ndparams[key]

    if self.powered is None:
      self.powered = True

  def ToDict(self):
    """Custom function for serializing.

    """
    data = super(Node, self).ToDict()

    hv_state = data.get("hv_state", None)
    if hv_state is not None:
      data["hv_state"] = outils.ContainerToDicts(hv_state)

    disk_state = data.get("disk_state", None)
    if disk_state is not None:
      data["disk_state"] = \
        dict((key, outils.ContainerToDicts(value))
             for (key, value) in disk_state.items())

    return data

  @classmethod
  def FromDict(cls, val):
    """Custom function for deserializing.

    """
    obj = super(Node, cls).FromDict(val)

    if obj.hv_state is not None:
      obj.hv_state = \
        outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)

    if obj.disk_state is not None:
      obj.disk_state = \
        dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
             for (key, value) in obj.disk_state.items())

    return obj


class NodeGroup(TaggableObject):
  """Config object representing a node group."""
  __slots__ = [
    "name",
    "members",
    "ndparams",
    "diskparams",
    "ipolicy",
    "serial_no",
    "hv_state_static",
    "disk_state_static",
    "alloc_policy",
    "networks",
    ] + _TIMESTAMPS + _UUID

  def ToDict(self):
    """Custom function for nodegroup.

    This discards the members object, which gets recalculated and is only kept
    in memory.

    """
    mydict = super(NodeGroup, self).ToDict()
    del mydict["members"]
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for nodegroup.

    The members slot is initialized to an empty list, upon deserialization.

    """
    obj = super(NodeGroup, cls).FromDict(val)
    obj.members = []
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.ndparams is None:
      self.ndparams = {}

    if self.serial_no is None:
      self.serial_no = 1

    if self.alloc_policy is None:
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED

    # We only update mtime, and not ctime, since we would not be able
    # to provide a correct value for creation time.
    if self.mtime is None:
      self.mtime = time.time()

    if self.diskparams is None:
      self.diskparams = {}
    if self.ipolicy is None:
      self.ipolicy = MakeEmptyIPolicy()

    if self.networks is None:
      self.networks = {}

  def FillND(self, node):
    """Return filled out ndparams for L{objects.Node}.

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(node.ndparams)

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the node group defaults

    """
    return FillDict(self.ndparams, ndparams)
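
  # Illustrative example (editor's note, hypothetical parameter name): with
  # group ndparams {"foo": 1}, SimpleFillND({}) returns {"foo": 1}, while
  # SimpleFillND({"foo": 2}) lets the node-level value win.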


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "dsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "master_netmask",
    "use_external_mip_script",
    "cluster_name",
    "file_storage_dir",
    "shared_file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "ipolicy",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "ndparams",
    "diskparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    "hv_state_static",
    "disk_state_static",
    "enabled_disk_templates",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.ndparams = UpgradeNDParams(self.ndparams)

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    if self.diskparams:
      self.diskparams = UpgradeDiskParams(self.diskparams)
    else:
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
      if wrongkeys:
        # These keys would be silently removed by FillIPolicy()
        msg = ("Cluster instance policy contains spurious keys: %s" %
               utils.CommaJoin(wrongkeys))
        raise errors.ConfigurationError(msg)
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
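
  # Note on the check above (hypothetical key name): a configuration whose
  # ipolicy contains e.g. "bogus-key" is rejected with
  # "Cluster instance policy contains spurious keys: bogus-key" rather than
  # having the key silently dropped by FillIPolicy().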

  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()

    if self.tcpudp_port_pool is None:
      tcpudp_port_pool = []
    else:
      tcpudp_port_pool = list(self.tcpudp_port_pool)

    mydict["tcpudp_port_pool"] = tcpudp_port_pool

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)

    if obj.tcpudp_port_pool is None:
      obj.tcpudp_port_pool = set()
    elif not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)

    return obj
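
  # Round-trip note (illustrative values): tcpudp_port_pool is kept as a set
  # in memory but serialized as a list, e.g.
  #
  #   cluster.tcpudp_port_pool = set([11000, 11001])
  #   data = cluster.ToDict()   # data["tcpudp_port_pool"] is a list
  #   Cluster.FromDict(data).tcpudp_port_pool == set([11000, 11001])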

  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @type diskparams: dict
    @param diskparams: the disk parameters to fill
    @return: a copy of the passed in diskparams with missing values filled
        from the cluster defaults

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type hvparams: dict
    @param hvparams: the dict to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
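
  # Precedence sketch (hypothetical values): cluster-wide hvparams are the
  # base, per-OS overrides from os_hvp sit on top, and the caller-supplied
  # hvparams win.  For a key such as "kernel_path":
  #
  #   self.hvparams[hv]   -> "/boot/vmlinuz-default"
  #   self.os_hvp[os][hv] -> "/boot/vmlinuz-os"
  #   SimpleFillHV(hv, os, {"kernel_path": "/boot/vmlinuz-inst"})
  #   # -> keeps the caller's "/boot/vmlinuz-inst"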

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
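
  # Layering sketch (made-up OS name): for os_name "debian+testing" the
  # parameters are taken first from self.osparams["debian"], then overridden
  # by self.osparams["debian+testing"], and finally by the os_params passed
  # in by the caller.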

  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of the passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)

  def IsDiskTemplateEnabled(self, disk_template):
    """Checks if a particular disk template is enabled.

    """
    return utils.storage.IsDiskTemplateEnabled(
        disk_template, self.enabled_disk_templates)

  def IsFileStorageEnabled(self):
    """Checks if file storage is enabled.

    """
    return utils.storage.IsFileStorageEnabled(self.enabled_disk_templates)

  def IsSharedFileStorageEnabled(self):
    """Checks if shared file storage is enabled.

    """
    return utils.storage.IsSharedFileStorageEnabled(
        self.enabled_disk_templates)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = \
      outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True
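
  # Example of a console object that passes Validate() (hypothetical values):
  #
  #   InstanceConsole(instance="inst1.example.com", kind=constants.CONS_SSH,
  #                   host="node1.example.com", user="root",
  #                   command=["ssh", "-t", "node1.example.com"])
  #
  # For CONS_SSH consoles, message, port and display may be left unset.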


class Network(TaggableObject):
  """Object representing a network definition for ganeti.

  """
  __slots__ = [
    "name",
    "serial_no",
    "mac_prefix",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "reservations",
    "ext_reservations",
    ] + _TIMESTAMPS + _UUID

  def HooksDict(self, prefix=""):
    """Export a dictionary used by hooks with a network's information.

    @type prefix: String
    @param prefix: Prefix to prepend to the dict entries

    """
    result = {
      "%sNETWORK_NAME" % prefix: self.name,
      "%sNETWORK_UUID" % prefix: self.uuid,
      "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
    }
    if self.network:
      result["%sNETWORK_SUBNET" % prefix] = self.network
    if self.gateway:
      result["%sNETWORK_GATEWAY" % prefix] = self.gateway
    if self.network6:
      result["%sNETWORK_SUBNET6" % prefix] = self.network6
    if self.gateway6:
      result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
    if self.mac_prefix:
      result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix

    return result
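
  # Example result (made-up network values): for a network named "net1" with
  # network "10.0.0.0/24" and gateway "10.0.0.1", HooksDict("NEW_") contains,
  # among others:
  #
  #   "NEW_NETWORK_NAME": "net1"
  #   "NEW_NETWORK_SUBNET": "10.0.0.0/24"
  #   "NEW_NETWORK_GATEWAY": "10.0.0.1"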

  @classmethod
  def FromDict(cls, val):
    """Custom function for networks.

    Remove deprecated network_type and family.

    """
    if "network_type" in val:
      del val["network_type"]
    if "family" in val:
      del val["family"]
    obj = super(Network, cls).FromDict(val)
    return obj


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
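
  # Round-trip sketch (arbitrary section/option names): Dumps() and Loads()
  # are inverses for plain INI data, e.g.
  #
  #   scp = SerializableConfigParser()
  #   scp.add_section("default")
  #   scp.set("default", "key", "value")
  #   text = scp.Dumps()
  #   SerializableConfigParser.Loads(text).get("default", "key")  # "value"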


class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  @type lv_list: list of strings
  @ivar lv_list: names of the LVs hosted on the PV
  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    "lv_list"
    ]

  def IsEmpty(self):
    """Is this PV empty?

    """
    return self.size <= (self.free + 1)

  def IsAllocatable(self):
    """Is this PV allocatable?

    """
    return ("a" in self.attributes)