
1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable=E0203,W0201,R0902
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
# R0902: Allow instances of these objects to have more than 20 attributes
37

    
38
import ConfigParser
39
import re
40
import copy
41
import logging
42
import time
43
from cStringIO import StringIO
44

    
45
from ganeti import errors
46
from ganeti import constants
47
from ganeti import netutils
48
from ganeti import outils
49
from ganeti import utils
50

    
51
from socket import AF_INET
52

    
53

    
54
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
55
           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
56

    
57
_TIMESTAMPS = ["ctime", "mtime"]
58
_UUID = ["uuid"]
59

    
60

    
61
def FillDict(defaults_dict, custom_dict, skip_keys=None):
62
  """Basic function to apply settings on top a default dict.
63

64
  @type defaults_dict: dict
65
  @param defaults_dict: dictionary holding the default values
66
  @type custom_dict: dict
67
  @param custom_dict: dictionary holding customized value
68
  @type skip_keys: list
69
  @param skip_keys: which keys not to fill
70
  @rtype: dict
71
  @return: dict with the 'full' values
72

73
  """
74
  ret_dict = copy.deepcopy(defaults_dict)
75
  ret_dict.update(custom_dict)
76
  if skip_keys:
77
    for k in skip_keys:
78
      try:
79
        del ret_dict[k]
80
      except KeyError:
81
        pass
82
  return ret_dict
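# Illustrative sketch, not part of the original module: FillDict layers the
# customised values over a deep copy of the defaults and then drops any
# skipped keys, so neither input dict is modified in place. Key names below
# are made up.
def _ExampleFillDict():
  defaults = {"mode": "bridged", "link": "xen-br0", "vlan": ""}
  custom = {"link": "br100"}
  # Returns {"mode": "bridged", "link": "br100"}: "link" is overridden and
  # "vlan" is dropped because it is listed in skip_keys.
  return FillDict(defaults, custom, skip_keys=["vlan"])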
83

    
84

    
85
def FillIPolicy(default_ipolicy, custom_ipolicy):
86
  """Fills an instance policy with defaults.
87

88
  """
89
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
90
  ret_dict = copy.deepcopy(custom_ipolicy)
91
  for key in default_ipolicy:
92
    if key not in ret_dict:
93
      ret_dict[key] = copy.deepcopy(default_ipolicy[key])
94
    elif key == constants.ISPECS_STD:
95
      ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
96
  return ret_dict
97

    
98

    
99
def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
100
  """Fills the disk parameter defaults.
101

102
  @see: L{FillDict} for parameters and return value
103

104
  """
105
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
106

    
107
  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
108
                             skip_keys=skip_keys))
109
              for dt in constants.DISK_TEMPLATES)
110

    
111

    
112
def UpgradeGroupedParams(target, defaults):
113
  """Update all groups for the target parameter.
114

115
  @type target: dict of dicts
116
  @param target: {group: {parameter: value}}
117
  @type defaults: dict
118
  @param defaults: default parameter values
119

120
  """
121
  if target is None:
122
    target = {constants.PP_DEFAULT: defaults}
123
  else:
124
    for group in target:
125
      target[group] = FillDict(defaults, target[group])
126
  return target
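# Illustrative sketch, not part of the original module (group and parameter
# names are made up): a None target becomes a single constants.PP_DEFAULT
# group holding the defaults, while existing groups are filled via FillDict.
def _ExampleUpgradeGroupedParams():
  target = {"group1": {"param": "custom"}}
  defaults = {"param": "default", "other": 42}
  # Returns {"group1": {"param": "custom", "other": 42}}
  return UpgradeGroupedParams(target, defaults)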
127

    
128

    
129
def UpgradeBeParams(target):
130
  """Update the be parameters dict to the new format.
131

132
  @type target: dict
133
  @param target: "be" parameters dict
134

135
  """
136
  if constants.BE_MEMORY in target:
137
    memory = target[constants.BE_MEMORY]
138
    target[constants.BE_MAXMEM] = memory
139
    target[constants.BE_MINMEM] = memory
140
    del target[constants.BE_MEMORY]
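# Illustrative sketch, not part of the original module: the legacy single
# "memory" backend parameter is split in place into equal maximum and minimum
# memory values.
def _ExampleUpgradeBeParams():
  beparams = {constants.BE_MEMORY: 128}
  UpgradeBeParams(beparams)
  # beparams is now {constants.BE_MAXMEM: 128, constants.BE_MINMEM: 128}
  return beparams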
141

    
142

    
143
def UpgradeDiskParams(diskparams):
144
  """Upgrade the disk parameters.
145

146
  @type diskparams: dict
147
  @param diskparams: disk parameters to upgrade
148
  @rtype: dict
149
  @return: the upgraded disk parameters dict
150

151
  """
152
  if not diskparams:
153
    result = {}
154
  else:
155
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
156

    
157
  return result
158

    
159

    
160
def UpgradeNDParams(ndparams):
161
  """Upgrade ndparams structure.
162

163
  @type ndparams: dict
164
  @param ndparams: disk parameters to upgrade
165
  @rtype: dict
166
  @return: the upgraded node parameters dict
167

168
  """
169
  if ndparams is None:
170
    ndparams = {}
171

    
172
  if (constants.ND_OOB_PROGRAM in ndparams and
173
      ndparams[constants.ND_OOB_PROGRAM] is None):
174
    # will be reset by the line below
175
    del ndparams[constants.ND_OOB_PROGRAM]
176
  return FillDict(constants.NDC_DEFAULTS, ndparams)
177

    
178

    
179
def MakeEmptyIPolicy():
180
  """Create empty IPolicy dictionary.
181

182
  """
183
  return {}
184

    
185

    
186
class ConfigObject(outils.ValidatedSlots):
187
  """A generic config object.
188

189
  It has the following properties:
190

191
    - provides somewhat safe recursive unpickling and pickling for its classes
192
    - unset attributes which are defined in slots are always returned
193
      as None instead of raising an error
194

195
  Classes derived from this must always declare __slots__ (we use many
196
  config objects and the memory reduction is useful)
197

198
  """
199
  __slots__ = []
200

    
201
  def __getattr__(self, name):
202
    if name not in self.GetAllSlots():
203
      raise AttributeError("Invalid object attribute %s.%s" %
204
                           (type(self).__name__, name))
205
    return None
206

    
207
  def __setstate__(self, state):
208
    slots = self.GetAllSlots()
209
    for name in state:
210
      if name in slots:
211
        setattr(self, name, state[name])
212

    
213
  def Validate(self):
214
    """Validates the slots.
215

216
    """
217

    
218
  def ToDict(self):
219
    """Convert to a dict holding only standard python types.
220

221
    The generic routine just dumps all of this object's attributes in
222
    a dict. It does not work if the class has children who are
223
    ConfigObjects themselves (e.g. the nics list in an Instance), in
224
    which case the object should subclass the function in order to
225
    make sure all objects returned are only standard python types.
226

227
    """
228
    result = {}
229
    for name in self.GetAllSlots():
230
      value = getattr(self, name, None)
231
      if value is not None:
232
        result[name] = value
233
    return result
234

    
235
  __getstate__ = ToDict
236

    
237
  @classmethod
238
  def FromDict(cls, val):
239
    """Create an object from a dictionary.
240

241
    This generic routine takes a dict, instantiates a new instance of
242
    the given class, and sets attributes based on the dict content.
243

244
    As for `ToDict`, this does not work if the class has children
245
    who are ConfigObjects themselves (e.g. the nics list in an
246
    Instance), in which case the object should subclass the function
247
    and alter the objects.
248

249
    """
250
    if not isinstance(val, dict):
251
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
252
                                      " expected dict, got %s" % type(val))
253
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
254
    obj = cls(**val_str) # pylint: disable=W0142
255
    return obj
256

    
257
  def Copy(self):
258
    """Makes a deep copy of the current object and its children.
259

260
    """
261
    dict_form = self.ToDict()
262
    clone_obj = self.__class__.FromDict(dict_form)
263
    return clone_obj
264

    
265
  def __repr__(self):
266
    """Implement __repr__ for ConfigObjects."""
267
    return repr(self.ToDict())
268

    
269
  def UpgradeConfig(self):
270
    """Fill defaults for missing configuration values.
271

272
    This method will be called at configuration load time, and its
273
    implementation will be object dependent.
274

275
    """
276
    pass
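# Illustrative sketch, not part of the original module: any ConfigObject
# subclass only needs to declare __slots__ to get dict round-trips and
# None-defaults for unset attributes. The class and function below are
# hypothetical examples.
class _ExamplePoint(ConfigObject):
  __slots__ = ["x", "y"]


def _ExampleConfigObjectRoundTrip():
  point = _ExamplePoint(x=1)        # slot values as keyword arguments,
                                    # exactly as FromDict passes them
  assert point.y is None            # unset slots read back as None
  data = point.ToDict()             # {"x": 1}; None values are omitted
  return _ExamplePoint.FromDict(data)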
277

    
278

    
279
class TaggableObject(ConfigObject):
280
  """An generic class supporting tags.
281

282
  """
283
  __slots__ = ["tags"]
284
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
285

    
286
  @classmethod
287
  def ValidateTag(cls, tag):
288
    """Check if a tag is valid.
289

290
    If the tag is invalid, an errors.TagError will be raised. The
291
    function has no return value.
292

293
    """
294
    if not isinstance(tag, basestring):
295
      raise errors.TagError("Invalid tag type (not a string)")
296
    if len(tag) > constants.MAX_TAG_LEN:
297
      raise errors.TagError("Tag too long (>%d characters)" %
298
                            constants.MAX_TAG_LEN)
299
    if not tag:
300
      raise errors.TagError("Tags cannot be empty")
301
    if not cls.VALID_TAG_RE.match(tag):
302
      raise errors.TagError("Tag contains invalid characters")
303

    
304
  def GetTags(self):
305
    """Return the tags list.
306

307
    """
308
    tags = getattr(self, "tags", None)
309
    if tags is None:
310
      tags = self.tags = set()
311
    return tags
312

    
313
  def AddTag(self, tag):
314
    """Add a new tag.
315

316
    """
317
    self.ValidateTag(tag)
318
    tags = self.GetTags()
319
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
320
      raise errors.TagError("Too many tags")
321
    self.GetTags().add(tag)
322

    
323
  def RemoveTag(self, tag):
324
    """Remove a tag.
325

326
    """
327
    self.ValidateTag(tag)
328
    tags = self.GetTags()
329
    try:
330
      tags.remove(tag)
331
    except KeyError:
332
      raise errors.TagError("Tag not found")
333

    
334
  def ToDict(self):
335
    """Taggable-object-specific conversion to standard python types.
336

337
    This replaces the tags set with a list.
338

339
    """
340
    bo = super(TaggableObject, self).ToDict()
341

    
342
    tags = bo.get("tags", None)
343
    if isinstance(tags, set):
344
      bo["tags"] = list(tags)
345
    return bo
346

    
347
  @classmethod
348
  def FromDict(cls, val):
349
    """Custom function for instances.
350

351
    """
352
    obj = super(TaggableObject, cls).FromDict(val)
353
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
354
      obj.tags = set(obj.tags)
355
    return obj
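# Illustrative sketch, not part of the original module: tags are validated
# against VALID_TAG_RE and kept in a set, which ToDict/FromDict above convert
# to and from a plain list. "obj" is any TaggableObject (e.g. a Node).
def _ExampleTags(obj):
  obj.AddTag("env:production")      # letters, digits, ":" etc. are allowed
  try:
    obj.AddTag("bad tag")           # spaces are rejected by VALID_TAG_RE
  except errors.TagError:
    pass
  return obj.GetTags()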
356

    
357

    
358
class MasterNetworkParameters(ConfigObject):
359
  """Network configuration parameters for the master
360

361
  @ivar uuid: master node's UUID
362
  @ivar ip: master IP
363
  @ivar netmask: master netmask
364
  @ivar netdev: master network device
365
  @ivar ip_family: master IP family
366

367
  """
368
  __slots__ = [
369
    "uuid",
370
    "ip",
371
    "netmask",
372
    "netdev",
373
    "ip_family",
374
    ]
375

    
376

    
377
class ConfigData(ConfigObject):
378
  """Top-level config object."""
379
  __slots__ = [
380
    "version",
381
    "cluster",
382
    "nodes",
383
    "nodegroups",
384
    "instances",
385
    "networks",
386
    "serial_no",
387
    ] + _TIMESTAMPS
388

    
389
  def ToDict(self):
390
    """Custom function for top-level config data.
391

392
    This just replaces the list of instances, nodes and the cluster
393
    with standard python types.
394

395
    """
396
    mydict = super(ConfigData, self).ToDict()
397
    mydict["cluster"] = mydict["cluster"].ToDict()
398
    for key in "nodes", "instances", "nodegroups", "networks":
399
      mydict[key] = outils.ContainerToDicts(mydict[key])
400

    
401
    return mydict
402

    
403
  @classmethod
404
  def FromDict(cls, val):
405
    """Custom function for top-level config data
406

407
    """
408
    obj = super(ConfigData, cls).FromDict(val)
409
    obj.cluster = Cluster.FromDict(obj.cluster)
410
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
411
    obj.instances = \
412
      outils.ContainerFromDicts(obj.instances, dict, Instance)
413
    obj.nodegroups = \
414
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
415
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
416
    return obj
417

    
418
  def HasAnyDiskOfType(self, dev_type):
419
    """Check if in there is at disk of the given type in the configuration.
420

421
    @type dev_type: L{constants.LDS_BLOCK}
422
    @param dev_type: the type to look for
423
    @rtype: boolean
424
    @return: boolean indicating if a disk of the given type was found or not
425

426
    """
427
    for instance in self.instances.values():
428
      for disk in instance.disks:
429
        if disk.IsBasedOnDiskType(dev_type):
430
          return True
431
    return False
432

    
433
  def UpgradeConfig(self):
434
    """Fill defaults for missing configuration values.
435

436
    """
437
    self.cluster.UpgradeConfig()
438
    for node in self.nodes.values():
439
      node.UpgradeConfig()
440
    for instance in self.instances.values():
441
      instance.UpgradeConfig()
442
    if self.nodegroups is None:
443
      self.nodegroups = {}
444
    for nodegroup in self.nodegroups.values():
445
      nodegroup.UpgradeConfig()
446
    if self.cluster.drbd_usermode_helper is None:
447
      # To decide if we set a helper, let's check if at least one instance has
448
      # a DRBD disk. This does not cover all the possible scenarios but it
449
      # gives a good approximation.
450
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
451
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
452
    if self.networks is None:
453
      self.networks = {}
454
    for network in self.networks.values():
455
      network.UpgradeConfig()
456
    self._UpgradeEnabledDiskTemplates()
457

    
458
  def _UpgradeEnabledDiskTemplates(self):
459
    """Upgrade the cluster's enabled disk templates by inspecting the currently
460
       enabled and/or used disk templates.
461

462
    """
463
    # enabled_disk_templates in the cluster config were introduced in 2.8.
464
    # Remove this code once upgrading from earlier versions is deprecated.
465
    if not self.cluster.enabled_disk_templates:
466
      template_set = \
467
        set([inst.disk_template for inst in self.instances.values()])
468
      # Add drbd and plain, if lvm is enabled (by specifying a volume group)
469
      if self.cluster.volume_group_name:
470
        template_set.add(constants.DT_DRBD8)
471
        template_set.add(constants.DT_PLAIN)
472
      # FIXME: Adapt this when dis/enabling at configure time is removed.
473
      # Enable 'sharedfile', if it is enabled, even though it might
474
      # currently not be used.
475
      if constants.ENABLE_SHARED_FILE_STORAGE:
476
        template_set.add(constants.DT_SHARED_FILE)
477
      # Set enabled_disk_templates to the inferred disk templates. Order them
478
      # according to a preference list that is based on Ganeti's history of
479
      # supported disk templates.
480
      self.cluster.enabled_disk_templates = []
481
      for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
482
        if preferred_template in template_set:
483
          self.cluster.enabled_disk_templates.append(preferred_template)
484
          template_set.remove(preferred_template)
485
      self.cluster.enabled_disk_templates.extend(list(template_set))
486

    
487

    
488
class NIC(ConfigObject):
489
  """Config object representing a network card."""
490
  __slots__ = ["name", "mac", "ip", "network", "nicparams", "netinfo"] + _UUID
491

    
492
  @classmethod
493
  def CheckParameterSyntax(cls, nicparams):
494
    """Check the given parameters for validity.
495

496
    @type nicparams:  dict
497
    @param nicparams: dictionary with parameter names/value
498
    @raise errors.ConfigurationError: when a parameter is not valid
499

500
    """
501
    mode = nicparams[constants.NIC_MODE]
502
    if (mode not in constants.NIC_VALID_MODES and
503
        mode != constants.VALUE_AUTO):
504
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
505

    
506
    if (mode == constants.NIC_MODE_BRIDGED and
507
        not nicparams[constants.NIC_LINK]):
508
      raise errors.ConfigurationError("Missing bridged NIC link")
509

    
510

    
511
class Disk(ConfigObject):
512
  """Config object representing a block device."""
513
  __slots__ = (["name", "dev_type", "logical_id", "physical_id",
514
                "children", "iv_name", "size", "mode", "params", "spindles"] +
515
               _UUID)
516

    
517
  def CreateOnSecondary(self):
518
    """Test if this device needs to be created on a secondary node."""
519
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
520

    
521
  def AssembleOnSecondary(self):
522
    """Test if this device needs to be assembled on a secondary node."""
523
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
524

    
525
  def OpenOnSecondary(self):
526
    """Test if this device needs to be opened on a secondary node."""
527
    return self.dev_type in (constants.LD_LV,)
528

    
529
  def StaticDevPath(self):
530
    """Return the device path if this device type has a static one.
531

532
    Some devices (LVM for example) always live at the same /dev/ path,
533
    irrespective of their status. For such devices, we return this
534
    path, for others we return None.
535

536
    @warning: The path returned is not a normalized pathname; callers
537
        should check that it is a valid path.
538

539
    """
540
    if self.dev_type == constants.LD_LV:
541
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
542
    elif self.dev_type == constants.LD_BLOCKDEV:
543
      return self.logical_id[1]
544
    elif self.dev_type == constants.LD_RBD:
545
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
546
    return None
547

    
548
  def ChildrenNeeded(self):
549
    """Compute the needed number of children for activation.
550

551
    This method will return either -1 (all children) or a positive
552
    number denoting the minimum number of children needed for
553
    activation (only mirrored devices will usually return >=0).
554

555
    Currently, only DRBD8 supports diskless activation (therefore we
556
    return 0), for all other we keep the previous semantics and return
557
    -1.
558

559
    """
560
    if self.dev_type == constants.LD_DRBD8:
561
      return 0
562
    return -1
563

    
564
  def IsBasedOnDiskType(self, dev_type):
565
    """Check if the disk or its children are based on the given type.
566

567
    @type dev_type: L{constants.LDS_BLOCK}
568
    @param dev_type: the type to look for
569
    @rtype: boolean
570
    @return: boolean indicating if a device of the given type was found or not
571

572
    """
573
    if self.children:
574
      for child in self.children:
575
        if child.IsBasedOnDiskType(dev_type):
576
          return True
577
    return self.dev_type == dev_type
578

    
579
  def GetNodes(self, node_uuid):
580
    """This function returns the nodes this device lives on.
581

582
    Given the node on which the parent of the device lives on (or, in
583
    case of a top-level device, the primary node of the devices'
584
    instance), this function will return a list of nodes on which this
585
    device needs to (or can) be assembled.
586

587
    """
588
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
589
                         constants.LD_BLOCKDEV, constants.LD_RBD,
590
                         constants.LD_EXT]:
591
      result = [node_uuid]
592
    elif self.dev_type in constants.LDS_DRBD:
593
      result = [self.logical_id[0], self.logical_id[1]]
594
      if node_uuid not in result:
595
        raise errors.ConfigurationError("DRBD device passed unknown node")
596
    else:
597
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
598
    return result
599

    
600
  def ComputeNodeTree(self, parent_node_uuid):
601
    """Compute the node/disk tree for this disk and its children.
602

603
    This method, given the node on which the parent disk lives, will
604
    return the list of all (node UUID, disk) pairs which describe the disk
605
    tree in the most compact way. For example, a drbd/lvm stack
606
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
607
    which represents all the top-level devices on the nodes.
608

609
    """
610
    my_nodes = self.GetNodes(parent_node_uuid)
611
    result = [(node, self) for node in my_nodes]
612
    if not self.children:
613
      # leaf device
614
      return result
615
    for node in my_nodes:
616
      for child in self.children:
617
        child_result = child.ComputeNodeTree(node)
618
        if len(child_result) == 1:
619
          # child (and all its descendants) is simple, doesn't split
620
          # over multiple hosts, so we don't need to describe it, our
621
          # own entry for this node describes it completely
622
          continue
623
        else:
624
          # check if child nodes differ from my nodes; note that
625
          # subdisk can differ from the child itself, and be instead
626
          # one of its descendants
627
          for subnode, subdisk in child_result:
628
            if subnode not in my_nodes:
629
              result.append((subnode, subdisk))
630
            # otherwise child is under our own node, so we ignore this
631
            # entry (but probably the other results in the list will
632
            # be different)
633
    return result
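  # Illustrative example (names made up): for a DRBD8 disk backed by two local
  # LVs the tree is reported as just the two top-level pairs, because the LV
  # children live on the same nodes and add no extra entries:
  #
  #   drbd.ComputeNodeTree(primary_uuid)
  #   => [(primary_uuid, drbd), (secondary_uuid, drbd)]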
634

    
635
  def ComputeGrowth(self, amount):
636
    """Compute the per-VG growth requirements.
637

638
    This only works for VG-based disks.
639

640
    @type amount: integer
641
    @param amount: the desired increase in (user-visible) disk space
642
    @rtype: dict
643
    @return: a dictionary of volume-groups and the required size
644

645
    """
646
    if self.dev_type == constants.LD_LV:
647
      return {self.logical_id[0]: amount}
648
    elif self.dev_type == constants.LD_DRBD8:
649
      if self.children:
650
        return self.children[0].ComputeGrowth(amount)
651
      else:
652
        return {}
653
    else:
654
      # Other disk types do not require VG space
655
      return {}
656

    
657
  def RecordGrow(self, amount):
658
    """Update the size of this disk after growth.
659

660
    This method recurses over the disk's children and updates their
661
    size correspondingly. The method needs to be kept in sync with the
662
    actual algorithms from bdev.
663

664
    """
665
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
666
                         constants.LD_RBD, constants.LD_EXT):
667
      self.size += amount
668
    elif self.dev_type == constants.LD_DRBD8:
669
      if self.children:
670
        self.children[0].RecordGrow(amount)
671
      self.size += amount
672
    else:
673
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
674
                                   " disk type %s" % self.dev_type)
675

    
676
  def Update(self, size=None, mode=None, spindles=None):
677
    """Apply changes to size, spindles and mode.
678

679
    """
680
    if self.dev_type == constants.LD_DRBD8:
681
      if self.children:
682
        self.children[0].Update(size=size, mode=mode)
683
    else:
684
      assert not self.children
685

    
686
    if size is not None:
687
      self.size = size
688
    if mode is not None:
689
      self.mode = mode
690
    if spindles is not None:
691
      self.spindles = spindles
692

    
693
  def UnsetSize(self):
694
    """Sets recursively the size to zero for the disk and its children.
695

696
    """
697
    if self.children:
698
      for child in self.children:
699
        child.UnsetSize()
700
    self.size = 0
701

    
702
  def SetPhysicalID(self, target_node_uuid, nodes_ip):
703
    """Convert the logical ID to the physical ID.
704

705
    This is used only for drbd, which needs ip/port configuration.
706

707
    The routine descends down and updates its children also, because
708
    this helps when the only the top device is passed to the remote
709
    node.
710

711
    Arguments:
712
      - target_node_uuid: the node UUID we wish to configure for
713
      - nodes_ip: a mapping of node name to ip
714

715
    The target_node must exist in nodes_ip, and must be one of the
716
    nodes in the logical ID for each of the DRBD devices encountered
717
    in the disk tree.
718

719
    """
720
    if self.children:
721
      for child in self.children:
722
        child.SetPhysicalID(target_node_uuid, nodes_ip)
723

    
724
    if self.logical_id is None and self.physical_id is not None:
725
      return
726
    if self.dev_type in constants.LDS_DRBD:
727
      pnode_uuid, snode_uuid, port, pminor, sminor, secret = self.logical_id
728
      if target_node_uuid not in (pnode_uuid, snode_uuid):
729
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
730
                                        target_node_uuid)
731
      pnode_ip = nodes_ip.get(pnode_uuid, None)
732
      snode_ip = nodes_ip.get(snode_uuid, None)
733
      if pnode_ip is None or snode_ip is None:
734
        raise errors.ConfigurationError("Can't find primary or secondary node"
735
                                        " for %s" % str(self))
736
      p_data = (pnode_ip, port)
737
      s_data = (snode_ip, port)
738
      if pnode_uuid == target_node_uuid:
739
        self.physical_id = p_data + s_data + (pminor, secret)
740
      else: # it must be secondary, we tested above
741
        self.physical_id = s_data + p_data + (sminor, secret)
742
    else:
743
      self.physical_id = self.logical_id
744
    return
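  # Illustrative layout for a DRBD disk (the local endpoint always comes
  # first in the resulting physical ID):
  #
  #   logical_id  = (pnode_uuid, snode_uuid, port, pminor, sminor, secret)
  #   physical_id = (local_ip, port, remote_ip, port, local_minor, secret)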
745

    
746
  def ToDict(self):
747
    """Disk-specific conversion to standard python types.
748

749
    This replaces the children lists of objects with lists of
750
    standard python types.
751

752
    """
753
    bo = super(Disk, self).ToDict()
754

    
755
    for attr in ("children",):
756
      alist = bo.get(attr, None)
757
      if alist:
758
        bo[attr] = outils.ContainerToDicts(alist)
759
    return bo
760

    
761
  @classmethod
762
  def FromDict(cls, val):
763
    """Custom function for Disks
764

765
    """
766
    obj = super(Disk, cls).FromDict(val)
767
    if obj.children:
768
      obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
769
    if obj.logical_id and isinstance(obj.logical_id, list):
770
      obj.logical_id = tuple(obj.logical_id)
771
    if obj.physical_id and isinstance(obj.physical_id, list):
772
      obj.physical_id = tuple(obj.physical_id)
773
    if obj.dev_type in constants.LDS_DRBD:
774
      # we need a tuple of length six here
775
      if len(obj.logical_id) < 6:
776
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
777
    return obj
778

    
779
  def __str__(self):
780
    """Custom str() formatter for disks.
781

782
    """
783
    if self.dev_type == constants.LD_LV:
784
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
785
    elif self.dev_type in constants.LDS_DRBD:
786
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
787
      val = "<DRBD8("
788
      if self.physical_id is None:
789
        phy = "unconfigured"
790
      else:
791
        phy = ("configured as %s:%s %s:%s" %
792
               (self.physical_id[0], self.physical_id[1],
793
                self.physical_id[2], self.physical_id[3]))
794

    
795
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
796
              (node_a, minor_a, node_b, minor_b, port, phy))
797
      if self.children and self.children.count(None) == 0:
798
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
799
      else:
800
        val += "no local storage"
801
    else:
802
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
803
             (self.dev_type, self.logical_id, self.physical_id, self.children))
804
    if self.iv_name is None:
805
      val += ", not visible"
806
    else:
807
      val += ", visible as /dev/%s" % self.iv_name
808
    if self.spindles is not None:
809
      val += ", spindles=%s" % self.spindles
810
    if isinstance(self.size, int):
811
      val += ", size=%dm)>" % self.size
812
    else:
813
      val += ", size='%s')>" % (self.size,)
814
    return val
815

    
816
  def Verify(self):
817
    """Checks that this disk is correctly configured.
818

819
    """
820
    all_errors = []
821
    if self.mode not in constants.DISK_ACCESS_SET:
822
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
823
    return all_errors
824

    
825
  def UpgradeConfig(self):
826
    """Fill defaults for missing configuration values.
827

828
    """
829
    if self.children:
830
      for child in self.children:
831
        child.UpgradeConfig()
832

    
833
    # FIXME: Make this configurable in Ganeti 2.7
834
    self.params = {}
835
    # add here config upgrade for this disk
836

    
837
  @staticmethod
838
  def ComputeLDParams(disk_template, disk_params):
839
    """Computes Logical Disk parameters from Disk Template parameters.
840

841
    @type disk_template: string
842
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
843
    @type disk_params: dict
844
    @param disk_params: disk template parameters;
845
                        dict(template_name -> parameters)
846
    @rtype: list(dict)
847
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
848
      contains the LD parameters of the node. The tree is flattened in-order.
849

850
    """
851
    if disk_template not in constants.DISK_TEMPLATES:
852
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
853

    
854
    assert disk_template in disk_params
855

    
856
    result = list()
857
    dt_params = disk_params[disk_template]
858
    if disk_template == constants.DT_DRBD8:
859
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], {
860
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
861
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
862
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
863
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
864
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
865
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
866
        constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL],
867
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
868
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
869
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
870
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
871
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
872
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
873
        }))
874

    
875
      # data LV
876
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
877
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
878
        }))
879

    
880
      # metadata LV
881
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
882
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
883
        }))
884

    
885
    elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
886
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])
887

    
888
    elif disk_template == constants.DT_PLAIN:
889
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
890
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
891
        }))
892

    
893
    elif disk_template == constants.DT_BLOCK:
894
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])
895

    
896
    elif disk_template == constants.DT_RBD:
897
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], {
898
        constants.LDP_POOL: dt_params[constants.RBD_POOL],
899
        }))
900

    
901
    elif disk_template == constants.DT_EXT:
902
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT])
903

    
904
    return result
905

    
906

    
907
class InstancePolicy(ConfigObject):
908
  """Config object representing instance policy limits dictionary.
909

910
  Note that this object is not actually used in the config, it's just
911
  used as a placeholder for a few functions.
912

913
  """
914
  @classmethod
915
  def CheckParameterSyntax(cls, ipolicy, check_std):
916
    """ Check the instance policy for validity.
917

918
    @type ipolicy: dict
919
    @param ipolicy: dictionary with min/max/std specs and policies
920
    @type check_std: bool
921
    @param check_std: Whether to check std value or just assume compliance
922
    @raise errors.ConfigurationError: when the policy is not legal
923

924
    """
925
    InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
926
    if constants.IPOLICY_DTS in ipolicy:
927
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
928
    for key in constants.IPOLICY_PARAMETERS:
929
      if key in ipolicy:
930
        InstancePolicy.CheckParameter(key, ipolicy[key])
931
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
932
    if wrong_keys:
933
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
934
                                      utils.CommaJoin(wrong_keys))
935

    
936
  @classmethod
937
  def _CheckIncompleteSpec(cls, spec, keyname):
938
    missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
939
    if missing_params:
940
      msg = ("Missing instance specs parameters for %s: %s" %
941
             (keyname, utils.CommaJoin(missing_params)))
942
      raise errors.ConfigurationError(msg)
943

    
944
  @classmethod
945
  def CheckISpecSyntax(cls, ipolicy, check_std):
946
    """Check the instance policy specs for validity.
947

948
    @type ipolicy: dict
949
    @param ipolicy: dictionary with min/max/std specs
950
    @type check_std: bool
951
    @param check_std: Whether to check std value or just assume compliance
952
    @raise errors.ConfigurationError: when specs are not valid
953

954
    """
955
    if constants.ISPECS_MINMAX not in ipolicy:
956
      # Nothing to check
957
      return
958

    
959
    if check_std and constants.ISPECS_STD not in ipolicy:
960
      msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
961
      raise errors.ConfigurationError(msg)
962
    stdspec = ipolicy.get(constants.ISPECS_STD)
963
    if check_std:
964
      InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)
965

    
966
    if not ipolicy[constants.ISPECS_MINMAX]:
967
      raise errors.ConfigurationError("Empty minmax specifications")
968
    std_is_good = False
969
    for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
970
      missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
971
      if missing:
972
        msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
973
        raise errors.ConfigurationError(msg)
974
      for (key, spec) in minmaxspecs.items():
975
        InstancePolicy._CheckIncompleteSpec(spec, key)
976

    
977
      spec_std_ok = True
978
      for param in constants.ISPECS_PARAMETERS:
979
        par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
980
                                                           param, check_std)
981
        spec_std_ok = spec_std_ok and par_std_ok
982
      std_is_good = std_is_good or spec_std_ok
983
    if not std_is_good:
984
      raise errors.ConfigurationError("Invalid std specifications")
985

    
986
  @classmethod
987
  def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
988
    """Check the instance policy specs for validity on a given key.
989

990
    We check if the instance specs make sense for a given key, that is
991
    if minmaxspecs[min][name] <= stdspec[name] <= minmaxspec[max][name].
992

993
    @type minmaxspecs: dict
994
    @param minmaxspecs: dictionary with min and max instance spec
995
    @type stdspec: dict
996
    @param stdspec: dictionary with standard instance spec
997
    @type name: string
998
    @param name: what are the limits for
999
    @type check_std: bool
1000
    @param check_std: Whether to check std value or just assume compliance
1001
    @rtype: bool
1002
    @return: C{True} when specs are valid, C{False} when standard spec for the
1003
        given name is not valid
1004
    @raise errors.ConfigurationError: when min/max specs for the given name
1005
        are not valid
1006

1007
    """
1008
    minspec = minmaxspecs[constants.ISPECS_MIN]
1009
    maxspec = minmaxspecs[constants.ISPECS_MAX]
1010
    min_v = minspec[name]
1011
    max_v = maxspec[name]
1012

    
1013
    if min_v > max_v:
1014
      err = ("Invalid specification of min/max values for %s: %s/%s" %
1015
             (name, min_v, max_v))
1016
      raise errors.ConfigurationError(err)
1017
    elif check_std:
1018
      std_v = stdspec.get(name, min_v)
1019
      return std_v >= min_v and std_v <= max_v
1020
    else:
1021
      return True
1022

    
1023
  @classmethod
1024
  def CheckDiskTemplates(cls, disk_templates):
1025
    """Checks the disk templates for validity.
1026

1027
    """
1028
    if not disk_templates:
1029
      raise errors.ConfigurationError("Instance policy must contain" +
1030
                                      " at least one disk template")
1031
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1032
    if wrong:
1033
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
1034
                                      utils.CommaJoin(wrong))
1035

    
1036
  @classmethod
1037
  def CheckParameter(cls, key, value):
1038
    """Checks a parameter.
1039

1040
    Currently we expect all parameters to be float values.
1041

1042
    """
1043
    try:
1044
      float(value)
1045
    except (TypeError, ValueError), err:
1046
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
1047
                                      " '%s', error: %s" % (key, value, err))
1048

    
1049

    
1050
class Instance(TaggableObject):
1051
  """Config object representing an instance."""
1052
  __slots__ = [
1053
    "name",
1054
    "primary_node",
1055
    "os",
1056
    "hypervisor",
1057
    "hvparams",
1058
    "beparams",
1059
    "osparams",
1060
    "admin_state",
1061
    "nics",
1062
    "disks",
1063
    "disk_template",
1064
    "disks_active",
1065
    "network_port",
1066
    "serial_no",
1067
    ] + _TIMESTAMPS + _UUID
1068

    
1069
  def _ComputeSecondaryNodes(self):
1070
    """Compute the list of secondary nodes.
1071

1072
    This is a simple wrapper over _ComputeAllNodes.
1073

1074
    """
1075
    all_nodes = set(self._ComputeAllNodes())
1076
    all_nodes.discard(self.primary_node)
1077
    return tuple(all_nodes)
1078

    
1079
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1080
                             "List of names of secondary nodes")
1081

    
1082
  def _ComputeAllNodes(self):
1083
    """Compute the list of all nodes.
1084

1085
    Since the data is already there (in the drbd disks), keeping it as
1086
    a separate normal attribute is redundant and if not properly
1087
    synchronised can cause problems. Thus it's better to compute it
1088
    dynamically.
1089

1090
    """
1091
    def _Helper(nodes, device):
1092
      """Recursively computes nodes given a top device."""
1093
      if device.dev_type in constants.LDS_DRBD:
1094
        nodea, nodeb = device.logical_id[:2]
1095
        nodes.add(nodea)
1096
        nodes.add(nodeb)
1097
      if device.children:
1098
        for child in device.children:
1099
          _Helper(nodes, child)
1100

    
1101
    all_nodes = set()
1102
    all_nodes.add(self.primary_node)
1103
    for device in self.disks:
1104
      _Helper(all_nodes, device)
1105
    return tuple(all_nodes)
1106

    
1107
  all_nodes = property(_ComputeAllNodes, None, None,
1108
                       "List of names of all the nodes of the instance")
1109

    
1110
  def MapLVsByNode(self, lvmap=None, devs=None, node_uuid=None):
1111
    """Provide a mapping of nodes to LVs this instance owns.
1112

1113
    This function figures out what logical volumes should belong on
1114
    which nodes, recursing through a device tree.
1115

1116
    @type lvmap: dict
1117
    @param lvmap: optional dictionary to receive the
1118
        'node' : ['lv', ...] data.
1119
    @type devs: list of L{Disk}
1120
    @param devs: disks to get the LV name for. If None, all disks of this
1121
        instance are used.
1122
    @type node_uuid: string
1123
    @param node_uuid: UUID of the node to get the LV names for. If None, the
1124
        primary node of this instance is used.
1125
    @return: None if lvmap arg is given, otherwise, a dictionary of
1126
        the form { 'node_uuid' : ['volume1', 'volume2', ...], ... };
1127
        volumeN is of the form "vg_name/lv_name", compatible with
1128
        GetVolumeList()
1129

1130
    """
1131
    if node_uuid is None:
1132
      node_uuid = self.primary_node
1133

    
1134
    if lvmap is None:
1135
      lvmap = {
1136
        node_uuid: [],
1137
        }
1138
      ret = lvmap
1139
    else:
1140
      if not node_uuid in lvmap:
1141
        lvmap[node_uuid] = []
1142
      ret = None
1143

    
1144
    if not devs:
1145
      devs = self.disks
1146

    
1147
    for dev in devs:
1148
      if dev.dev_type == constants.LD_LV:
1149
        lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1150

    
1151
      elif dev.dev_type in constants.LDS_DRBD:
1152
        if dev.children:
1153
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1154
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1155

    
1156
      elif dev.children:
1157
        self.MapLVsByNode(lvmap, dev.children, node_uuid)
1158

    
1159
    return ret
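  # Illustrative example (node UUID and LV names made up): for a plain
  # (LVM-only) instance the result covers just the primary node, while DRBD
  # instances also list the backing LVs held on the secondary:
  #
  #   inst.MapLVsByNode()
  #   => {"node-uuid": ["xenvg/11225...-disk0"]}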
1160

    
1161
  def FindDisk(self, idx):
1162
    """Find a disk given having a specified index.
1163

1164
    This is just a wrapper that does validation of the index.
1165

1166
    @type idx: int
1167
    @param idx: the disk index
1168
    @rtype: L{Disk}
1169
    @return: the corresponding disk
1170
    @raise errors.OpPrereqError: when the given index is not valid
1171

1172
    """
1173
    try:
1174
      idx = int(idx)
1175
      return self.disks[idx]
1176
    except (TypeError, ValueError), err:
1177
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1178
                                 errors.ECODE_INVAL)
1179
    except IndexError:
1180
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1181
                                 " 0 to %d" % (idx, len(self.disks) - 1),
1182
                                 errors.ECODE_INVAL)
1183

    
1184
  def ToDict(self):
1185
    """Instance-specific conversion to standard python types.
1186

1187
    This replaces the children lists of objects with lists of standard
1188
    python types.
1189

1190
    """
1191
    bo = super(Instance, self).ToDict()
1192

    
1193
    for attr in "nics", "disks":
1194
      alist = bo.get(attr, None)
1195
      if alist:
1196
        nlist = outils.ContainerToDicts(alist)
1197
      else:
1198
        nlist = []
1199
      bo[attr] = nlist
1200
    return bo
1201

    
1202
  @classmethod
1203
  def FromDict(cls, val):
1204
    """Custom function for instances.
1205

1206
    """
1207
    if "admin_state" not in val:
1208
      if val.get("admin_up", False):
1209
        val["admin_state"] = constants.ADMINST_UP
1210
      else:
1211
        val["admin_state"] = constants.ADMINST_DOWN
1212
    if "admin_up" in val:
1213
      del val["admin_up"]
1214
    obj = super(Instance, cls).FromDict(val)
1215
    obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
1216
    obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
1217
    return obj
1218

    
1219
  def UpgradeConfig(self):
1220
    """Fill defaults for missing configuration values.
1221

1222
    """
1223
    for nic in self.nics:
1224
      nic.UpgradeConfig()
1225
    for disk in self.disks:
1226
      disk.UpgradeConfig()
1227
    if self.hvparams:
1228
      for key in constants.HVC_GLOBALS:
1229
        try:
1230
          del self.hvparams[key]
1231
        except KeyError:
1232
          pass
1233
    if self.osparams is None:
1234
      self.osparams = {}
1235
    UpgradeBeParams(self.beparams)
1236
    if self.disks_active is None:
1237
      self.disks_active = self.admin_state == constants.ADMINST_UP
1238

    
1239

    
1240
class OS(ConfigObject):
1241
  """Config object representing an operating system.
1242

1243
  @type supported_parameters: list
1244
  @ivar supported_parameters: a list of tuples, name and description,
1245
      containing the supported parameters by this OS
1246

1247
  @type VARIANT_DELIM: string
1248
  @cvar VARIANT_DELIM: the variant delimiter
1249

1250
  """
1251
  __slots__ = [
1252
    "name",
1253
    "path",
1254
    "api_versions",
1255
    "create_script",
1256
    "export_script",
1257
    "import_script",
1258
    "rename_script",
1259
    "verify_script",
1260
    "supported_variants",
1261
    "supported_parameters",
1262
    ]
1263

    
1264
  VARIANT_DELIM = "+"
1265

    
1266
  @classmethod
1267
  def SplitNameVariant(cls, name):
1268
    """Splits the name into the proper name and variant.
1269

1270
    @param name: the OS (unprocessed) name
1271
    @rtype: list
1272
    @return: a list of two elements; if the original name didn't
1273
        contain a variant, it's returned as an empty string
1274

1275
    """
1276
    nv = name.split(cls.VARIANT_DELIM, 1)
1277
    if len(nv) == 1:
1278
      nv.append("")
1279
    return nv
1280

    
1281
  @classmethod
1282
  def GetName(cls, name):
1283
    """Returns the proper name of the os (without the variant).
1284

1285
    @param name: the OS (unprocessed) name
1286

1287
    """
1288
    return cls.SplitNameVariant(name)[0]
1289

    
1290
  @classmethod
1291
  def GetVariant(cls, name):
1292
    """Returns the variant the os (without the base name).
1293

1294
    @param name: the OS (unprocessed) name
1295

1296
    """
1297
    return cls.SplitNameVariant(name)[1]
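# Illustrative sketch, not part of the original module: the "+" delimiter
# splits an OS name into base name and variant, with the variant defaulting
# to the empty string.
def _ExampleOsNames():
  assert OS.SplitNameVariant("debootstrap+default") == ["debootstrap",
                                                        "default"]
  assert OS.GetName("debootstrap+default") == "debootstrap"
  assert OS.GetVariant("debootstrap") == ""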
1298

    
1299

    
1300
class ExtStorage(ConfigObject):
1301
  """Config object representing an External Storage Provider.
1302

1303
  """
1304
  __slots__ = [
1305
    "name",
1306
    "path",
1307
    "create_script",
1308
    "remove_script",
1309
    "grow_script",
1310
    "attach_script",
1311
    "detach_script",
1312
    "setinfo_script",
1313
    "verify_script",
1314
    "supported_parameters",
1315
    ]
1316

    
1317

    
1318
class NodeHvState(ConfigObject):
1319
  """Hypvervisor state on a node.
1320

1321
  @ivar mem_total: Total amount of memory
1322
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1323
    available)
1324
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1325
    rounding
1326
  @ivar mem_inst: Memory used by instances living on node
1327
  @ivar cpu_total: Total node CPU core count
1328
  @ivar cpu_node: Number of CPU cores reserved for the node itself
1329

1330
  """
1331
  __slots__ = [
1332
    "mem_total",
1333
    "mem_node",
1334
    "mem_hv",
1335
    "mem_inst",
1336
    "cpu_total",
1337
    "cpu_node",
1338
    ] + _TIMESTAMPS
1339

    
1340

    
1341
class NodeDiskState(ConfigObject):
1342
  """Disk state on a node.
1343

1344
  """
1345
  __slots__ = [
1346
    "total",
1347
    "reserved",
1348
    "overhead",
1349
    ] + _TIMESTAMPS
1350

    
1351

    
1352
class Node(TaggableObject):
1353
  """Config object representing a node.
1354

1355
  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1356
  @ivar hv_state_static: Hypervisor state overridden by user
1357
  @ivar disk_state: Disk state (e.g. free space)
1358
  @ivar disk_state_static: Disk state overridden by user
1359

1360
  """
1361
  __slots__ = [
1362
    "name",
1363
    "primary_ip",
1364
    "secondary_ip",
1365
    "serial_no",
1366
    "master_candidate",
1367
    "offline",
1368
    "drained",
1369
    "group",
1370
    "master_capable",
1371
    "vm_capable",
1372
    "ndparams",
1373
    "powered",
1374
    "hv_state",
1375
    "hv_state_static",
1376
    "disk_state",
1377
    "disk_state_static",
1378
    ] + _TIMESTAMPS + _UUID
1379

    
1380
  def UpgradeConfig(self):
1381
    """Fill defaults for missing configuration values.
1382

1383
    """
1384
    # pylint: disable=E0203
1385
    # because these are "defined" via slots, not manually
1386
    if self.master_capable is None:
1387
      self.master_capable = True
1388

    
1389
    if self.vm_capable is None:
1390
      self.vm_capable = True
1391

    
1392
    if self.ndparams is None:
1393
      self.ndparams = {}
1394
    # And remove any global parameter
1395
    for key in constants.NDC_GLOBALS:
1396
      if key in self.ndparams:
1397
        logging.warning("Ignoring %s node parameter for node %s",
1398
                        key, self.name)
1399
        del self.ndparams[key]
1400

    
1401
    if self.powered is None:
1402
      self.powered = True
1403

    
1404
  def ToDict(self):
1405
    """Custom function for serializing.
1406

1407
    """
1408
    data = super(Node, self).ToDict()
1409

    
1410
    hv_state = data.get("hv_state", None)
1411
    if hv_state is not None:
1412
      data["hv_state"] = outils.ContainerToDicts(hv_state)
1413

    
1414
    disk_state = data.get("disk_state", None)
1415
    if disk_state is not None:
1416
      data["disk_state"] = \
1417
        dict((key, outils.ContainerToDicts(value))
1418
             for (key, value) in disk_state.items())
1419

    
1420
    return data
1421

    
1422
  @classmethod
1423
  def FromDict(cls, val):
1424
    """Custom function for deserializing.
1425

1426
    """
1427
    obj = super(Node, cls).FromDict(val)
1428

    
1429
    if obj.hv_state is not None:
1430
      obj.hv_state = \
1431
        outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1432

    
1433
    if obj.disk_state is not None:
1434
      obj.disk_state = \
1435
        dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
1436
             for (key, value) in obj.disk_state.items())
1437

    
1438
    return obj
1439

    
1440

    
1441
class NodeGroup(TaggableObject):
1442
  """Config object representing a node group."""
1443
  __slots__ = [
1444
    "name",
1445
    "members",
1446
    "ndparams",
1447
    "diskparams",
1448
    "ipolicy",
1449
    "serial_no",
1450
    "hv_state_static",
1451
    "disk_state_static",
1452
    "alloc_policy",
1453
    "networks",
1454
    ] + _TIMESTAMPS + _UUID
1455

    
1456
  def ToDict(self):
1457
    """Custom function for nodegroup.
1458

1459
    This discards the members object, which gets recalculated and is only kept
1460
    in memory.
1461

1462
    """
1463
    mydict = super(NodeGroup, self).ToDict()
1464
    del mydict["members"]
1465
    return mydict
1466

    
1467
  @classmethod
1468
  def FromDict(cls, val):
1469
    """Custom function for nodegroup.
1470

1471
    The members slot is initialized to an empty list upon deserialization.
1472

1473
    """
1474
    obj = super(NodeGroup, cls).FromDict(val)
1475
    obj.members = []
1476
    return obj
1477

    
1478
  def UpgradeConfig(self):
1479
    """Fill defaults for missing configuration values.
1480

1481
    """
1482
    if self.ndparams is None:
1483
      self.ndparams = {}
1484

    
1485
    if self.serial_no is None:
1486
      self.serial_no = 1
1487

    
1488
    if self.alloc_policy is None:
1489
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1490

    
1491
    # We only update mtime, and not ctime, since we would not be able
1492
    # to provide a correct value for creation time.
1493
    if self.mtime is None:
1494
      self.mtime = time.time()
1495

    
1496
    if self.diskparams is None:
1497
      self.diskparams = {}
1498
    if self.ipolicy is None:
1499
      self.ipolicy = MakeEmptyIPolicy()
1500

    
1501
    if self.networks is None:
1502
      self.networks = {}
1503

    
1504
  def FillND(self, node):
1505
    """Return filled out ndparams for L{objects.Node}
1506

1507
    @type node: L{objects.Node}
1508
    @param node: A Node object to fill
1509
    @return: a copy of the node's ndparams with defaults filled
1510

1511
    """
1512
    return self.SimpleFillND(node.ndparams)
1513

    
1514
  def SimpleFillND(self, ndparams):
1515
    """Fill a given ndparams dict with defaults.
1516

1517
    @type ndparams: dict
1518
    @param ndparams: the dict to fill
1519
    @rtype: dict
1520
    @return: a copy of the passed in ndparams with missing keys filled
1521
        from the node group defaults
1522

1523
    """
1524
    return FillDict(self.ndparams, ndparams)


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "master_netmask",
    "use_external_mip_script",
    "cluster_name",
    "file_storage_dir",
    "shared_file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "ipolicy",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "ndparams",
    "diskparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    "hv_state_static",
    "disk_state_static",
    "enabled_disk_templates",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.ndparams = UpgradeNDParams(self.ndparams)

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    if self.diskparams:
      self.diskparams = UpgradeDiskParams(self.diskparams)
    else:
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
      if wrongkeys:
        # These keys would be silently removed by FillIPolicy()
        msg = ("Cluster instance policy contains spurious keys: %s" %
               utils.CommaJoin(wrongkeys))
        raise errors.ConfigurationError(msg)
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
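
  # Note: the check above refuses to upgrade an instance policy carrying keys
  # outside constants.IPOLICY_ALL_KEYS, because FillIPolicy() would drop such
  # keys silently; the configuration fails loudly instead.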

  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()

    if self.tcpudp_port_pool is None:
      tcpudp_port_pool = []
    else:
      tcpudp_port_pool = list(self.tcpudp_port_pool)

    mydict["tcpudp_port_pool"] = tcpudp_port_pool

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)

    if obj.tcpudp_port_pool is None:
      obj.tcpudp_port_pool = set()
    elif not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)

    return obj
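
  # Illustrative round trip (hypothetical port numbers): at runtime the pool
  # is a set, but it is serialized as a plain list, e.g.
  #   cluster.tcpudp_port_pool = set([11000, 11001])
  #   cluster.ToDict()["tcpudp_port_pool"]    # a list such as [11000, 11001]
  #   Cluster.FromDict(cluster.ToDict()).tcpudp_port_pool == set([11000, 11001])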

  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: The diskparams
    @return: The defaults dict

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict
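
  # Illustrative layering (hypothetical parameter values): with cluster-wide
  #   hvparams = {"xen-pvm": {"kernel_path": "/boot/vmlinuz"}}
  # and a per-OS override
  #   os_hvp = {"debian": {"xen-pvm": {"kernel_path": "/boot/vmlinuz-deb"}}}
  # GetHVDefaults("xen-pvm", "debian") stacks the OS override on top of the
  # cluster default and returns {"kernel_path": "/boot/vmlinuz-deb"}.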

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)
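
  # Note: FillHV() layers, from lowest to highest priority, the cluster-wide
  # hvparams, the per-OS overrides from os_hvp and the instance's own
  # hvparams; with skip_globals=True the keys listed in constants.HVC_GLOBALS
  # are stripped from the result altogether.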

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
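
  # Illustrative layering (hypothetical values): with cluster osparams
  #   {"debian": {"mirror": "ftp.debian.org"},
  #    "debian+secure": {"mirror": "security.debian.org"}}
  # SimpleFillOS("debian+secure", {"extra_pkgs": "vim"}) returns
  #   {"mirror": "security.debian.org", "extra_pkgs": "vim"}
  # i.e. base-OS values < variant values < explicitly passed values.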

  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)
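
  # Illustrative precedence chain (hypothetical values):
  #   cluster.ndparams = {"oob_program": None, "spindle_count": 1}
  #   group.ndparams   = {"spindle_count": 2}
  #   node.ndparams    = {"oob_program": "/usr/bin/oob"}
  #   cluster.FillND(node, group)
  #   # -> {"oob_program": "/usr/bin/oob", "spindle_count": 2}
  # i.e. node values override group values, which override cluster defaults.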

  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)
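
  # Note: unlike the plain FillDict()-based helpers above, FillIPolicy()
  # merges the ISPECS_STD sub-dict key by key, while any other policy key
  # present in the override replaces the cluster-level value wholesale.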

  def IsDiskTemplateEnabled(self, disk_template):
    """Checks if a particular disk template is enabled.

    """
    return utils.storage.IsDiskTemplateEnabled(
        disk_template, self.enabled_disk_templates)

  def IsFileStorageEnabled(self):
    """Checks if file storage is enabled.

    """
    return utils.storage.IsFileStorageEnabled(self.enabled_disk_templates)

  def IsSharedFileStorageEnabled(self):
    """Checks if shared file storage is enabled.

    """
    return utils.storage.IsSharedFileStorageEnabled(
        self.enabled_disk_templates)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = \
      outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True
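
  # Illustrative reading of the assertions above (hypothetical example): a
  # console of kind constants.CONS_SSH must carry instance, host, user and
  # command, while message, port and display may be left unset.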


class Network(TaggableObject):
  """Object representing a network definition for ganeti.

  """
  __slots__ = [
    "name",
    "serial_no",
    "mac_prefix",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "reservations",
    "ext_reservations",
    ] + _TIMESTAMPS + _UUID

  def HooksDict(self, prefix=""):
    """Export a dictionary used by hooks with a network's information.

    @type prefix: String
    @param prefix: Prefix to prepend to the dict entries

    """
    result = {
      "%sNETWORK_NAME" % prefix: self.name,
      "%sNETWORK_UUID" % prefix: self.uuid,
      "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
    }
    if self.network:
      result["%sNETWORK_SUBNET" % prefix] = self.network
    if self.gateway:
      result["%sNETWORK_GATEWAY" % prefix] = self.gateway
    if self.network6:
      result["%sNETWORK_SUBNET6" % prefix] = self.network6
    if self.gateway6:
      result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
    if self.mac_prefix:
      result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix

    return result
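
  # Illustrative result (hypothetical values): for a network named "net1"
  # with network="10.0.0.0/24" and gateway="10.0.0.1", HooksDict("NIC0_")
  # contains, among others:
  #   {"NIC0_NETWORK_NAME": "net1",
  #    "NIC0_NETWORK_SUBNET": "10.0.0.0/24",
  #    "NIC0_NETWORK_GATEWAY": "10.0.0.1"}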

  @classmethod
  def FromDict(cls, val):
    """Custom function for networks.

    Remove deprecated network_type and family.

    """
    if "network_type" in val:
      del val["network_type"]
    if "family" in val:
      del val["family"]
    obj = super(Network, cls).FromDict(val)
    return obj


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
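
  # Illustrative round trip (hypothetical section/option names):
  #   cfg = SerializableConfigParser()
  #   cfg.add_section("instance")
  #   cfg.set("instance", "name", "inst1.example.com")
  #   text = cfg.Dumps()
  #   SerializableConfigParser.Loads(text).get("instance", "name")
  #   # -> "inst1.example.com"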


class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  @type lv_list: list of strings
  @ivar lv_list: names of the LVs hosted on the PV
  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    "lv_list"
    ]

  def IsEmpty(self):
    """Is this PV empty?

    """
    return self.size <= (self.free + 1)
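
  # Note: the one-MiB slack above absorbs metadata and rounding in the sizes
  # reported by LVM, so a PV with at most 1 MiB in use is still considered
  # empty.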

  def IsAllocatable(self):
    """Is this PV allocatable?

    """
    return ("a" in self.attributes)