root / lib / objects.py @ ebe93784

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable=E0203,W0201,R0902

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

# R0902: Allow instances of these objects to have more than 20 attributes

import ConfigParser
import re
import copy
import logging
import time
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants
from ganeti import netutils
from ganeti import outils
from ganeti import utils

from socket import AF_INET


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]


def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
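# Illustrative example (not part of the original module): with
# defaults_dict = {"a": 1, "b": 2} and custom_dict = {"b": 3},
# FillDict(defaults_dict, custom_dict, skip_keys=["a"]) returns {"b": 3}:
# the customized value wins and the skipped key is dropped from the result.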


def FillIPolicy(default_ipolicy, custom_ipolicy):
  """Fills an instance policy with defaults.

  """
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
  ret_dict = copy.deepcopy(custom_ipolicy)
  for key in default_ipolicy:
    if key not in ret_dict:
      ret_dict[key] = copy.deepcopy(default_ipolicy[key])
    elif key == constants.ISPECS_STD:
      ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
  return ret_dict
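# Note (illustrative, not part of the original module): keys missing from
# custom_ipolicy are deep-copied from the defaults, the ISPECS_STD entry is
# merged key by key via FillDict, and every other key already present in
# custom_ipolicy is kept exactly as given.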


def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
  """Fills the disk parameter defaults.

  @see: L{FillDict} for parameters and return value

  """
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES

  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
                             skip_keys=skip_keys))
              for dt in constants.DISK_TEMPLATES)


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
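# Illustrative example (not part of the original module; key names made up):
# with defaults = {"link": "br0", "mode": "bridged"}, a target of
# {constants.PP_DEFAULT: {"link": "br1"}} becomes
# {constants.PP_DEFAULT: {"link": "br1", "mode": "bridged"}}, while a target
# of None becomes {constants.PP_DEFAULT: defaults}.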


def UpgradeBeParams(target):
  """Update the be parameters dict to the new format.

  @type target: dict
  @param target: "be" parameters dict

  """
  if constants.BE_MEMORY in target:
    memory = target[constants.BE_MEMORY]
    target[constants.BE_MAXMEM] = memory
    target[constants.BE_MINMEM] = memory
    del target[constants.BE_MEMORY]
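# Illustrative note (not part of the original module): a legacy dict that
# still carries constants.BE_MEMORY gets that value copied into both
# constants.BE_MAXMEM and constants.BE_MINMEM and the old key removed; so,
# assuming those constants map to "memory"/"maxmem"/"minmem", {"memory": 512}
# would become {"maxmem": 512, "minmem": 512}.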


def UpgradeDiskParams(diskparams):
  """Upgrade the disk parameters.

  @type diskparams: dict
  @param diskparams: disk parameters to upgrade
  @rtype: dict
  @return: the upgraded disk parameters dict

  """
  if not diskparams:
    result = {}
  else:
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)

  return result


def UpgradeNDParams(ndparams):
  """Upgrade ndparams structure.

  @type ndparams: dict
  @param ndparams: node parameters to upgrade
  @rtype: dict
  @return: the upgraded node parameters dict

  """
  if ndparams is None:
    ndparams = {}

  if (constants.ND_OOB_PROGRAM in ndparams and
      ndparams[constants.ND_OOB_PROGRAM] is None):
    # will be reset by the line below
    del ndparams[constants.ND_OOB_PROGRAM]
  return FillDict(constants.NDC_DEFAULTS, ndparams)
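# Illustrative note (not part of the original module): an explicit None value
# for constants.ND_OOB_PROGRAM is dropped first, so the subsequent FillDict
# call restores the default for it; every other missing key is simply filled
# in from constants.NDC_DEFAULTS.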


def MakeEmptyIPolicy():
  """Create empty IPolicy dictionary.

  """
  return {}


class ConfigObject(outils.ValidatedSlots):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __getattr__(self, name):
    if name not in self.GetAllSlots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    slots = self.GetAllSlots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  def Validate(self):
    """Validates the slots.

    """

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self.GetAllSlots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable=W0142
    return obj

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
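# Illustrative sketch (not part of the original module; the Example class is
# made up): a subclass such as
#   class Example(ConfigObject):
#     __slots__ = ["a", "b"]
# can be built as Example(a=1); reading the unset slot "b" returns None,
# Example(a=1).ToDict() yields {"a": 1} (None values are omitted), and
# Example.FromDict({"a": 1}) reconstructs an equivalent object.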


class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
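# Illustrative note (not part of the original module): AddTag("web:frontend")
# would be accepted (word characters plus ".+*/:@-" are allowed, up to
# constants.MAX_TAG_LEN characters and constants.MAX_TAGS_PER_OBJ tags per
# object); tags are stored in a set, and ToDict()/FromDict() convert that set
# to a list and back for serialization.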


class MasterNetworkParameters(ConfigObject):
  """Network configuration parameters for the master

  @ivar uuid: master node's UUID
  @ivar ip: master IP
  @ivar netmask: master netmask
  @ivar netdev: master network device
  @ivar ip_family: master IP family

  """
  __slots__ = [
    "uuid",
    "ip",
    "netmask",
    "netdev",
    "ip_family",
    ]


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = [
    "version",
    "cluster",
    "nodes",
    "nodegroups",
    "instances",
    "networks",
    "serial_no",
    ] + _TIMESTAMPS

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances", "nodegroups", "networks":
      mydict[key] = outils.ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = \
      outils.ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = \
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if there is a disk of the given type in the configuration.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set a helper let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
    if self.networks is None:
      self.networks = {}
    for network in self.networks.values():
      network.UpgradeConfig()
    self._UpgradeEnabledDiskTemplates()

  def _UpgradeEnabledDiskTemplates(self):
    """Upgrade the cluster's enabled disk templates by inspecting the currently
       enabled and/or used disk templates.

    """
    # enabled_disk_templates in the cluster config were introduced in 2.8.
    # Remove this code once upgrading from earlier versions is deprecated.
    if not self.cluster.enabled_disk_templates:
      template_set = \
        set([inst.disk_template for inst in self.instances.values()])
      # Add drbd and plain, if lvm is enabled (by specifying a volume group)
      if self.cluster.volume_group_name:
        template_set.add(constants.DT_DRBD8)
        template_set.add(constants.DT_PLAIN)
      # FIXME: Adapt this when dis/enabling at configure time is removed.
      # Enable 'file' and 'sharedfile', if they are enabled, even though they
      # might currently not be used.
      if constants.ENABLE_FILE_STORAGE:
        template_set.add(constants.DT_FILE)
      if constants.ENABLE_SHARED_FILE_STORAGE:
        template_set.add(constants.DT_SHARED_FILE)
      # Set enabled_disk_templates to the inferred disk templates. Order them
      # according to a preference list that is based on Ganeti's history of
      # supported disk templates.
      self.cluster.enabled_disk_templates = []
      for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
        if preferred_template in template_set:
          self.cluster.enabled_disk_templates.append(preferred_template)
          template_set.remove(preferred_template)
      self.cluster.enabled_disk_templates.extend(list(template_set))


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["name", "mac", "ip", "network", "nicparams", "netinfo"] + _UUID

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams:  dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    mode = nicparams[constants.NIC_MODE]
    if (mode not in constants.NIC_VALID_MODES and
        mode != constants.VALUE_AUTO):
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)

    if (mode == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      raise errors.ConfigurationError("Missing bridged NIC link")


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = (["name", "dev_type", "logical_id", "physical_id",
                "children", "iv_name", "size", "mode", "params", "spindles"] +
               _UUID)

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) always live at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    elif self.dev_type == constants.LD_BLOCKDEV:
      return self.logical_id[1]
    elif self.dev_type == constants.LD_RBD:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None
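  # Illustrative example (not part of the original module; names made up): an
  # LV-backed disk with logical_id ("xenvg", "instance1-disk0") has the static
  # path "/dev/xenvg/instance1-disk0", while a DRBD8 disk has no static path
  # and StaticDevPath() returns None.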

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node_uuid):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
                         constants.LD_BLOCKDEV, constants.LD_RBD,
                         constants.LD_EXT]:
      result = [node_uuid]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node_uuid not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node_uuid):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node UUID, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node_uuid)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def ComputeGrowth(self, amount):
    """Compute the per-VG growth requirements.

    This only works for VG-based disks.

    @type amount: integer
    @param amount: the desired increase in (user-visible) disk space
    @rtype: dict
    @return: a dictionary of volume-groups and the required size

    """
    if self.dev_type == constants.LD_LV:
      return {self.logical_id[0]: amount}
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        return self.children[0].ComputeGrowth(amount)
      else:
        return {}
    else:
      # Other disk types do not require VG space
      return {}
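  # Illustrative example (not part of the original module; the volume group
  # name is made up): for an LV disk with logical_id ("xenvg", ...),
  # ComputeGrowth(1024) returns {"xenvg": 1024}; a DRBD8 disk delegates to its
  # data child, and all other disk types need no VG space and return {}.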

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
                         constants.LD_RBD, constants.LD_EXT):
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def Update(self, size=None, mode=None, spindles=None):
    """Apply changes to size, spindles and mode.

    """
    if self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].Update(size=size, mode=mode)
    else:
      assert not self.children

    if size is not None:
      self.size = size
    if mode is not None:
      self.mode = mode
    if spindles is not None:
      self.spindles = spindles

  def UnsetSize(self):
    """Recursively sets the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node_uuid, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node_uuid: the node UUID we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node_uuid, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode_uuid, snode_uuid, port, pminor, sminor, secret = self.logical_id
      if target_node_uuid not in (pnode_uuid, snode_uuid):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node_uuid)
      pnode_ip = nodes_ip.get(pnode_uuid, None)
      snode_ip = nodes_ip.get(snode_uuid, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode_uuid == target_node_uuid:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = outils.ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if self.spindles is not None:
      val += ", spindles=%s" % self.spindles
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()

    # FIXME: Make this configurable in Ganeti 2.7
    self.params = {}
    # add here config upgrade for this disk

  @staticmethod
  def ComputeLDParams(disk_template, disk_params):
    """Computes Logical Disk parameters from Disk Template parameters.

    @type disk_template: string
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
    @type disk_params: dict
    @param disk_params: disk template parameters;
                        dict(template_name -> parameters)
    @rtype: list(dict)
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
      contains the LD parameters of the node. The tree is flattened in-order.

    """
    if disk_template not in constants.DISK_TEMPLATES:
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)

    assert disk_template in disk_params

    result = list()
    dt_params = disk_params[disk_template]
    if disk_template == constants.DT_DRBD8:
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], {
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
        constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL],
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
        }))

      # data LV
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
        }))

      # metadata LV
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
        }))

    elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])

    elif disk_template == constants.DT_PLAIN:
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
        }))

    elif disk_template == constants.DT_BLOCK:
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])

    elif disk_template == constants.DT_RBD:
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], {
        constants.LDP_POOL: dt_params[constants.RBD_POOL],
        }))

    elif disk_template == constants.DT_EXT:
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT])

    return result


class InstancePolicy(ConfigObject):
  """Config object representing instance policy limits dictionary.

  Note that this object is not actually used in the config, it's just
  used as a placeholder for a few functions.

  """
  @classmethod
  def CheckParameterSyntax(cls, ipolicy, check_std):
    """ Check the instance policy for validity.

    @type ipolicy: dict
    @param ipolicy: dictionary with min/max/std specs and policies
    @type check_std: bool
    @param check_std: Whether to check std value or just assume compliance
    @raise errors.ConfigurationError: when the policy is not legal

    """
    InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
    if constants.IPOLICY_DTS in ipolicy:
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
    for key in constants.IPOLICY_PARAMETERS:
      if key in ipolicy:
        InstancePolicy.CheckParameter(key, ipolicy[key])
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
    if wrong_keys:
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
                                      utils.CommaJoin(wrong_keys))

  @classmethod
  def _CheckIncompleteSpec(cls, spec, keyname):
    missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
    if missing_params:
      msg = ("Missing instance specs parameters for %s: %s" %
             (keyname, utils.CommaJoin(missing_params)))
      raise errors.ConfigurationError(msg)

  @classmethod
  def CheckISpecSyntax(cls, ipolicy, check_std):
    """Check the instance policy specs for validity.

    @type ipolicy: dict
    @param ipolicy: dictionary with min/max/std specs
    @type check_std: bool
    @param check_std: Whether to check std value or just assume compliance
    @raise errors.ConfigurationError: when specs are not valid

    """
    if constants.ISPECS_MINMAX not in ipolicy:
      # Nothing to check
      return

    if check_std and constants.ISPECS_STD not in ipolicy:
      msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
      raise errors.ConfigurationError(msg)
    stdspec = ipolicy.get(constants.ISPECS_STD)
    if check_std:
      InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)

    if not ipolicy[constants.ISPECS_MINMAX]:
      raise errors.ConfigurationError("Empty minmax specifications")
    std_is_good = False
    for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
      missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
      if missing:
        msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
        raise errors.ConfigurationError(msg)
      for (key, spec) in minmaxspecs.items():
        InstancePolicy._CheckIncompleteSpec(spec, key)

      spec_std_ok = True
      for param in constants.ISPECS_PARAMETERS:
        par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
                                                           param, check_std)
        spec_std_ok = spec_std_ok and par_std_ok
      std_is_good = std_is_good or spec_std_ok
    if not std_is_good:
      raise errors.ConfigurationError("Invalid std specifications")

  @classmethod
  def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
    """Check the instance policy specs for validity on a given key.

    We check if the instance specs make sense for a given key, that is
    if minmaxspecs[min][name] <= stdspec[name] <= minmaxspecs[max][name].

    @type minmaxspecs: dict
    @param minmaxspecs: dictionary with min and max instance spec
    @type stdspec: dict
    @param stdspec: dictionary with standard instance spec
    @type name: string
    @param name: what are the limits for
    @type check_std: bool
    @param check_std: Whether to check std value or just assume compliance
    @rtype: bool
    @return: C{True} when specs are valid, C{False} when standard spec for the
        given name is not valid
    @raise errors.ConfigurationError: when min/max specs for the given name
        are not valid

    """
    minspec = minmaxspecs[constants.ISPECS_MIN]
    maxspec = minmaxspecs[constants.ISPECS_MAX]
    min_v = minspec[name]
    max_v = maxspec[name]

    if min_v > max_v:
      err = ("Invalid specification of min/max values for %s: %s/%s" %
             (name, min_v, max_v))
      raise errors.ConfigurationError(err)
    elif check_std:
      std_v = stdspec.get(name, min_v)
      return std_v >= min_v and std_v <= max_v
    else:
      return True
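  # Illustrative example (not part of the original module): with a min/max
  # pair of 512 and 4096 for a parameter, a std value of 1024 passes, a
  # missing std value defaults to the min and passes, and min > max raises
  # errors.ConfigurationError regardless of check_std.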

  @classmethod
  def CheckDiskTemplates(cls, disk_templates):
    """Checks the disk templates for validity.

    """
    if not disk_templates:
      raise errors.ConfigurationError("Instance policy must contain" +
                                      " at least one disk template")
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
    if wrong:
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
                                      utils.CommaJoin(wrong))

  @classmethod
  def CheckParameter(cls, key, value):
    """Checks a parameter.

    Currently we expect all parameters to be float values.

    """
    try:
      float(value)
    except (TypeError, ValueError), err:
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
                                      " '%s', error: %s" % (key, value, err))


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "osparams",
    "admin_state",
    "nics",
    "disks",
    "disk_template",
    "disks_active",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of names of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of names of all the nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node_uuid=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @type lvmap: dict
    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.
    @type devs: list of L{Disk}
    @param devs: disks to get the LV name for. If None, all disks of this
        instance are used.
    @type node_uuid: string
    @param node_uuid: UUID of the node to get the LV names for. If None, the
        primary node of this instance is used.
    @return: None if lvmap arg is given, otherwise, a dictionary of
        the form { 'node_uuid' : ['volume1', 'volume2', ...], ... };
        volumeN is of the form "vg_name/lv_name", compatible with
        GetVolumeList()

    """
    if node_uuid is None:
      node_uuid = self.primary_node

    if lvmap is None:
      lvmap = {
        node_uuid: [],
        }
      ret = lvmap
    else:
      if not node_uuid in lvmap:
        lvmap[node_uuid] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node_uuid)

    return ret
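  # Illustrative note (not part of the original module): for an LV-only
  # instance the result maps the primary node UUID to entries of the form
  # "vg_name/lv_name", while for DRBD8 disks the underlying data and metadata
  # LVs are attributed to both nodes named in the DRBD logical_id.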

  def FindDisk(self, idx):
    """Find a disk given its index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except (TypeError, ValueError), err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
                                 errors.ECODE_INVAL)

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = outils.ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    if "admin_state" not in val:
      if val.get("admin_up", False):
        val["admin_state"] = constants.ADMINST_UP
      else:
        val["admin_state"] = constants.ADMINST_DOWN
    if "admin_up" in val:
      del val["admin_up"]
    obj = super(Instance, cls).FromDict(val)
    obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    if self.hvparams:
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass
    if self.osparams is None:
      self.osparams = {}
    UpgradeBeParams(self.beparams)
    if self.disks_active is None:
      self.disks_active = self.admin_state == constants.ADMINST_UP


class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the supported parameters by this OS

  @type VARIANT_DELIM: string
  @cvar VARIANT_DELIM: the variant delimiter

  """
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]

  VARIANT_DELIM = "+"

  @classmethod
  def SplitNameVariant(cls, name):
    """Splits the name into the proper name and variant.

    @param name: the OS (unprocessed) name
    @rtype: list
    @return: a list of two elements; if the original name didn't
        contain a variant, the variant is returned as an empty string

    """
    nv = name.split(cls.VARIANT_DELIM, 1)
    if len(nv) == 1:
      nv.append("")
    return nv

  @classmethod
  def GetName(cls, name):
    """Returns the proper name of the os (without the variant).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[0]

  @classmethod
  def GetVariant(cls, name):
    """Returns the variant of the os (without the base name).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[1]
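# Illustrative example (not part of the original module): for the OS name
# "debian+squeeze", SplitNameVariant() returns ["debian", "squeeze"], so
# GetName() yields "debian" and GetVariant() yields "squeeze"; a plain
# "debian" splits into ["debian", ""].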


class ExtStorage(ConfigObject):
  """Config object representing an External Storage Provider.

  """
  __slots__ = [
    "name",
    "path",
    "create_script",
    "remove_script",
    "grow_script",
    "attach_script",
    "detach_script",
    "setinfo_script",
    "verify_script",
    "supported_parameters",
    ]


class NodeHvState(ConfigObject):
  """Hypervisor state on a node.

  @ivar mem_total: Total amount of memory
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
    available)
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
    rounding
  @ivar mem_inst: Memory used by instances living on node
  @ivar cpu_total: Total node CPU core count
  @ivar cpu_node: Number of CPU cores reserved for the node itself

  """
  __slots__ = [
    "mem_total",
    "mem_node",
    "mem_hv",
    "mem_inst",
    "cpu_total",
    "cpu_node",
    ] + _TIMESTAMPS


class NodeDiskState(ConfigObject):
  """Disk state on a node.

  """
  __slots__ = [
    "total",
    "reserved",
    "overhead",
    ] + _TIMESTAMPS


class Node(TaggableObject):
  """Config object representing a node.

  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
  @ivar hv_state_static: Hypervisor state overridden by user
  @ivar disk_state: Disk state (e.g. free space)
  @ivar disk_state_static: Disk state overridden by user

  """
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    "group",
    "master_capable",
    "vm_capable",
    "ndparams",
    "powered",
    "hv_state",
    "hv_state_static",
    "disk_state",
    "disk_state_static",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.master_capable is None:
      self.master_capable = True

    if self.vm_capable is None:
      self.vm_capable = True

    if self.ndparams is None:
      self.ndparams = {}
    # And remove any global parameter
    for key in constants.NDC_GLOBALS:
      if key in self.ndparams:
        logging.warning("Ignoring %s node parameter for node %s",
                        key, self.name)
        del self.ndparams[key]

    if self.powered is None:
      self.powered = True

  def ToDict(self):
    """Custom function for serializing.

    """
    data = super(Node, self).ToDict()

    hv_state = data.get("hv_state", None)
    if hv_state is not None:
      data["hv_state"] = outils.ContainerToDicts(hv_state)

    disk_state = data.get("disk_state", None)
    if disk_state is not None:
      data["disk_state"] = \
        dict((key, outils.ContainerToDicts(value))
             for (key, value) in disk_state.items())

    return data

  @classmethod
  def FromDict(cls, val):
    """Custom function for deserializing.

    """
    obj = super(Node, cls).FromDict(val)

    if obj.hv_state is not None:
      obj.hv_state = \
        outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)

    if obj.disk_state is not None:
      obj.disk_state = \
        dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
             for (key, value) in obj.disk_state.items())

    return obj


class NodeGroup(TaggableObject):
  """Config object representing a node group."""
  __slots__ = [
    "name",
    "members",
    "ndparams",
    "diskparams",
    "ipolicy",
    "serial_no",
    "hv_state_static",
    "disk_state_static",
    "alloc_policy",
    "networks",
    ] + _TIMESTAMPS + _UUID

  def ToDict(self):
    """Custom function for nodegroup.

    This discards the members object, which gets recalculated and is only kept
    in memory.

    """
    mydict = super(NodeGroup, self).ToDict()
    del mydict["members"]
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for nodegroup.

    The members slot is initialized to an empty list, upon deserialization.

    """
    obj = super(NodeGroup, cls).FromDict(val)
    obj.members = []
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.ndparams is None:
      self.ndparams = {}

    if self.serial_no is None:
      self.serial_no = 1

    if self.alloc_policy is None:
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED

    # We only update mtime, and not ctime, since we would not be able
    # to provide a correct value for creation time.
    if self.mtime is None:
      self.mtime = time.time()

    if self.diskparams is None:
      self.diskparams = {}
    if self.ipolicy is None:
      self.ipolicy = MakeEmptyIPolicy()

    if self.networks is None:
      self.networks = {}

  def FillND(self, node):
    """Return filled out ndparams for L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(node.ndparams)

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the node group defaults

    """
    return FillDict(self.ndparams, ndparams)
1527

    
1528

    
1529
class Cluster(TaggableObject):
1530
  """Config object representing the cluster."""
1531
  __slots__ = [
1532
    "serial_no",
1533
    "rsahostkeypub",
1534
    "highest_used_port",
1535
    "tcpudp_port_pool",
1536
    "mac_prefix",
1537
    "volume_group_name",
1538
    "reserved_lvs",
1539
    "drbd_usermode_helper",
1540
    "default_bridge",
1541
    "default_hypervisor",
1542
    "master_node",
1543
    "master_ip",
1544
    "master_netdev",
1545
    "master_netmask",
1546
    "use_external_mip_script",
1547
    "cluster_name",
1548
    "file_storage_dir",
1549
    "shared_file_storage_dir",
1550
    "enabled_hypervisors",
1551
    "hvparams",
1552
    "ipolicy",
1553
    "os_hvp",
1554
    "beparams",
1555
    "osparams",
1556
    "nicparams",
1557
    "ndparams",
1558
    "diskparams",
1559
    "candidate_pool_size",
1560
    "modify_etc_hosts",
1561
    "modify_ssh_setup",
1562
    "maintain_node_health",
1563
    "uid_pool",
1564
    "default_iallocator",
1565
    "hidden_os",
1566
    "blacklisted_os",
1567
    "primary_ip_family",
1568
    "prealloc_wipe_disks",
1569
    "hv_state_static",
1570
    "disk_state_static",
1571
    "enabled_disk_templates",
1572
    ] + _TIMESTAMPS + _UUID
1573

    
1574
  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.ndparams = UpgradeNDParams(self.ndparams)

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    if self.diskparams:
      self.diskparams = UpgradeDiskParams(self.diskparams)
    else:
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
      if wrongkeys:
        # These keys would be silently removed by FillIPolicy()
        msg = ("Cluster instance policy contains spurious keys: %s" %
               utils.CommaJoin(wrongkeys))
        raise errors.ConfigurationError(msg)
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
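
    # Illustrative note (not part of the original source): the check above
    # refuses to upgrade an instance policy carrying unknown keys rather than
    # dropping them silently.  With a hypothetical stray key:
    #
    #   cluster.ipolicy = {"no-such-key": 1}
    #   cluster.UpgradeConfig()
    #   # -> errors.ConfigurationError: Cluster instance policy contains
    #   #    spurious keys: no-such-key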

  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()

    if self.tcpudp_port_pool is None:
      tcpudp_port_pool = []
    else:
      tcpudp_port_pool = list(self.tcpudp_port_pool)

    mydict["tcpudp_port_pool"] = tcpudp_port_pool

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)

    if obj.tcpudp_port_pool is None:
      obj.tcpudp_port_pool = set()
    elif not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)

    return obj
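
  # Illustrative note (not part of the original source): tcpudp_port_pool is
  # kept as a set in memory but serialized as a list, hence the custom
  # ToDict/FromDict pair above.  Sketch of the round trip:
  #
  #   cluster.tcpudp_port_pool = set([11000, 11001])
  #   data = cluster.ToDict()   # data["tcpudp_port_pool"] is a plain list
  #   Cluster.FromDict(data).tcpudp_port_pool  # -> set([11000, 11001])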

  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: The diskparams dict to fill
    @return: a copy of the given diskparams with missing values filled
        from the cluster defaults

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict
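
  # Illustrative sketch (not part of the original source): the fill stack
  # above layers the cluster-wide hvparams first, then any per-OS os_hvp
  # overrides on top.  With hypothetical values:
  #
  #   cluster.hvparams = {"kvm": {"kernel_path": "/boot/vmlinuz", "acpi": True}}
  #   cluster.os_hvp = {"debian": {"kvm": {"acpi": False}}}
  #   cluster.GetHVDefaults("kvm", os_name="debian")
  #   # -> {"kernel_path": "/boot/vmlinuz", "acpi": False}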

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type hvparams: dict
    @param hvparams: the dict to fill with default values
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance whose hvparams to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance whose beparams to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
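
  # Illustrative sketch (not part of the original source): nicparams are
  # filled from the cluster's "default" parameter group.  Assuming the
  # cluster defaults are {"mode": "bridged", "link": "xen-br0"}:
  #
  #   cluster.SimpleFillNIC({"link": "br100"})
  #   # -> {"mode": "bridged", "link": "br100"}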

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
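
  # Illustrative sketch (not part of the original source): for a variant name
  # such as "debian+minimal" the precedence is
  # base OS osparams < variant osparams < explicitly passed parameters.
  # With hypothetical cluster osparams:
  #
  #   cluster.osparams = {"debian": {"dhcp": "yes", "mirror": "ftp.debian.org"},
  #                       "debian+minimal": {"dhcp": "no"}}
  #   cluster.SimpleFillOS("debian+minimal", {"mirror": "deb.example.com"})
  #   # -> {"dhcp": "no", "mirror": "deb.example.com"}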

  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))
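
  # Illustrative note (not part of the original source): node parameters are
  # resolved in two steps here, giving the precedence
  # cluster ndparams < node group ndparams < node ndparams.
  # With hypothetical values:
  #
  #   cluster.ndparams = {"spindle_count": 1, "oob_program": None}
  #   nodegroup.ndparams = {"spindle_count": 2}
  #   node.ndparams = {"oob_program": "/bin/oob"}
  #   cluster.FillND(node, nodegroup)
  #   # -> {"spindle_count": 2, "oob_program": "/bin/oob"}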

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)

  def IsDiskTemplateEnabled(self, disk_template):
    """Checks if a particular disk template is enabled.

    """
    return utils.storage.IsDiskTemplateEnabled(
        disk_template, self.enabled_disk_templates)

  def IsFileStorageEnabled(self):
    """Checks if file storage is enabled.

    """
    return utils.storage.IsFileStorageEnabled(self.enabled_disk_templates)

  def IsSharedFileStorageEnabled(self):
    """Checks if shared file storage is enabled.

    """
    return utils.storage.IsSharedFileStorageEnabled(
        self.enabled_disk_templates)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = \
      outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True
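
  # Illustrative note (not part of the original source): the assertions above
  # encode per-kind requirements, e.g. an SSH console must provide at least
  # instance, host, user and command, while message, port and display may
  # stay unset (hypothetical values, relying on the keyword constructor
  # provided by ConfigObject):
  #
  #   InstanceConsole(instance="inst1.example.com", kind=constants.CONS_SSH,
  #                   host="node1.example.com", user="root",
  #                   command=["ssh", "-t", "node1.example.com"]).Validate()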


class Network(TaggableObject):
  """Object representing a network definition for Ganeti.

  """
  __slots__ = [
    "name",
    "serial_no",
    "mac_prefix",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "reservations",
    "ext_reservations",
    ] + _TIMESTAMPS + _UUID

  def HooksDict(self, prefix=""):
    """Export a dictionary used by hooks with a network's information.

    @type prefix: string
    @param prefix: Prefix to prepend to the dict entries

    """
    result = {
      "%sNETWORK_NAME" % prefix: self.name,
      "%sNETWORK_UUID" % prefix: self.uuid,
      "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
    }
    if self.network:
      result["%sNETWORK_SUBNET" % prefix] = self.network
    if self.gateway:
      result["%sNETWORK_GATEWAY" % prefix] = self.gateway
    if self.network6:
      result["%sNETWORK_SUBNET6" % prefix] = self.network6
    if self.gateway6:
      result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
    if self.mac_prefix:
      result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix

    return result
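
  # Illustrative sketch (not part of the original source): for a hypothetical
  # network, HooksDict("NEW_") yields something like
  #
  #   {"NEW_NETWORK_NAME": "office", "NEW_NETWORK_UUID": "...",
  #    "NEW_NETWORK_TAGS": "", "NEW_NETWORK_SUBNET": "10.0.0.0/24",
  #    "NEW_NETWORK_GATEWAY": "10.0.0.1"}
  #
  # with the IPv6 and MAC-prefix entries omitted when those fields are unset.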

  @classmethod
  def FromDict(cls, val):
    """Custom function for networks.

    Remove deprecated network_type and family.

    """
    if "network_type" in val:
      del val["network_type"]
    if "family" in val:
      del val["family"]
    obj = super(Network, cls).FromDict(val)
    return obj


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
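
  # Illustrative round trip (not part of the original source):
  #
  #   cfg = SerializableConfigParser()
  #   cfg.add_section("node")
  #   cfg.set("node", "name", "node1.example.com")
  #   data = cfg.Dumps()
  #   copy = SerializableConfigParser.Loads(data)
  #   copy.get("node", "name")  # -> "node1.example.com"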


class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  @type lv_list: list of strings
  @ivar lv_list: names of the LVs hosted on the PV
  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    "lv_list"
    ]

  def IsEmpty(self):
    """Is this PV empty?

    """
    return self.size <= (self.free + 1)

  def IsAllocatable(self):
    """Is this PV allocatable?

    """
    return ("a" in self.attributes)