Statistics
| Branch: | Tag: | Revision:

root / lib / objects.py @ 30b12688

History | View | Annotate | Download (65.5 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable=E0203,W0201,R0902
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
# R0902: Allow instances of these objects to have more than 20 attributes
37

    
38
import ConfigParser
39
import re
40
import copy
41
import logging
42
import time
43
from cStringIO import StringIO
44

    
45
from ganeti import errors
46
from ganeti import constants
47
from ganeti import netutils
48
from ganeti import outils
49
from ganeti import utils
50

    
51
from socket import AF_INET
52

    
53

    
54
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
55
           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
56

    
57
_TIMESTAMPS = ["ctime", "mtime"]
58
_UUID = ["uuid"]
59

    
60

    
61
def FillDict(defaults_dict, custom_dict, skip_keys=None):
62
  """Basic function to apply settings on top a default dict.
63

64
  @type defaults_dict: dict
65
  @param defaults_dict: dictionary holding the default values
66
  @type custom_dict: dict
67
  @param custom_dict: dictionary holding customized value
68
  @type skip_keys: list
69
  @param skip_keys: which keys not to fill
70
  @rtype: dict
71
  @return: dict with the 'full' values
72

73
  """
74
  ret_dict = copy.deepcopy(defaults_dict)
75
  ret_dict.update(custom_dict)
76
  if skip_keys:
77
    for k in skip_keys:
78
      try:
79
        del ret_dict[k]
80
      except KeyError:
81
        pass
82
  return ret_dict
83

    
84

    
85
def FillIPolicy(default_ipolicy, custom_ipolicy):
86
  """Fills an instance policy with defaults.
87

88
  """
89
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
90
  ret_dict = copy.deepcopy(custom_ipolicy)
91
  for key in default_ipolicy:
92
    if key not in ret_dict:
93
      ret_dict[key] = copy.deepcopy(default_ipolicy[key])
94
    elif key == constants.ISPECS_STD:
95
      ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
96
  return ret_dict
97

    
98

    
99
def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
100
  """Fills the disk parameter defaults.
101

102
  @see: L{FillDict} for parameters and return value
103

104
  """
105
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
106

    
107
  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
108
                             skip_keys=skip_keys))
109
              for dt in constants.DISK_TEMPLATES)
110

    
111

    
112
def UpgradeGroupedParams(target, defaults):
113
  """Update all groups for the target parameter.
114

115
  @type target: dict of dicts
116
  @param target: {group: {parameter: value}}
117
  @type defaults: dict
118
  @param defaults: default parameter values
119

120
  """
121
  if target is None:
122
    target = {constants.PP_DEFAULT: defaults}
123
  else:
124
    for group in target:
125
      target[group] = FillDict(defaults, target[group])
126
  return target
127

    
128

    
129
def UpgradeBeParams(target):
130
  """Update the be parameters dict to the new format.
131

132
  @type target: dict
133
  @param target: "be" parameters dict
134

135
  """
136
  if constants.BE_MEMORY in target:
137
    memory = target[constants.BE_MEMORY]
138
    target[constants.BE_MAXMEM] = memory
139
    target[constants.BE_MINMEM] = memory
140
    del target[constants.BE_MEMORY]
141

    
142

    
143
def UpgradeDiskParams(diskparams):
144
  """Upgrade the disk parameters.
145

146
  @type diskparams: dict
147
  @param diskparams: disk parameters to upgrade
148
  @rtype: dict
149
  @return: the upgraded disk parameters dict
150

151
  """
152
  if not diskparams:
153
    result = {}
154
  else:
155
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
156

    
157
  return result
158

    
159

    
160
def UpgradeNDParams(ndparams):
161
  """Upgrade ndparams structure.
162

163
  @type ndparams: dict
164
  @param ndparams: disk parameters to upgrade
165
  @rtype: dict
166
  @return: the upgraded node parameters dict
167

168
  """
169
  if ndparams is None:
170
    ndparams = {}
171

    
172
  if (constants.ND_OOB_PROGRAM in ndparams and
173
      ndparams[constants.ND_OOB_PROGRAM] is None):
174
    # will be reset by the line below
175
    del ndparams[constants.ND_OOB_PROGRAM]
176
  return FillDict(constants.NDC_DEFAULTS, ndparams)
177

    
178

    
179
def MakeEmptyIPolicy():
180
  """Create empty IPolicy dictionary.
181

182
  """
183
  return {}
184

    
185

    
186
class ConfigObject(outils.ValidatedSlots):
187
  """A generic config object.
188

189
  It has the following properties:
190

191
    - provides somewhat safe recursive unpickling and pickling for its classes
192
    - unset attributes which are defined in slots are always returned
193
      as None instead of raising an error
194

195
  Classes derived from this must always declare __slots__ (we use many
196
  config objects and the memory reduction is useful)
197

198
  """
199
  __slots__ = []
200

    
201
  def __getattr__(self, name):
202
    if name not in self.GetAllSlots():
203
      raise AttributeError("Invalid object attribute %s.%s" %
204
                           (type(self).__name__, name))
205
    return None
206

    
207
  def __setstate__(self, state):
208
    slots = self.GetAllSlots()
209
    for name in state:
210
      if name in slots:
211
        setattr(self, name, state[name])
212

    
213
  def Validate(self):
214
    """Validates the slots.
215

216
    """
217

    
218
  def ToDict(self):
219
    """Convert to a dict holding only standard python types.
220

221
    The generic routine just dumps all of this object's attributes in
222
    a dict. It does not work if the class has children who are
223
    ConfigObjects themselves (e.g. the nics list in an Instance), in
224
    which case the object should subclass the function in order to
225
    make sure all objects returned are only standard python types.
226

227
    """
228
    result = {}
229
    for name in self.GetAllSlots():
230
      value = getattr(self, name, None)
231
      if value is not None:
232
        result[name] = value
233
    return result
234

    
235
  __getstate__ = ToDict
236

    
237
  @classmethod
238
  def FromDict(cls, val):
239
    """Create an object from a dictionary.
240

241
    This generic routine takes a dict, instantiates a new instance of
242
    the given class, and sets attributes based on the dict content.
243

244
    As for `ToDict`, this does not work if the class has children
245
    who are ConfigObjects themselves (e.g. the nics list in an
246
    Instance), in which case the object should subclass the function
247
    and alter the objects.
248

249
    """
250
    if not isinstance(val, dict):
251
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
252
                                      " expected dict, got %s" % type(val))
253
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
254
    obj = cls(**val_str) # pylint: disable=W0142
255
    return obj
256

    
257
  def Copy(self):
258
    """Makes a deep copy of the current object and its children.
259

260
    """
261
    dict_form = self.ToDict()
262
    clone_obj = self.__class__.FromDict(dict_form)
263
    return clone_obj
264

    
265
  def __repr__(self):
266
    """Implement __repr__ for ConfigObjects."""
267
    return repr(self.ToDict())
268

    
269
  def UpgradeConfig(self):
270
    """Fill defaults for missing configuration values.
271

272
    This method will be called at configuration load time, and its
273
    implementation will be object dependent.
274

275
    """
276
    pass
277

    
278

    
279
class TaggableObject(ConfigObject):
280
  """An generic class supporting tags.
281

282
  """
283
  __slots__ = ["tags"]
284
  VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")
285

    
286
  @classmethod
287
  def ValidateTag(cls, tag):
288
    """Check if a tag is valid.
289

290
    If the tag is invalid, an errors.TagError will be raised. The
291
    function has no return value.
292

293
    """
294
    if not isinstance(tag, basestring):
295
      raise errors.TagError("Invalid tag type (not a string)")
296
    if len(tag) > constants.MAX_TAG_LEN:
297
      raise errors.TagError("Tag too long (>%d characters)" %
298
                            constants.MAX_TAG_LEN)
299
    if not tag:
300
      raise errors.TagError("Tags cannot be empty")
301
    if not cls.VALID_TAG_RE.match(tag):
302
      raise errors.TagError("Tag contains invalid characters")
303

    
304
  def GetTags(self):
305
    """Return the tags list.
306

307
    """
308
    tags = getattr(self, "tags", None)
309
    if tags is None:
310
      tags = self.tags = set()
311
    return tags
312

    
313
  def AddTag(self, tag):
314
    """Add a new tag.
315

316
    """
317
    self.ValidateTag(tag)
318
    tags = self.GetTags()
319
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
320
      raise errors.TagError("Too many tags")
321
    self.GetTags().add(tag)
322

    
323
  def RemoveTag(self, tag):
324
    """Remove a tag.
325

326
    """
327
    self.ValidateTag(tag)
328
    tags = self.GetTags()
329
    try:
330
      tags.remove(tag)
331
    except KeyError:
332
      raise errors.TagError("Tag not found")
333

    
334
  def ToDict(self):
335
    """Taggable-object-specific conversion to standard python types.
336

337
    This replaces the tags set with a list.
338

339
    """
340
    bo = super(TaggableObject, self).ToDict()
341

    
342
    tags = bo.get("tags", None)
343
    if isinstance(tags, set):
344
      bo["tags"] = list(tags)
345
    return bo
346

    
347
  @classmethod
348
  def FromDict(cls, val):
349
    """Custom function for instances.
350

351
    """
352
    obj = super(TaggableObject, cls).FromDict(val)
353
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
354
      obj.tags = set(obj.tags)
355
    return obj
356

    
357

    
358
class MasterNetworkParameters(ConfigObject):
359
  """Network configuration parameters for the master
360

361
  @ivar uuid: master nodes UUID
362
  @ivar ip: master IP
363
  @ivar netmask: master netmask
364
  @ivar netdev: master network device
365
  @ivar ip_family: master IP family
366

367
  """
368
  __slots__ = [
369
    "uuid",
370
    "ip",
371
    "netmask",
372
    "netdev",
373
    "ip_family",
374
    ]
375

    
376

    
377
class ConfigData(ConfigObject):
378
  """Top-level config object."""
379
  __slots__ = [
380
    "version",
381
    "cluster",
382
    "nodes",
383
    "nodegroups",
384
    "instances",
385
    "networks",
386
    "serial_no",
387
    ] + _TIMESTAMPS
388

    
389
  def ToDict(self):
390
    """Custom function for top-level config data.
391

392
    This just replaces the list of instances, nodes and the cluster
393
    with standard python types.
394

395
    """
396
    mydict = super(ConfigData, self).ToDict()
397
    mydict["cluster"] = mydict["cluster"].ToDict()
398
    for key in "nodes", "instances", "nodegroups", "networks":
399
      mydict[key] = outils.ContainerToDicts(mydict[key])
400

    
401
    return mydict
402

    
403
  @classmethod
404
  def FromDict(cls, val):
405
    """Custom function for top-level config data
406

407
    """
408
    obj = super(ConfigData, cls).FromDict(val)
409
    obj.cluster = Cluster.FromDict(obj.cluster)
410
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
411
    obj.instances = \
412
      outils.ContainerFromDicts(obj.instances, dict, Instance)
413
    obj.nodegroups = \
414
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
415
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
416
    return obj
417

    
418
  def HasAnyDiskOfType(self, dev_type):
419
    """Check if in there is at disk of the given type in the configuration.
420

421
    @type dev_type: L{constants.DTS_BLOCK}
422
    @param dev_type: the type to look for
423
    @rtype: boolean
424
    @return: boolean indicating if a disk of the given type was found or not
425

426
    """
427
    for instance in self.instances.values():
428
      for disk in instance.disks:
429
        if disk.IsBasedOnDiskType(dev_type):
430
          return True
431
    return False
432

    
433
  def UpgradeConfig(self):
434
    """Fill defaults for missing configuration values.
435

436
    """
437
    self.cluster.UpgradeConfig()
438
    for node in self.nodes.values():
439
      node.UpgradeConfig()
440
    for instance in self.instances.values():
441
      instance.UpgradeConfig()
442
    self._UpgradeEnabledDiskTemplates()
443
    if self.nodegroups is None:
444
      self.nodegroups = {}
445
    for nodegroup in self.nodegroups.values():
446
      nodegroup.UpgradeConfig()
447
      InstancePolicy.UpgradeDiskTemplates(
448
        nodegroup.ipolicy, self.cluster.enabled_disk_templates)
449
    if self.cluster.drbd_usermode_helper is None:
450
      # To decide if we set an helper let's check if at least one instance has
451
      # a DRBD disk. This does not cover all the possible scenarios but it
452
      # gives a good approximation.
453
      if self.HasAnyDiskOfType(constants.DT_DRBD8):
454
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
455
    if self.networks is None:
456
      self.networks = {}
457
    for network in self.networks.values():
458
      network.UpgradeConfig()
459

    
460
  def _UpgradeEnabledDiskTemplates(self):
461
    """Upgrade the cluster's enabled disk templates by inspecting the currently
462
       enabled and/or used disk templates.
463

464
    """
465
    if not self.cluster.enabled_disk_templates:
466
      template_set = \
467
        set([inst.disk_template for inst in self.instances.values()])
468
      # Add drbd and plain, if lvm is enabled (by specifying a volume group)
469
      if self.cluster.volume_group_name:
470
        template_set.add(constants.DT_DRBD8)
471
        template_set.add(constants.DT_PLAIN)
472
      # Set enabled_disk_templates to the inferred disk templates. Order them
473
      # according to a preference list that is based on Ganeti's history of
474
      # supported disk templates.
475
      self.cluster.enabled_disk_templates = []
476
      for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
477
        if preferred_template in template_set:
478
          self.cluster.enabled_disk_templates.append(preferred_template)
479
          template_set.remove(preferred_template)
480
      self.cluster.enabled_disk_templates.extend(list(template_set))
481
    InstancePolicy.UpgradeDiskTemplates(
482
      self.cluster.ipolicy, self.cluster.enabled_disk_templates)
483

    
484

    
485
class NIC(ConfigObject):
486
  """Config object representing a network card."""
487
  __slots__ = ["name", "mac", "ip", "network", "nicparams", "netinfo"] + _UUID
488

    
489
  @classmethod
490
  def CheckParameterSyntax(cls, nicparams):
491
    """Check the given parameters for validity.
492

493
    @type nicparams:  dict
494
    @param nicparams: dictionary with parameter names/value
495
    @raise errors.ConfigurationError: when a parameter is not valid
496

497
    """
498
    mode = nicparams[constants.NIC_MODE]
499
    if (mode not in constants.NIC_VALID_MODES and
500
        mode != constants.VALUE_AUTO):
501
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
502

    
503
    if (mode == constants.NIC_MODE_BRIDGED and
504
        not nicparams[constants.NIC_LINK]):
505
      raise errors.ConfigurationError("Missing bridged NIC link")
506

    
507

    
508
class Disk(ConfigObject):
509
  """Config object representing a block device."""
510
  __slots__ = (["name", "dev_type", "logical_id", "physical_id",
511
                "children", "iv_name", "size", "mode", "params", "spindles"] +
512
               _UUID)
513

    
514
  def CreateOnSecondary(self):
515
    """Test if this device needs to be created on a secondary node."""
516
    return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
517

    
518
  def AssembleOnSecondary(self):
519
    """Test if this device needs to be assembled on a secondary node."""
520
    return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
521

    
522
  def OpenOnSecondary(self):
523
    """Test if this device needs to be opened on a secondary node."""
524
    return self.dev_type in (constants.DT_PLAIN,)
525

    
526
  def StaticDevPath(self):
527
    """Return the device path if this device type has a static one.
528

529
    Some devices (LVM for example) live always at the same /dev/ path,
530
    irrespective of their status. For such devices, we return this
531
    path, for others we return None.
532

533
    @warning: The path returned is not a normalized pathname; callers
534
        should check that it is a valid path.
535

536
    """
537
    if self.dev_type == constants.DT_PLAIN:
538
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
539
    elif self.dev_type == constants.DT_BLOCK:
540
      return self.logical_id[1]
541
    elif self.dev_type == constants.DT_RBD:
542
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
543
    return None
544

    
545
  def ChildrenNeeded(self):
546
    """Compute the needed number of children for activation.
547

548
    This method will return either -1 (all children) or a positive
549
    number denoting the minimum number of children needed for
550
    activation (only mirrored devices will usually return >=0).
551

552
    Currently, only DRBD8 supports diskless activation (therefore we
553
    return 0), for all other we keep the previous semantics and return
554
    -1.
555

556
    """
557
    if self.dev_type == constants.DT_DRBD8:
558
      return 0
559
    return -1
560

    
561
  def IsBasedOnDiskType(self, dev_type):
562
    """Check if the disk or its children are based on the given type.
563

564
    @type dev_type: L{constants.DTS_BLOCK}
565
    @param dev_type: the type to look for
566
    @rtype: boolean
567
    @return: boolean indicating if a device of the given type was found or not
568

569
    """
570
    if self.children:
571
      for child in self.children:
572
        if child.IsBasedOnDiskType(dev_type):
573
          return True
574
    return self.dev_type == dev_type
575

    
576
  def GetNodes(self, node_uuid):
577
    """This function returns the nodes this device lives on.
578

579
    Given the node on which the parent of the device lives on (or, in
580
    case of a top-level device, the primary node of the devices'
581
    instance), this function will return a list of nodes on which this
582
    devices needs to (or can) be assembled.
583

584
    """
585
    if self.dev_type in [constants.DT_PLAIN, constants.DT_FILE,
586
                         constants.DT_BLOCK, constants.DT_RBD,
587
                         constants.DT_EXT, constants.DT_SHARED_FILE]:
588
      result = [node_uuid]
589
    elif self.dev_type in constants.DTS_DRBD:
590
      result = [self.logical_id[0], self.logical_id[1]]
591
      if node_uuid not in result:
592
        raise errors.ConfigurationError("DRBD device passed unknown node")
593
    else:
594
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
595
    return result
596

    
597
  def ComputeNodeTree(self, parent_node_uuid):
598
    """Compute the node/disk tree for this disk and its children.
599

600
    This method, given the node on which the parent disk lives, will
601
    return the list of all (node UUID, disk) pairs which describe the disk
602
    tree in the most compact way. For example, a drbd/lvm stack
603
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
604
    which represents all the top-level devices on the nodes.
605

606
    """
607
    my_nodes = self.GetNodes(parent_node_uuid)
608
    result = [(node, self) for node in my_nodes]
609
    if not self.children:
610
      # leaf device
611
      return result
612
    for node in my_nodes:
613
      for child in self.children:
614
        child_result = child.ComputeNodeTree(node)
615
        if len(child_result) == 1:
616
          # child (and all its descendants) is simple, doesn't split
617
          # over multiple hosts, so we don't need to describe it, our
618
          # own entry for this node describes it completely
619
          continue
620
        else:
621
          # check if child nodes differ from my nodes; note that
622
          # subdisk can differ from the child itself, and be instead
623
          # one of its descendants
624
          for subnode, subdisk in child_result:
625
            if subnode not in my_nodes:
626
              result.append((subnode, subdisk))
627
            # otherwise child is under our own node, so we ignore this
628
            # entry (but probably the other results in the list will
629
            # be different)
630
    return result
631

    
632
  def ComputeGrowth(self, amount):
633
    """Compute the per-VG growth requirements.
634

635
    This only works for VG-based disks.
636

637
    @type amount: integer
638
    @param amount: the desired increase in (user-visible) disk space
639
    @rtype: dict
640
    @return: a dictionary of volume-groups and the required size
641

642
    """
643
    if self.dev_type == constants.DT_PLAIN:
644
      return {self.logical_id[0]: amount}
645
    elif self.dev_type == constants.DT_DRBD8:
646
      if self.children:
647
        return self.children[0].ComputeGrowth(amount)
648
      else:
649
        return {}
650
    else:
651
      # Other disk types do not require VG space
652
      return {}
653

    
654
  def RecordGrow(self, amount):
655
    """Update the size of this disk after growth.
656

657
    This method recurses over the disks's children and updates their
658
    size correspondigly. The method needs to be kept in sync with the
659
    actual algorithms from bdev.
660

661
    """
662
    if self.dev_type in (constants.DT_PLAIN, constants.DT_FILE,
663
                         constants.DT_RBD, constants.DT_EXT,
664
                         constants.DT_SHARED_FILE):
665
      self.size += amount
666
    elif self.dev_type == constants.DT_DRBD8:
667
      if self.children:
668
        self.children[0].RecordGrow(amount)
669
      self.size += amount
670
    else:
671
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
672
                                   " disk type %s" % self.dev_type)
673

    
674
  def Update(self, size=None, mode=None, spindles=None):
675
    """Apply changes to size, spindles and mode.
676

677
    """
678
    if self.dev_type == constants.DT_DRBD8:
679
      if self.children:
680
        self.children[0].Update(size=size, mode=mode)
681
    else:
682
      assert not self.children
683

    
684
    if size is not None:
685
      self.size = size
686
    if mode is not None:
687
      self.mode = mode
688
    if spindles is not None:
689
      self.spindles = spindles
690

    
691
  def UnsetSize(self):
692
    """Sets recursively the size to zero for the disk and its children.
693

694
    """
695
    if self.children:
696
      for child in self.children:
697
        child.UnsetSize()
698
    self.size = 0
699

    
700
  def SetPhysicalID(self, target_node_uuid, nodes_ip):
701
    """Convert the logical ID to the physical ID.
702

703
    This is used only for drbd, which needs ip/port configuration.
704

705
    The routine descends down and updates its children also, because
706
    this helps when the only the top device is passed to the remote
707
    node.
708

709
    Arguments:
710
      - target_node_uuid: the node UUID we wish to configure for
711
      - nodes_ip: a mapping of node name to ip
712

713
    The target_node must exist in in nodes_ip, and must be one of the
714
    nodes in the logical ID for each of the DRBD devices encountered
715
    in the disk tree.
716

717
    """
718
    if self.children:
719
      for child in self.children:
720
        child.SetPhysicalID(target_node_uuid, nodes_ip)
721

    
722
    if self.logical_id is None and self.physical_id is not None:
723
      return
724
    if self.dev_type in constants.DTS_DRBD:
725
      pnode_uuid, snode_uuid, port, pminor, sminor, secret = self.logical_id
726
      if target_node_uuid not in (pnode_uuid, snode_uuid):
727
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
728
                                        target_node_uuid)
729
      pnode_ip = nodes_ip.get(pnode_uuid, None)
730
      snode_ip = nodes_ip.get(snode_uuid, None)
731
      if pnode_ip is None or snode_ip is None:
732
        raise errors.ConfigurationError("Can't find primary or secondary node"
733
                                        " for %s" % str(self))
734
      p_data = (pnode_ip, port)
735
      s_data = (snode_ip, port)
736
      if pnode_uuid == target_node_uuid:
737
        self.physical_id = p_data + s_data + (pminor, secret)
738
      else: # it must be secondary, we tested above
739
        self.physical_id = s_data + p_data + (sminor, secret)
740
    else:
741
      self.physical_id = self.logical_id
742
    return
743

    
744
  def ToDict(self):
745
    """Disk-specific conversion to standard python types.
746

747
    This replaces the children lists of objects with lists of
748
    standard python types.
749

750
    """
751
    bo = super(Disk, self).ToDict()
752

    
753
    for attr in ("children",):
754
      alist = bo.get(attr, None)
755
      if alist:
756
        bo[attr] = outils.ContainerToDicts(alist)
757
    return bo
758

    
759
  @classmethod
760
  def FromDict(cls, val):
761
    """Custom function for Disks
762

763
    """
764
    obj = super(Disk, cls).FromDict(val)
765
    if obj.children:
766
      obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
767
    if obj.logical_id and isinstance(obj.logical_id, list):
768
      obj.logical_id = tuple(obj.logical_id)
769
    if obj.physical_id and isinstance(obj.physical_id, list):
770
      obj.physical_id = tuple(obj.physical_id)
771
    if obj.dev_type in constants.DTS_DRBD:
772
      # we need a tuple of length six here
773
      if len(obj.logical_id) < 6:
774
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
775
    return obj
776

    
777
  def __str__(self):
778
    """Custom str() formatter for disks.
779

780
    """
781
    if self.dev_type == constants.DT_PLAIN:
782
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
783
    elif self.dev_type in constants.DTS_DRBD:
784
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
785
      val = "<DRBD8("
786
      if self.physical_id is None:
787
        phy = "unconfigured"
788
      else:
789
        phy = ("configured as %s:%s %s:%s" %
790
               (self.physical_id[0], self.physical_id[1],
791
                self.physical_id[2], self.physical_id[3]))
792

    
793
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
794
              (node_a, minor_a, node_b, minor_b, port, phy))
795
      if self.children and self.children.count(None) == 0:
796
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
797
      else:
798
        val += "no local storage"
799
    else:
800
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
801
             (self.dev_type, self.logical_id, self.physical_id, self.children))
802
    if self.iv_name is None:
803
      val += ", not visible"
804
    else:
805
      val += ", visible as /dev/%s" % self.iv_name
806
    if self.spindles is not None:
807
      val += ", spindles=%s" % self.spindles
808
    if isinstance(self.size, int):
809
      val += ", size=%dm)>" % self.size
810
    else:
811
      val += ", size='%s')>" % (self.size,)
812
    return val
813

    
814
  def Verify(self):
815
    """Checks that this disk is correctly configured.
816

817
    """
818
    all_errors = []
819
    if self.mode not in constants.DISK_ACCESS_SET:
820
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
821
    return all_errors
822

    
823
  def UpgradeConfig(self):
824
    """Fill defaults for missing configuration values.
825

826
    """
827
    if self.children:
828
      for child in self.children:
829
        child.UpgradeConfig()
830

    
831
    # FIXME: Make this configurable in Ganeti 2.7
832
    # Params should be an empty dict that gets filled any time needed
833
    # In case of ext template we allow arbitrary params that should not
834
    # be overrided during a config reload/upgrade.
835
    if not self.params or not isinstance(self.params, dict):
836
      self.params = {}
837

    
838
    # add here config upgrade for this disk
839

    
840
    # map of legacy device types (mapping differing LD constants to new
841
    # DT constants)
842
    LEG_DEV_TYPE_MAP = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
843
    if self.dev_type in LEG_DEV_TYPE_MAP:
844
      self.dev_type = LEG_DEV_TYPE_MAP[self.dev_type]
845

    
846
  @staticmethod
847
  def ComputeLDParams(disk_template, disk_params):
848
    """Computes Logical Disk parameters from Disk Template parameters.
849

850
    @type disk_template: string
851
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
852
    @type disk_params: dict
853
    @param disk_params: disk template parameters;
854
                        dict(template_name -> parameters
855
    @rtype: list(dict)
856
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
857
      contains the LD parameters of the node. The tree is flattened in-order.
858

859
    """
860
    if disk_template not in constants.DISK_TEMPLATES:
861
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
862

    
863
    assert disk_template in disk_params
864

    
865
    result = list()
866
    dt_params = disk_params[disk_template]
867
    if disk_template == constants.DT_DRBD8:
868
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_DRBD8], {
869
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
870
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
871
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
872
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
873
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
874
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
875
        constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL],
876
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
877
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
878
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
879
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
880
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
881
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
882
        }))
883

    
884
      # data LV
885
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
886
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
887
        }))
888

    
889
      # metadata LV
890
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
891
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
892
        }))
893

    
894
    elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
895
      result.append(constants.DISK_LD_DEFAULTS[disk_template])
896

    
897
    elif disk_template == constants.DT_PLAIN:
898
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
899
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
900
        }))
901

    
902
    elif disk_template == constants.DT_BLOCK:
903
      result.append(constants.DISK_LD_DEFAULTS[constants.DT_BLOCK])
904

    
905
    elif disk_template == constants.DT_RBD:
906
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_RBD], {
907
        constants.LDP_POOL: dt_params[constants.RBD_POOL],
908
        }))
909

    
910
    elif disk_template == constants.DT_EXT:
911
      result.append(constants.DISK_LD_DEFAULTS[constants.DT_EXT])
912

    
913
    return result
914

    
915

    
916
class InstancePolicy(ConfigObject):
917
  """Config object representing instance policy limits dictionary.
918

919
  Note that this object is not actually used in the config, it's just
920
  used as a placeholder for a few functions.
921

922
  """
923
  @classmethod
924
  def UpgradeDiskTemplates(cls, ipolicy, enabled_disk_templates):
925
    """Upgrades the ipolicy configuration."""
926
    if constants.IPOLICY_DTS in ipolicy:
927
      if not set(ipolicy[constants.IPOLICY_DTS]).issubset(
928
        set(enabled_disk_templates)):
929
        ipolicy[constants.IPOLICY_DTS] = list(
930
          set(ipolicy[constants.IPOLICY_DTS]) & set(enabled_disk_templates))
931

    
932
  @classmethod
933
  def CheckParameterSyntax(cls, ipolicy, check_std):
934
    """ Check the instance policy for validity.
935

936
    @type ipolicy: dict
937
    @param ipolicy: dictionary with min/max/std specs and policies
938
    @type check_std: bool
939
    @param check_std: Whether to check std value or just assume compliance
940
    @raise errors.ConfigurationError: when the policy is not legal
941

942
    """
943
    InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
944
    if constants.IPOLICY_DTS in ipolicy:
945
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
946
    for key in constants.IPOLICY_PARAMETERS:
947
      if key in ipolicy:
948
        InstancePolicy.CheckParameter(key, ipolicy[key])
949
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
950
    if wrong_keys:
951
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
952
                                      utils.CommaJoin(wrong_keys))
953

    
954
  @classmethod
955
  def _CheckIncompleteSpec(cls, spec, keyname):
956
    missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
957
    if missing_params:
958
      msg = ("Missing instance specs parameters for %s: %s" %
959
             (keyname, utils.CommaJoin(missing_params)))
960
      raise errors.ConfigurationError(msg)
961

    
962
  @classmethod
963
  def CheckISpecSyntax(cls, ipolicy, check_std):
964
    """Check the instance policy specs for validity.
965

966
    @type ipolicy: dict
967
    @param ipolicy: dictionary with min/max/std specs
968
    @type check_std: bool
969
    @param check_std: Whether to check std value or just assume compliance
970
    @raise errors.ConfigurationError: when specs are not valid
971

972
    """
973
    if constants.ISPECS_MINMAX not in ipolicy:
974
      # Nothing to check
975
      return
976

    
977
    if check_std and constants.ISPECS_STD not in ipolicy:
978
      msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
979
      raise errors.ConfigurationError(msg)
980
    stdspec = ipolicy.get(constants.ISPECS_STD)
981
    if check_std:
982
      InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)
983

    
984
    if not ipolicy[constants.ISPECS_MINMAX]:
985
      raise errors.ConfigurationError("Empty minmax specifications")
986
    std_is_good = False
987
    for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
988
      missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
989
      if missing:
990
        msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
991
        raise errors.ConfigurationError(msg)
992
      for (key, spec) in minmaxspecs.items():
993
        InstancePolicy._CheckIncompleteSpec(spec, key)
994

    
995
      spec_std_ok = True
996
      for param in constants.ISPECS_PARAMETERS:
997
        par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
998
                                                           param, check_std)
999
        spec_std_ok = spec_std_ok and par_std_ok
1000
      std_is_good = std_is_good or spec_std_ok
1001
    if not std_is_good:
1002
      raise errors.ConfigurationError("Invalid std specifications")
1003

    
1004
  @classmethod
1005
  def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
1006
    """Check the instance policy specs for validity on a given key.
1007

1008
    We check if the instance specs makes sense for a given key, that is
1009
    if minmaxspecs[min][name] <= stdspec[name] <= minmaxspec[max][name].
1010

1011
    @type minmaxspecs: dict
1012
    @param minmaxspecs: dictionary with min and max instance spec
1013
    @type stdspec: dict
1014
    @param stdspec: dictionary with standard instance spec
1015
    @type name: string
1016
    @param name: what are the limits for
1017
    @type check_std: bool
1018
    @param check_std: Whether to check std value or just assume compliance
1019
    @rtype: bool
1020
    @return: C{True} when specs are valid, C{False} when standard spec for the
1021
        given name is not valid
1022
    @raise errors.ConfigurationError: when min/max specs for the given name
1023
        are not valid
1024

1025
    """
1026
    minspec = minmaxspecs[constants.ISPECS_MIN]
1027
    maxspec = minmaxspecs[constants.ISPECS_MAX]
1028
    min_v = minspec[name]
1029
    max_v = maxspec[name]
1030

    
1031
    if min_v > max_v:
1032
      err = ("Invalid specification of min/max values for %s: %s/%s" %
1033
             (name, min_v, max_v))
1034
      raise errors.ConfigurationError(err)
1035
    elif check_std:
1036
      std_v = stdspec.get(name, min_v)
1037
      return std_v >= min_v and std_v <= max_v
1038
    else:
1039
      return True
1040

    
1041
  @classmethod
1042
  def CheckDiskTemplates(cls, disk_templates):
1043
    """Checks the disk templates for validity.
1044

1045
    """
1046
    if not disk_templates:
1047
      raise errors.ConfigurationError("Instance policy must contain" +
1048
                                      " at least one disk template")
1049
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1050
    if wrong:
1051
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
1052
                                      utils.CommaJoin(wrong))
1053

    
1054
  @classmethod
1055
  def CheckParameter(cls, key, value):
1056
    """Checks a parameter.
1057

1058
    Currently we expect all parameters to be float values.
1059

1060
    """
1061
    try:
1062
      float(value)
1063
    except (TypeError, ValueError), err:
1064
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
1065
                                      " '%s', error: %s" % (key, value, err))
1066

    
1067

    
1068
class Instance(TaggableObject):
1069
  """Config object representing an instance."""
1070
  __slots__ = [
1071
    "name",
1072
    "primary_node",
1073
    "os",
1074
    "hypervisor",
1075
    "hvparams",
1076
    "beparams",
1077
    "osparams",
1078
    "admin_state",
1079
    "nics",
1080
    "disks",
1081
    "disk_template",
1082
    "disks_active",
1083
    "network_port",
1084
    "serial_no",
1085
    ] + _TIMESTAMPS + _UUID
1086

    
1087
  def _ComputeSecondaryNodes(self):
1088
    """Compute the list of secondary nodes.
1089

1090
    This is a simple wrapper over _ComputeAllNodes.
1091

1092
    """
1093
    all_nodes = set(self._ComputeAllNodes())
1094
    all_nodes.discard(self.primary_node)
1095
    return tuple(all_nodes)
1096

    
1097
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1098
                             "List of names of secondary nodes")
1099

    
1100
  def _ComputeAllNodes(self):
1101
    """Compute the list of all nodes.
1102

1103
    Since the data is already there (in the drbd disks), keeping it as
1104
    a separate normal attribute is redundant and if not properly
1105
    synchronised can cause problems. Thus it's better to compute it
1106
    dynamically.
1107

1108
    """
1109
    def _Helper(nodes, device):
1110
      """Recursively computes nodes given a top device."""
1111
      if device.dev_type in constants.DTS_DRBD:
1112
        nodea, nodeb = device.logical_id[:2]
1113
        nodes.add(nodea)
1114
        nodes.add(nodeb)
1115
      if device.children:
1116
        for child in device.children:
1117
          _Helper(nodes, child)
1118

    
1119
    all_nodes = set()
1120
    all_nodes.add(self.primary_node)
1121
    for device in self.disks:
1122
      _Helper(all_nodes, device)
1123
    return tuple(all_nodes)
1124

    
1125
  all_nodes = property(_ComputeAllNodes, None, None,
1126
                       "List of names of all the nodes of the instance")
1127

    
1128
  def MapLVsByNode(self, lvmap=None, devs=None, node_uuid=None):
1129
    """Provide a mapping of nodes to LVs this instance owns.
1130

1131
    This function figures out what logical volumes should belong on
1132
    which nodes, recursing through a device tree.
1133

1134
    @type lvmap: dict
1135
    @param lvmap: optional dictionary to receive the
1136
        'node' : ['lv', ...] data.
1137
    @type devs: list of L{Disk}
1138
    @param devs: disks to get the LV name for. If None, all disk of this
1139
        instance are used.
1140
    @type node_uuid: string
1141
    @param node_uuid: UUID of the node to get the LV names for. If None, the
1142
        primary node of this instance is used.
1143
    @return: None if lvmap arg is given, otherwise, a dictionary of
1144
        the form { 'node_uuid' : ['volume1', 'volume2', ...], ... };
1145
        volumeN is of the form "vg_name/lv_name", compatible with
1146
        GetVolumeList()
1147

1148
    """
1149
    if node_uuid is None:
1150
      node_uuid = self.primary_node
1151

    
1152
    if lvmap is None:
1153
      lvmap = {
1154
        node_uuid: [],
1155
        }
1156
      ret = lvmap
1157
    else:
1158
      if not node_uuid in lvmap:
1159
        lvmap[node_uuid] = []
1160
      ret = None
1161

    
1162
    if not devs:
1163
      devs = self.disks
1164

    
1165
    for dev in devs:
1166
      if dev.dev_type == constants.DT_PLAIN:
1167
        lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1168

    
1169
      elif dev.dev_type in constants.DTS_DRBD:
1170
        if dev.children:
1171
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1172
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1173

    
1174
      elif dev.children:
1175
        self.MapLVsByNode(lvmap, dev.children, node_uuid)
1176

    
1177
    return ret
1178

    
1179
  def FindDisk(self, idx):
1180
    """Find a disk given having a specified index.
1181

1182
    This is just a wrapper that does validation of the index.
1183

1184
    @type idx: int
1185
    @param idx: the disk index
1186
    @rtype: L{Disk}
1187
    @return: the corresponding disk
1188
    @raise errors.OpPrereqError: when the given index is not valid
1189

1190
    """
1191
    try:
1192
      idx = int(idx)
1193
      return self.disks[idx]
1194
    except (TypeError, ValueError), err:
1195
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1196
                                 errors.ECODE_INVAL)
1197
    except IndexError:
1198
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1199
                                 " 0 to %d" % (idx, len(self.disks) - 1),
1200
                                 errors.ECODE_INVAL)
1201

    
1202
  def ToDict(self):
1203
    """Instance-specific conversion to standard python types.
1204

1205
    This replaces the children lists of objects with lists of standard
1206
    python types.
1207

1208
    """
1209
    bo = super(Instance, self).ToDict()
1210

    
1211
    for attr in "nics", "disks":
1212
      alist = bo.get(attr, None)
1213
      if alist:
1214
        nlist = outils.ContainerToDicts(alist)
1215
      else:
1216
        nlist = []
1217
      bo[attr] = nlist
1218
    return bo
1219

    
1220
  @classmethod
1221
  def FromDict(cls, val):
1222
    """Custom function for instances.
1223

1224
    """
1225
    if "admin_state" not in val:
1226
      if val.get("admin_up", False):
1227
        val["admin_state"] = constants.ADMINST_UP
1228
      else:
1229
        val["admin_state"] = constants.ADMINST_DOWN
1230
    if "admin_up" in val:
1231
      del val["admin_up"]
1232
    obj = super(Instance, cls).FromDict(val)
1233
    obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
1234
    obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
1235
    return obj
1236

    
1237
  def UpgradeConfig(self):
1238
    """Fill defaults for missing configuration values.
1239

1240
    """
1241
    for nic in self.nics:
1242
      nic.UpgradeConfig()
1243
    for disk in self.disks:
1244
      disk.UpgradeConfig()
1245
    if self.hvparams:
1246
      for key in constants.HVC_GLOBALS:
1247
        try:
1248
          del self.hvparams[key]
1249
        except KeyError:
1250
          pass
1251
    if self.osparams is None:
1252
      self.osparams = {}
1253
    UpgradeBeParams(self.beparams)
1254
    if self.disks_active is None:
1255
      self.disks_active = self.admin_state == constants.ADMINST_UP
1256

    
1257

    
1258
class OS(ConfigObject):
1259
  """Config object representing an operating system.
1260

1261
  @type supported_parameters: list
1262
  @ivar supported_parameters: a list of tuples, name and description,
1263
      containing the supported parameters by this OS
1264

1265
  @type VARIANT_DELIM: string
1266
  @cvar VARIANT_DELIM: the variant delimiter
1267

1268
  """
1269
  __slots__ = [
1270
    "name",
1271
    "path",
1272
    "api_versions",
1273
    "create_script",
1274
    "export_script",
1275
    "import_script",
1276
    "rename_script",
1277
    "verify_script",
1278
    "supported_variants",
1279
    "supported_parameters",
1280
    ]
1281

    
1282
  VARIANT_DELIM = "+"
1283

    
1284
  @classmethod
1285
  def SplitNameVariant(cls, name):
1286
    """Splits the name into the proper name and variant.
1287

1288
    @param name: the OS (unprocessed) name
1289
    @rtype: list
1290
    @return: a list of two elements; if the original name didn't
1291
        contain a variant, it's returned as an empty string
1292

1293
    """
1294
    nv = name.split(cls.VARIANT_DELIM, 1)
1295
    if len(nv) == 1:
1296
      nv.append("")
1297
    return nv
1298

    
1299
  @classmethod
1300
  def GetName(cls, name):
1301
    """Returns the proper name of the os (without the variant).
1302

1303
    @param name: the OS (unprocessed) name
1304

1305
    """
1306
    return cls.SplitNameVariant(name)[0]
1307

    
1308
  @classmethod
1309
  def GetVariant(cls, name):
1310
    """Returns the variant the os (without the base name).
1311

1312
    @param name: the OS (unprocessed) name
1313

1314
    """
1315
    return cls.SplitNameVariant(name)[1]
1316

    
1317

    
1318
class ExtStorage(ConfigObject):
1319
  """Config object representing an External Storage Provider.
1320

1321
  """
1322
  __slots__ = [
1323
    "name",
1324
    "path",
1325
    "create_script",
1326
    "remove_script",
1327
    "grow_script",
1328
    "attach_script",
1329
    "detach_script",
1330
    "setinfo_script",
1331
    "verify_script",
1332
    "supported_parameters",
1333
    ]
1334

    
1335

    
1336
class NodeHvState(ConfigObject):
1337
  """Hypvervisor state on a node.
1338

1339
  @ivar mem_total: Total amount of memory
1340
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1341
    available)
1342
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1343
    rounding
1344
  @ivar mem_inst: Memory used by instances living on node
1345
  @ivar cpu_total: Total node CPU core count
1346
  @ivar cpu_node: Number of CPU cores reserved for the node itself
1347

1348
  """
1349
  __slots__ = [
1350
    "mem_total",
1351
    "mem_node",
1352
    "mem_hv",
1353
    "mem_inst",
1354
    "cpu_total",
1355
    "cpu_node",
1356
    ] + _TIMESTAMPS
1357

    
1358

    
1359
class NodeDiskState(ConfigObject):
1360
  """Disk state on a node.
1361

1362
  """
1363
  __slots__ = [
1364
    "total",
1365
    "reserved",
1366
    "overhead",
1367
    ] + _TIMESTAMPS
1368

    
1369

    
1370
class Node(TaggableObject):
1371
  """Config object representing a node.
1372

1373
  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1374
  @ivar hv_state_static: Hypervisor state overriden by user
1375
  @ivar disk_state: Disk state (e.g. free space)
1376
  @ivar disk_state_static: Disk state overriden by user
1377

1378
  """
1379
  __slots__ = [
1380
    "name",
1381
    "primary_ip",
1382
    "secondary_ip",
1383
    "serial_no",
1384
    "master_candidate",
1385
    "offline",
1386
    "drained",
1387
    "group",
1388
    "master_capable",
1389
    "vm_capable",
1390
    "ndparams",
1391
    "powered",
1392
    "hv_state",
1393
    "hv_state_static",
1394
    "disk_state",
1395
    "disk_state_static",
1396
    ] + _TIMESTAMPS + _UUID
1397

    
1398
  def UpgradeConfig(self):
1399
    """Fill defaults for missing configuration values.
1400

1401
    """
1402
    # pylint: disable=E0203
1403
    # because these are "defined" via slots, not manually
1404
    if self.master_capable is None:
1405
      self.master_capable = True
1406

    
1407
    if self.vm_capable is None:
1408
      self.vm_capable = True
1409

    
1410
    if self.ndparams is None:
1411
      self.ndparams = {}
1412
    # And remove any global parameter
1413
    for key in constants.NDC_GLOBALS:
1414
      if key in self.ndparams:
1415
        logging.warning("Ignoring %s node parameter for node %s",
1416
                        key, self.name)
1417
        del self.ndparams[key]
1418

    
1419
    if self.powered is None:
1420
      self.powered = True
1421

    
1422
  def ToDict(self):
1423
    """Custom function for serializing.
1424

1425
    """
1426
    data = super(Node, self).ToDict()
1427

    
1428
    hv_state = data.get("hv_state", None)
1429
    if hv_state is not None:
1430
      data["hv_state"] = outils.ContainerToDicts(hv_state)
1431

    
1432
    disk_state = data.get("disk_state", None)
1433
    if disk_state is not None:
1434
      data["disk_state"] = \
1435
        dict((key, outils.ContainerToDicts(value))
1436
             for (key, value) in disk_state.items())
1437

    
1438
    return data
1439

    
1440
  @classmethod
1441
  def FromDict(cls, val):
1442
    """Custom function for deserializing.
1443

1444
    """
1445
    obj = super(Node, cls).FromDict(val)
1446

    
1447
    if obj.hv_state is not None:
1448
      obj.hv_state = \
1449
        outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1450

    
1451
    if obj.disk_state is not None:
1452
      obj.disk_state = \
1453
        dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
1454
             for (key, value) in obj.disk_state.items())
1455

    
1456
    return obj
1457

    
1458

    
1459
class NodeGroup(TaggableObject):
1460
  """Config object representing a node group."""
1461
  __slots__ = [
1462
    "name",
1463
    "members",
1464
    "ndparams",
1465
    "diskparams",
1466
    "ipolicy",
1467
    "serial_no",
1468
    "hv_state_static",
1469
    "disk_state_static",
1470
    "alloc_policy",
1471
    "networks",
1472
    ] + _TIMESTAMPS + _UUID
1473

    
1474
  def ToDict(self):
1475
    """Custom function for nodegroup.
1476

1477
    This discards the members object, which gets recalculated and is only kept
1478
    in memory.
1479

1480
    """
1481
    mydict = super(NodeGroup, self).ToDict()
1482
    del mydict["members"]
1483
    return mydict
1484

    
1485
  @classmethod
1486
  def FromDict(cls, val):
1487
    """Custom function for nodegroup.
1488

1489
    The members slot is initialized to an empty list, upon deserialization.
1490

1491
    """
1492
    obj = super(NodeGroup, cls).FromDict(val)
1493
    obj.members = []
1494
    return obj
1495

    
1496
  def UpgradeConfig(self):
1497
    """Fill defaults for missing configuration values.
1498

1499
    """
1500
    if self.ndparams is None:
1501
      self.ndparams = {}
1502

    
1503
    if self.serial_no is None:
1504
      self.serial_no = 1
1505

    
1506
    if self.alloc_policy is None:
1507
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1508

    
1509
    # We only update mtime, and not ctime, since we would not be able
1510
    # to provide a correct value for creation time.
1511
    if self.mtime is None:
1512
      self.mtime = time.time()
1513

    
1514
    if self.diskparams is None:
1515
      self.diskparams = {}
1516
    if self.ipolicy is None:
1517
      self.ipolicy = MakeEmptyIPolicy()
1518

    
1519
    if self.networks is None:
1520
      self.networks = {}
1521

    
1522
  def FillND(self, node):
1523
    """Return filled out ndparams for L{objects.Node}
1524

1525
    @type node: L{objects.Node}
1526
    @param node: A Node object to fill
1527
    @return a copy of the node's ndparams with defaults filled
1528

1529
    """
1530
    return self.SimpleFillND(node.ndparams)
1531

    
1532
  def SimpleFillND(self, ndparams):
1533
    """Fill a given ndparams dict with defaults.
1534

1535
    @type ndparams: dict
1536
    @param ndparams: the dict to fill
1537
    @rtype: dict
1538
    @return: a copy of the passed in ndparams with missing keys filled
1539
        from the node group defaults
1540

1541
    """
1542
    return FillDict(self.ndparams, ndparams)
1543

    
1544

    
1545
class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "dsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "master_netmask",
    "use_external_mip_script",
    "cluster_name",
    "file_storage_dir",
    "shared_file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "ipolicy",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "ndparams",
    "diskparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    "hv_state_static",
    "disk_state_static",
    "enabled_disk_templates",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in constants.HYPER_TYPES:
        try:
          existing_params = self.hvparams[hypervisor]
        except KeyError:
          existing_params = {}
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], existing_params)

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.ndparams = UpgradeNDParams(self.ndparams)

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    if self.diskparams:
      self.diskparams = UpgradeDiskParams(self.diskparams)
    else:
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
      if wrongkeys:
        # These keys would be silently removed by FillIPolicy()
        msg = ("Cluster instance policy contains spurious keys: %s" %
               utils.CommaJoin(wrongkeys))
        raise errors.ConfigurationError(msg)
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)

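  # Illustrative sketch (not part of the upstream module): what UpgradeConfig
  # does to a partially populated hvparams dict.  The hypervisor name and
  # the "old_config_dict" variable below are hypothetical.
  #
  #   cluster = Cluster.FromDict(old_config_dict)   # e.g. hvparams={"kvm": {}}
  #   cluster.UpgradeConfig()
  #   # afterwards every hypervisor in constants.HYPER_TYPES has a complete
  #   # parameter dict: constants.HVC_DEFAULTS overlaid with whatever the old
  #   # configuration already contained.
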
  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()

    if self.tcpudp_port_pool is None:
      tcpudp_port_pool = []
    else:
      tcpudp_port_pool = list(self.tcpudp_port_pool)

    mydict["tcpudp_port_pool"] = tcpudp_port_pool

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)

    if obj.tcpudp_port_pool is None:
      obj.tcpudp_port_pool = set()
    elif not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)

    return obj

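  # Illustrative sketch (not part of the upstream module): the port pool is a
  # set on the live object but a plain list in the serialized form, so a
  # round trip normalizes it.  The port numbers below are made up.
  #
  #   cluster.tcpudp_port_pool = set([11000, 11001])
  #   data = cluster.ToDict()         # data["tcpudp_port_pool"] is a list
  #   Cluster.FromDict(data).tcpudp_port_pool   # back to set([11000, 11001])
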
  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: The diskparams
    @return: The defaults dict

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

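  # Illustrative sketch (not part of the upstream module): per-OS hypervisor
  # parameters (os_hvp) override the cluster-wide hvparams when os_name is
  # given.  Hypervisor, OS and key names below are hypothetical.
  #
  #   cluster.hvparams = {"kvm": {"kernel_path": "/boot/vmlinuz", "acpi": True}}
  #   cluster.os_hvp = {"debian": {"kvm": {"acpi": False}}}
  #   cluster.GetHVDefaults("kvm", os_name="debian")
  #   # -> {"kernel_path": "/boot/vmlinuz", "acpi": False}
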
  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

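  # Illustrative sketch (not part of the upstream module): instance-level
  # hvparams take precedence over the defaults computed by GetHVDefaults;
  # with skip_globals=True the keys listed in constants.HVC_GLOBALS are left
  # out entirely.  The parameter name and value below are hypothetical.
  #
  #   cluster.SimpleFillHV("kvm", "debian", {"boot_order": "network"})
  #   # -> cluster/OS defaults for "kvm", with "boot_order" forced to "network"
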
  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

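  # Illustrative sketch (not part of the upstream module): be/nic parameters
  # use the cluster-wide "default" parameter group (constants.PP_DEFAULT) as
  # the base layer.  The parameter names and values below are hypothetical.
  #
  #   cluster.beparams = {constants.PP_DEFAULT: {"maxmem": 1024, "vcpus": 1}}
  #   cluster.SimpleFillBE({"vcpus": 4})
  #   # -> {"maxmem": 1024, "vcpus": 4}
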
  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)

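  # Illustrative sketch (not part of the upstream module): OS parameters are
  # layered base OS -> OS+variant -> explicitly given params.  The OS and
  # parameter names below are hypothetical.
  #
  #   cluster.osparams = {"debian": {"mirror": "deb.example.com"},
  #                       "debian+minimal": {"extra_pkgs": ""}}
  #   cluster.SimpleFillOS("debian+minimal", {"mirror": "local.example.com"})
  #   # -> {"mirror": "local.example.com", "extra_pkgs": ""}
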
  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

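  # Illustrative sketch (not part of the upstream module): node parameters
  # are resolved in three layers, cluster -> node group -> node, with the
  # most specific layer winning.  Names and values below are hypothetical.
  #
  #   cluster.ndparams = {"spindle_count": 1, "exclusive_storage": False}
  #   group.ndparams = {"spindle_count": 2}
  #   node.ndparams = {"exclusive_storage": True}
  #   cluster.FillND(node, group)
  #   # -> {"spindle_count": 2, "exclusive_storage": True}
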
  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)

  def IsDiskTemplateEnabled(self, disk_template):
    """Checks if a particular disk template is enabled.

    """
    return utils.storage.IsDiskTemplateEnabled(
        disk_template, self.enabled_disk_templates)

  def IsFileStorageEnabled(self):
    """Checks if file storage is enabled.

    """
    return utils.storage.IsFileStorageEnabled(self.enabled_disk_templates)

  def IsSharedFileStorageEnabled(self):
    """Checks if shared file storage is enabled.

    """
    return utils.storage.IsSharedFileStorageEnabled(
        self.enabled_disk_templates)

class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]

class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = \
      outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []

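# Illustrative sketch (not part of the upstream module): the ToDict/FromDict
# overrides above convert the "fields" attribute between QueryFieldDefinition
# objects and plain dicts so the response stays serializable.  The field
# values below are hypothetical.
#
#   fdef = QueryFieldDefinition(name="name", title="Name", kind="text", doc="")
#   resp = QueryResponse(fields=[fdef], data=[])
#   resp.ToDict()["fields"]   # -> list of plain dicts, e.g. [{"name": "name", ...}]
#   QueryResponse.FromDict(resp.ToDict()).fields[0].title   # -> "Name"
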
class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True

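# Illustrative sketch (not part of the upstream module): which attributes
# Validate() expects depends on the console kind; an SSH console, for
# instance, needs a host, a user and a command, but no port or display.
# The host, user and command values below are hypothetical.
#
#   console = InstanceConsole(instance="inst1.example.com",
#                             kind=constants.CONS_SSH,
#                             host="node1.example.com", user="root",
#                             command="xm console inst1.example.com")
#   console.Validate()   # -> True (raises AssertionError if inconsistent)
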
class Network(TaggableObject):
  """Object representing a network definition for ganeti.

  """
  __slots__ = [
    "name",
    "serial_no",
    "mac_prefix",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "reservations",
    "ext_reservations",
    ] + _TIMESTAMPS + _UUID

  def HooksDict(self, prefix=""):
    """Export a dictionary used by hooks with a network's information.

    @type prefix: String
    @param prefix: Prefix to prepend to the dict entries

    """
    result = {
      "%sNETWORK_NAME" % prefix: self.name,
      "%sNETWORK_UUID" % prefix: self.uuid,
      "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
    }
    if self.network:
      result["%sNETWORK_SUBNET" % prefix] = self.network
    if self.gateway:
      result["%sNETWORK_GATEWAY" % prefix] = self.gateway
    if self.network6:
      result["%sNETWORK_SUBNET6" % prefix] = self.network6
    if self.gateway6:
      result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
    if self.mac_prefix:
      result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix

    return result

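  # Illustrative sketch (not part of the upstream module): the hook
  # environment produced for a small IPv4-only network.  The name, UUID and
  # addresses below are hypothetical.
  #
  #   net = Network(name="lan0", uuid="some-uuid", network="192.0.2.0/24",
  #                 gateway="192.0.2.1", tags=[])
  #   net.HooksDict(prefix="NEW_")
  #   # -> {"NEW_NETWORK_NAME": "lan0", "NEW_NETWORK_UUID": "some-uuid",
  #   #     "NEW_NETWORK_TAGS": "", "NEW_NETWORK_SUBNET": "192.0.2.0/24",
  #   #     "NEW_NETWORK_GATEWAY": "192.0.2.1"}
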
  @classmethod
  def FromDict(cls, val):
    """Custom function for networks.

    Remove deprecated network_type and family.

    """
    if "network_type" in val:
      del val["network_type"]
    if "family" in val:
      del val["family"]
    obj = super(Network, cls).FromDict(val)
    return obj

class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp

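# Illustrative sketch (not part of the upstream module): Dumps/Loads give a
# simple string round trip for INI-style data.  The section and option names
# below are hypothetical.
#
#   cfg = SerializableConfigParser()
#   cfg.add_section("instance")
#   cfg.set("instance", "name", "inst1.example.com")
#   text = cfg.Dumps()     # "[instance]\nname = inst1.example.com\n..." as a string
#   SerializableConfigParser.Loads(text).get("instance", "name")
#   # -> "inst1.example.com"
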
class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  @type lv_list: list of strings
  @ivar lv_list: names of the LVs hosted on the PV
  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    "lv_list"
    ]

  def IsEmpty(self):
    """Is this PV empty?

    """
    return self.size <= (self.free + 1)

  def IsAllocatable(self):
    """Is this PV allocatable?

    """
    return ("a" in self.attributes)
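# Illustrative sketch (not part of the upstream module): IsEmpty() treats a PV
# as empty when its free space is within 1 MiB of its total size, and
# IsAllocatable() checks for the "a" flag in the LVM attribute string.  The
# device name and numbers below are hypothetical.
#
#   pv = LvmPvInfo(name="/dev/sda2", vg_name="xenvg", size=10240.0,
#                  free=10240.0, attributes="a--", lv_list=[])
#   pv.IsEmpty()         # -> True
#   pv.IsAllocatable()   # -> True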