Statistics
| Branch: | Tag: | Revision:

root / lib / objects.py @ 6b2a2942

History | View | Annotate | Download (65.6 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable=E0203,W0201,R0902
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
# R0902: Allow instances of these objects to have more than 20 attributes
37

    
38
import ConfigParser
39
import re
40
import copy
41
import logging
42
import time
43
from cStringIO import StringIO
44

    
45
from ganeti import errors
46
from ganeti import constants
47
from ganeti import netutils
48
from ganeti import outils
49
from ganeti import utils
50

    
51
from socket import AF_INET
52

    
53

    
54
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
55
           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
56

    
57
_TIMESTAMPS = ["ctime", "mtime"]
58
_UUID = ["uuid"]
59

    
60

    
61
def FillDict(defaults_dict, custom_dict, skip_keys=None):
62
  """Basic function to apply settings on top a default dict.
63

64
  @type defaults_dict: dict
65
  @param defaults_dict: dictionary holding the default values
66
  @type custom_dict: dict
67
  @param custom_dict: dictionary holding customized value
68
  @type skip_keys: list
69
  @param skip_keys: which keys not to fill
70
  @rtype: dict
71
  @return: dict with the 'full' values
72

73
  """
74
  ret_dict = copy.deepcopy(defaults_dict)
75
  ret_dict.update(custom_dict)
76
  if skip_keys:
77
    for k in skip_keys:
78
      if k in ret_dict:
79
        del ret_dict[k]
80
  return ret_dict
81

    
82

    
83
def FillIPolicy(default_ipolicy, custom_ipolicy):
84
  """Fills an instance policy with defaults.
85

86
  """
87
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
88
  ret_dict = copy.deepcopy(custom_ipolicy)
89
  for key in default_ipolicy:
90
    if key not in ret_dict:
91
      ret_dict[key] = copy.deepcopy(default_ipolicy[key])
92
    elif key == constants.ISPECS_STD:
93
      ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
94
  return ret_dict
95

    
96

    
97
def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
98
  """Fills the disk parameter defaults.
99

100
  @see: L{FillDict} for parameters and return value
101

102
  """
103
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
104

    
105
  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
106
                             skip_keys=skip_keys))
107
              for dt in constants.DISK_TEMPLATES)
108

    
109

    
110
def UpgradeGroupedParams(target, defaults):
111
  """Update all groups for the target parameter.
112

113
  @type target: dict of dicts
114
  @param target: {group: {parameter: value}}
115
  @type defaults: dict
116
  @param defaults: default parameter values
117

118
  """
119
  if target is None:
120
    target = {constants.PP_DEFAULT: defaults}
121
  else:
122
    for group in target:
123
      target[group] = FillDict(defaults, target[group])
124
  return target
125

    
126

    
127
def UpgradeBeParams(target):
128
  """Update the be parameters dict to the new format.
129

130
  @type target: dict
131
  @param target: "be" parameters dict
132

133
  """
134
  if constants.BE_MEMORY in target:
135
    memory = target[constants.BE_MEMORY]
136
    target[constants.BE_MAXMEM] = memory
137
    target[constants.BE_MINMEM] = memory
138
    del target[constants.BE_MEMORY]
139

    
140

    
141
def UpgradeDiskParams(diskparams):
142
  """Upgrade the disk parameters.
143

144
  @type diskparams: dict
145
  @param diskparams: disk parameters to upgrade
146
  @rtype: dict
147
  @return: the upgraded disk parameters dict
148

149
  """
150
  if not diskparams:
151
    result = {}
152
  else:
153
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
154

    
155
  return result
156

    
157

    
158
def UpgradeNDParams(ndparams):
159
  """Upgrade ndparams structure.
160

161
  @type ndparams: dict
162
  @param ndparams: disk parameters to upgrade
163
  @rtype: dict
164
  @return: the upgraded node parameters dict
165

166
  """
167
  if ndparams is None:
168
    ndparams = {}
169

    
170
  if (constants.ND_OOB_PROGRAM in ndparams and
171
      ndparams[constants.ND_OOB_PROGRAM] is None):
172
    # will be reset by the line below
173
    del ndparams[constants.ND_OOB_PROGRAM]
174
  return FillDict(constants.NDC_DEFAULTS, ndparams)
175

    
176

    
177
def MakeEmptyIPolicy():
178
  """Create empty IPolicy dictionary.
179

180
  """
181
  return {}
182

    
183

    
184
class ConfigObject(outils.ValidatedSlots):
185
  """A generic config object.
186

187
  It has the following properties:
188

189
    - provides somewhat safe recursive unpickling and pickling for its classes
190
    - unset attributes which are defined in slots are always returned
191
      as None instead of raising an error
192

193
  Classes derived from this must always declare __slots__ (we use many
194
  config objects and the memory reduction is useful)
195

196
  """
197
  __slots__ = []
198

    
199
  def __getattr__(self, name):
200
    if name not in self.GetAllSlots():
201
      raise AttributeError("Invalid object attribute %s.%s" %
202
                           (type(self).__name__, name))
203
    return None
204

    
205
  def __setstate__(self, state):
206
    slots = self.GetAllSlots()
207
    for name in state:
208
      if name in slots:
209
        setattr(self, name, state[name])
210

    
211
  def Validate(self):
212
    """Validates the slots.
213

214
    """
215

    
216
  def ToDict(self):
217
    """Convert to a dict holding only standard python types.
218

219
    The generic routine just dumps all of this object's attributes in
220
    a dict. It does not work if the class has children who are
221
    ConfigObjects themselves (e.g. the nics list in an Instance), in
222
    which case the object should subclass the function in order to
223
    make sure all objects returned are only standard python types.
224

225
    """
226
    result = {}
227
    for name in self.GetAllSlots():
228
      value = getattr(self, name, None)
229
      if value is not None:
230
        result[name] = value
231
    return result
232

    
233
  __getstate__ = ToDict
234

    
235
  @classmethod
236
  def FromDict(cls, val):
237
    """Create an object from a dictionary.
238

239
    This generic routine takes a dict, instantiates a new instance of
240
    the given class, and sets attributes based on the dict content.
241

242
    As for `ToDict`, this does not work if the class has children
243
    who are ConfigObjects themselves (e.g. the nics list in an
244
    Instance), in which case the object should subclass the function
245
    and alter the objects.
246

247
    """
248
    if not isinstance(val, dict):
249
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
250
                                      " expected dict, got %s" % type(val))
251
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
252
    obj = cls(**val_str) # pylint: disable=W0142
253
    return obj
254

    
255
  def Copy(self):
256
    """Makes a deep copy of the current object and its children.
257

258
    """
259
    dict_form = self.ToDict()
260
    clone_obj = self.__class__.FromDict(dict_form)
261
    return clone_obj
262

    
263
  def __repr__(self):
264
    """Implement __repr__ for ConfigObjects."""
265
    return repr(self.ToDict())
266

    
267
  def __eq__(self, other):
268
    """Implement __eq__ for ConfigObjects."""
269
    return isinstance(other, self.__class__) and self.ToDict() == other.ToDict()
270

    
271
  def UpgradeConfig(self):
272
    """Fill defaults for missing configuration values.
273

274
    This method will be called at configuration load time, and its
275
    implementation will be object dependent.
276

277
    """
278
    pass
279

    
280

    
281
class TaggableObject(ConfigObject):
282
  """An generic class supporting tags.
283

284
  """
285
  __slots__ = ["tags"]
286
  VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")
287

    
288
  @classmethod
289
  def ValidateTag(cls, tag):
290
    """Check if a tag is valid.
291

292
    If the tag is invalid, an errors.TagError will be raised. The
293
    function has no return value.
294

295
    """
296
    if not isinstance(tag, basestring):
297
      raise errors.TagError("Invalid tag type (not a string)")
298
    if len(tag) > constants.MAX_TAG_LEN:
299
      raise errors.TagError("Tag too long (>%d characters)" %
300
                            constants.MAX_TAG_LEN)
301
    if not tag:
302
      raise errors.TagError("Tags cannot be empty")
303
    if not cls.VALID_TAG_RE.match(tag):
304
      raise errors.TagError("Tag contains invalid characters")
305

    
306
  def GetTags(self):
307
    """Return the tags list.
308

309
    """
310
    tags = getattr(self, "tags", None)
311
    if tags is None:
312
      tags = self.tags = set()
313
    return tags
314

    
315
  def AddTag(self, tag):
316
    """Add a new tag.
317

318
    """
319
    self.ValidateTag(tag)
320
    tags = self.GetTags()
321
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
322
      raise errors.TagError("Too many tags")
323
    self.GetTags().add(tag)
324

    
325
  def RemoveTag(self, tag):
326
    """Remove a tag.
327

328
    """
329
    self.ValidateTag(tag)
330
    tags = self.GetTags()
331
    try:
332
      tags.remove(tag)
333
    except KeyError:
334
      raise errors.TagError("Tag not found")
335

    
336
  def ToDict(self):
337
    """Taggable-object-specific conversion to standard python types.
338

339
    This replaces the tags set with a list.
340

341
    """
342
    bo = super(TaggableObject, self).ToDict()
343

    
344
    tags = bo.get("tags", None)
345
    if isinstance(tags, set):
346
      bo["tags"] = list(tags)
347
    return bo
348

    
349
  @classmethod
350
  def FromDict(cls, val):
351
    """Custom function for instances.
352

353
    """
354
    obj = super(TaggableObject, cls).FromDict(val)
355
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
356
      obj.tags = set(obj.tags)
357
    return obj
358

    
359

    
360
class MasterNetworkParameters(ConfigObject):
361
  """Network configuration parameters for the master
362

363
  @ivar uuid: master nodes UUID
364
  @ivar ip: master IP
365
  @ivar netmask: master netmask
366
  @ivar netdev: master network device
367
  @ivar ip_family: master IP family
368

369
  """
370
  __slots__ = [
371
    "uuid",
372
    "ip",
373
    "netmask",
374
    "netdev",
375
    "ip_family",
376
    ]
377

    
378

    
379
class ConfigData(ConfigObject):
380
  """Top-level config object."""
381
  __slots__ = [
382
    "version",
383
    "cluster",
384
    "nodes",
385
    "nodegroups",
386
    "instances",
387
    "networks",
388
    "serial_no",
389
    ] + _TIMESTAMPS
390

    
391
  def ToDict(self):
392
    """Custom function for top-level config data.
393

394
    This just replaces the list of instances, nodes and the cluster
395
    with standard python types.
396

397
    """
398
    mydict = super(ConfigData, self).ToDict()
399
    mydict["cluster"] = mydict["cluster"].ToDict()
400
    for key in "nodes", "instances", "nodegroups", "networks":
401
      mydict[key] = outils.ContainerToDicts(mydict[key])
402

    
403
    return mydict
404

    
405
  @classmethod
406
  def FromDict(cls, val):
407
    """Custom function for top-level config data
408

409
    """
410
    obj = super(ConfigData, cls).FromDict(val)
411
    obj.cluster = Cluster.FromDict(obj.cluster)
412
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
413
    obj.instances = \
414
      outils.ContainerFromDicts(obj.instances, dict, Instance)
415
    obj.nodegroups = \
416
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
417
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
418
    return obj
419

    
420
  def HasAnyDiskOfType(self, dev_type):
421
    """Check if in there is at disk of the given type in the configuration.
422

423
    @type dev_type: L{constants.DTS_BLOCK}
424
    @param dev_type: the type to look for
425
    @rtype: boolean
426
    @return: boolean indicating if a disk of the given type was found or not
427

428
    """
429
    for instance in self.instances.values():
430
      for disk in instance.disks:
431
        if disk.IsBasedOnDiskType(dev_type):
432
          return True
433
    return False
434

    
435
  def UpgradeConfig(self):
436
    """Fill defaults for missing configuration values.
437

438
    """
439
    self.cluster.UpgradeConfig()
440
    for node in self.nodes.values():
441
      node.UpgradeConfig()
442
    for instance in self.instances.values():
443
      instance.UpgradeConfig()
444
    self._UpgradeEnabledDiskTemplates()
445
    if self.nodegroups is None:
446
      self.nodegroups = {}
447
    for nodegroup in self.nodegroups.values():
448
      nodegroup.UpgradeConfig()
449
      InstancePolicy.UpgradeDiskTemplates(
450
        nodegroup.ipolicy, self.cluster.enabled_disk_templates)
451
    if self.cluster.drbd_usermode_helper is None:
452
      if self.cluster.IsDiskTemplateEnabled(constants.DT_DRBD8):
453
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
454
    if self.networks is None:
455
      self.networks = {}
456
    for network in self.networks.values():
457
      network.UpgradeConfig()
458

    
459
  def _UpgradeEnabledDiskTemplates(self):
460
    """Upgrade the cluster's enabled disk templates by inspecting the currently
461
       enabled and/or used disk templates.
462

463
    """
464
    if not self.cluster.enabled_disk_templates:
465
      template_set = \
466
        set([inst.disk_template for inst in self.instances.values()])
467
      # Add drbd and plain, if lvm is enabled (by specifying a volume group)
468
      if self.cluster.volume_group_name:
469
        template_set.add(constants.DT_DRBD8)
470
        template_set.add(constants.DT_PLAIN)
471
      # Set enabled_disk_templates to the inferred disk templates. Order them
472
      # according to a preference list that is based on Ganeti's history of
473
      # supported disk templates.
474
      self.cluster.enabled_disk_templates = []
475
      for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
476
        if preferred_template in template_set:
477
          self.cluster.enabled_disk_templates.append(preferred_template)
478
          template_set.remove(preferred_template)
479
      self.cluster.enabled_disk_templates.extend(list(template_set))
480
    InstancePolicy.UpgradeDiskTemplates(
481
      self.cluster.ipolicy, self.cluster.enabled_disk_templates)
482

    
483

    
484
class NIC(ConfigObject):
485
  """Config object representing a network card."""
486
  __slots__ = ["name", "mac", "ip", "network",
487
               "nicparams", "netinfo", "pci"] + _UUID
488

    
489
  @classmethod
490
  def CheckParameterSyntax(cls, nicparams):
491
    """Check the given parameters for validity.
492

493
    @type nicparams:  dict
494
    @param nicparams: dictionary with parameter names/value
495
    @raise errors.ConfigurationError: when a parameter is not valid
496

497
    """
498
    mode = nicparams[constants.NIC_MODE]
499
    if (mode not in constants.NIC_VALID_MODES and
500
        mode != constants.VALUE_AUTO):
501
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
502

    
503
    if (mode == constants.NIC_MODE_BRIDGED and
504
        not nicparams[constants.NIC_LINK]):
505
      raise errors.ConfigurationError("Missing bridged NIC link")
506

    
507

    
508
class Disk(ConfigObject):
509
  """Config object representing a block device."""
510
  __slots__ = (["name", "dev_type", "logical_id", "children", "iv_name",
511
                "size", "mode", "params", "spindles", "pci"] + _UUID +
512
               # dynamic_params is special. It depends on the node this instance
513
               # is sent to, and should not be persisted.
514
               ["dynamic_params"])
515

    
516
  def CreateOnSecondary(self):
517
    """Test if this device needs to be created on a secondary node."""
518
    return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
519

    
520
  def AssembleOnSecondary(self):
521
    """Test if this device needs to be assembled on a secondary node."""
522
    return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
523

    
524
  def OpenOnSecondary(self):
525
    """Test if this device needs to be opened on a secondary node."""
526
    return self.dev_type in (constants.DT_PLAIN,)
527

    
528
  def StaticDevPath(self):
529
    """Return the device path if this device type has a static one.
530

531
    Some devices (LVM for example) live always at the same /dev/ path,
532
    irrespective of their status. For such devices, we return this
533
    path, for others we return None.
534

535
    @warning: The path returned is not a normalized pathname; callers
536
        should check that it is a valid path.
537

538
    """
539
    if self.dev_type == constants.DT_PLAIN:
540
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
541
    elif self.dev_type == constants.DT_BLOCK:
542
      return self.logical_id[1]
543
    elif self.dev_type == constants.DT_RBD:
544
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
545
    return None
546

    
547
  def ChildrenNeeded(self):
548
    """Compute the needed number of children for activation.
549

550
    This method will return either -1 (all children) or a positive
551
    number denoting the minimum number of children needed for
552
    activation (only mirrored devices will usually return >=0).
553

554
    Currently, only DRBD8 supports diskless activation (therefore we
555
    return 0), for all other we keep the previous semantics and return
556
    -1.
557

558
    """
559
    if self.dev_type == constants.DT_DRBD8:
560
      return 0
561
    return -1
562

    
563
  def IsBasedOnDiskType(self, dev_type):
564
    """Check if the disk or its children are based on the given type.
565

566
    @type dev_type: L{constants.DTS_BLOCK}
567
    @param dev_type: the type to look for
568
    @rtype: boolean
569
    @return: boolean indicating if a device of the given type was found or not
570

571
    """
572
    if self.children:
573
      for child in self.children:
574
        if child.IsBasedOnDiskType(dev_type):
575
          return True
576
    return self.dev_type == dev_type
577

    
578
  def GetNodes(self, node_uuid):
579
    """This function returns the nodes this device lives on.
580

581
    Given the node on which the parent of the device lives on (or, in
582
    case of a top-level device, the primary node of the devices'
583
    instance), this function will return a list of nodes on which this
584
    devices needs to (or can) be assembled.
585

586
    """
587
    if self.dev_type in [constants.DT_PLAIN, constants.DT_FILE,
588
                         constants.DT_BLOCK, constants.DT_RBD,
589
                         constants.DT_EXT, constants.DT_SHARED_FILE]:
590
      result = [node_uuid]
591
    elif self.dev_type in constants.DTS_DRBD:
592
      result = [self.logical_id[0], self.logical_id[1]]
593
      if node_uuid not in result:
594
        raise errors.ConfigurationError("DRBD device passed unknown node")
595
    else:
596
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
597
    return result
598

    
599
  def ComputeNodeTree(self, parent_node_uuid):
600
    """Compute the node/disk tree for this disk and its children.
601

602
    This method, given the node on which the parent disk lives, will
603
    return the list of all (node UUID, disk) pairs which describe the disk
604
    tree in the most compact way. For example, a drbd/lvm stack
605
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
606
    which represents all the top-level devices on the nodes.
607

608
    """
609
    my_nodes = self.GetNodes(parent_node_uuid)
610
    result = [(node, self) for node in my_nodes]
611
    if not self.children:
612
      # leaf device
613
      return result
614
    for node in my_nodes:
615
      for child in self.children:
616
        child_result = child.ComputeNodeTree(node)
617
        if len(child_result) == 1:
618
          # child (and all its descendants) is simple, doesn't split
619
          # over multiple hosts, so we don't need to describe it, our
620
          # own entry for this node describes it completely
621
          continue
622
        else:
623
          # check if child nodes differ from my nodes; note that
624
          # subdisk can differ from the child itself, and be instead
625
          # one of its descendants
626
          for subnode, subdisk in child_result:
627
            if subnode not in my_nodes:
628
              result.append((subnode, subdisk))
629
            # otherwise child is under our own node, so we ignore this
630
            # entry (but probably the other results in the list will
631
            # be different)
632
    return result
633

    
634
  def ComputeGrowth(self, amount):
635
    """Compute the per-VG growth requirements.
636

637
    This only works for VG-based disks.
638

639
    @type amount: integer
640
    @param amount: the desired increase in (user-visible) disk space
641
    @rtype: dict
642
    @return: a dictionary of volume-groups and the required size
643

644
    """
645
    if self.dev_type == constants.DT_PLAIN:
646
      return {self.logical_id[0]: amount}
647
    elif self.dev_type == constants.DT_DRBD8:
648
      if self.children:
649
        return self.children[0].ComputeGrowth(amount)
650
      else:
651
        return {}
652
    else:
653
      # Other disk types do not require VG space
654
      return {}
655

    
656
  def RecordGrow(self, amount):
657
    """Update the size of this disk after growth.
658

659
    This method recurses over the disks's children and updates their
660
    size correspondigly. The method needs to be kept in sync with the
661
    actual algorithms from bdev.
662

663
    """
664
    if self.dev_type in (constants.DT_PLAIN, constants.DT_FILE,
665
                         constants.DT_RBD, constants.DT_EXT,
666
                         constants.DT_SHARED_FILE):
667
      self.size += amount
668
    elif self.dev_type == constants.DT_DRBD8:
669
      if self.children:
670
        self.children[0].RecordGrow(amount)
671
      self.size += amount
672
    else:
673
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
674
                                   " disk type %s" % self.dev_type)
675

    
676
  def Update(self, size=None, mode=None, spindles=None):
677
    """Apply changes to size, spindles and mode.
678

679
    """
680
    if self.dev_type == constants.DT_DRBD8:
681
      if self.children:
682
        self.children[0].Update(size=size, mode=mode)
683
    else:
684
      assert not self.children
685

    
686
    if size is not None:
687
      self.size = size
688
    if mode is not None:
689
      self.mode = mode
690
    if spindles is not None:
691
      self.spindles = spindles
692

    
693
  def UnsetSize(self):
694
    """Sets recursively the size to zero for the disk and its children.
695

696
    """
697
    if self.children:
698
      for child in self.children:
699
        child.UnsetSize()
700
    self.size = 0
701

    
702
  def UpdateDynamicDiskParams(self, target_node_uuid, nodes_ip):
703
    """Updates the dynamic disk params for the given node.
704

705
    This is mainly used for drbd, which needs ip/port configuration.
706

707
    Arguments:
708
      - target_node_uuid: the node UUID we wish to configure for
709
      - nodes_ip: a mapping of node name to ip
710

711
    The target_node must exist in nodes_ip, and should be one of the
712
    nodes in the logical ID if this device is a DRBD device.
713

714
    """
715
    if self.children:
716
      for child in self.children:
717
        child.UpdateDynamicDiskParams(target_node_uuid, nodes_ip)
718

    
719
    dyn_disk_params = {}
720
    if self.logical_id is not None and self.dev_type in constants.DTS_DRBD:
721
      pnode_uuid, snode_uuid, _, pminor, sminor, _ = self.logical_id
722
      if target_node_uuid not in (pnode_uuid, snode_uuid):
723
        # disk object is being sent to neither the primary nor the secondary
724
        # node. reset the dynamic parameters, the target node is not
725
        # supposed to use them.
726
        self.dynamic_params = dyn_disk_params
727
        return
728

    
729
      pnode_ip = nodes_ip.get(pnode_uuid, None)
730
      snode_ip = nodes_ip.get(snode_uuid, None)
731
      if pnode_ip is None or snode_ip is None:
732
        raise errors.ConfigurationError("Can't find primary or secondary node"
733
                                        " for %s" % str(self))
734
      if pnode_uuid == target_node_uuid:
735
        dyn_disk_params[constants.DDP_LOCAL_IP] = pnode_ip
736
        dyn_disk_params[constants.DDP_REMOTE_IP] = snode_ip
737
        dyn_disk_params[constants.DDP_LOCAL_MINOR] = pminor
738
        dyn_disk_params[constants.DDP_REMOTE_MINOR] = sminor
739
      else: # it must be secondary, we tested above
740
        dyn_disk_params[constants.DDP_LOCAL_IP] = snode_ip
741
        dyn_disk_params[constants.DDP_REMOTE_IP] = pnode_ip
742
        dyn_disk_params[constants.DDP_LOCAL_MINOR] = sminor
743
        dyn_disk_params[constants.DDP_REMOTE_MINOR] = pminor
744

    
745
    self.dynamic_params = dyn_disk_params
746

    
747
  # pylint: disable=W0221
748
  def ToDict(self, include_dynamic_params=False):
749
    """Disk-specific conversion to standard python types.
750

751
    This replaces the children lists of objects with lists of
752
    standard python types.
753

754
    """
755
    bo = super(Disk, self).ToDict()
756
    if not include_dynamic_params and "dynamic_params" in bo:
757
      del bo["dynamic_params"]
758

    
759
    for attr in ("children",):
760
      alist = bo.get(attr, None)
761
      if alist:
762
        bo[attr] = outils.ContainerToDicts(alist)
763
    return bo
764

    
765
  @classmethod
766
  def FromDict(cls, val):
767
    """Custom function for Disks
768

769
    """
770
    obj = super(Disk, cls).FromDict(val)
771
    if obj.children:
772
      obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
773
    if obj.logical_id and isinstance(obj.logical_id, list):
774
      obj.logical_id = tuple(obj.logical_id)
775
    if obj.dev_type in constants.DTS_DRBD:
776
      # we need a tuple of length six here
777
      if len(obj.logical_id) < 6:
778
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
779
    return obj
780

    
781
  def __str__(self):
782
    """Custom str() formatter for disks.
783

784
    """
785
    if self.dev_type == constants.DT_PLAIN:
786
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
787
    elif self.dev_type in constants.DTS_DRBD:
788
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
789
      val = "<DRBD8("
790

    
791
      val += ("hosts=%s/%d-%s/%d, port=%s, " %
792
              (node_a, minor_a, node_b, minor_b, port))
793
      if self.children and self.children.count(None) == 0:
794
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
795
      else:
796
        val += "no local storage"
797
    else:
798
      val = ("<Disk(type=%s, logical_id=%s, children=%s" %
799
             (self.dev_type, self.logical_id, self.children))
800
    if self.iv_name is None:
801
      val += ", not visible"
802
    else:
803
      val += ", visible as /dev/%s" % self.iv_name
804
    if self.spindles is not None:
805
      val += ", spindles=%s" % self.spindles
806
    if isinstance(self.size, int):
807
      val += ", size=%dm)>" % self.size
808
    else:
809
      val += ", size='%s')>" % (self.size,)
810
    return val
811

    
812
  def Verify(self):
813
    """Checks that this disk is correctly configured.
814

815
    """
816
    all_errors = []
817
    if self.mode not in constants.DISK_ACCESS_SET:
818
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
819
    return all_errors
820

    
821
  def UpgradeConfig(self):
822
    """Fill defaults for missing configuration values.
823

824
    """
825
    if self.children:
826
      for child in self.children:
827
        child.UpgradeConfig()
828

    
829
    # FIXME: Make this configurable in Ganeti 2.7
830
    self.params = {}
831
    # add here config upgrade for this disk
832

    
833
    # map of legacy device types (mapping differing LD constants to new
834
    # DT constants)
835
    LEG_DEV_TYPE_MAP = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
836
    if self.dev_type in LEG_DEV_TYPE_MAP:
837
      self.dev_type = LEG_DEV_TYPE_MAP[self.dev_type]
838

    
839
  @staticmethod
840
  def ComputeLDParams(disk_template, disk_params):
841
    """Computes Logical Disk parameters from Disk Template parameters.
842

843
    @type disk_template: string
844
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
845
    @type disk_params: dict
846
    @param disk_params: disk template parameters;
847
                        dict(template_name -> parameters
848
    @rtype: list(dict)
849
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
850
      contains the LD parameters of the node. The tree is flattened in-order.
851

852
    """
853
    if disk_template not in constants.DISK_TEMPLATES:
854
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
855

    
856
    assert disk_template in disk_params
857

    
858
    result = list()
859
    dt_params = disk_params[disk_template]
860
    if disk_template == constants.DT_DRBD8:
861
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_DRBD8], {
862
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
863
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
864
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
865
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
866
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
867
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
868
        constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL],
869
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
870
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
871
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
872
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
873
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
874
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
875
        }))
876

    
877
      # data LV
878
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
879
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
880
        }))
881

    
882
      # metadata LV
883
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
884
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
885
        }))
886

    
887
    elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
888
      result.append(constants.DISK_LD_DEFAULTS[disk_template])
889

    
890
    elif disk_template == constants.DT_PLAIN:
891
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
892
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
893
        }))
894

    
895
    elif disk_template == constants.DT_BLOCK:
896
      result.append(constants.DISK_LD_DEFAULTS[constants.DT_BLOCK])
897

    
898
    elif disk_template == constants.DT_RBD:
899
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_RBD], {
900
        constants.LDP_POOL: dt_params[constants.RBD_POOL],
901
        constants.LDP_ACCESS: dt_params[constants.RBD_ACCESS],
902
        }))
903

    
904
    elif disk_template == constants.DT_EXT:
905
      result.append(constants.DISK_LD_DEFAULTS[constants.DT_EXT])
906

    
907
    return result
908

    
909

    
910
class InstancePolicy(ConfigObject):
911
  """Config object representing instance policy limits dictionary.
912

913
  Note that this object is not actually used in the config, it's just
914
  used as a placeholder for a few functions.
915

916
  """
917
  @classmethod
918
  def UpgradeDiskTemplates(cls, ipolicy, enabled_disk_templates):
919
    """Upgrades the ipolicy configuration."""
920
    if constants.IPOLICY_DTS in ipolicy:
921
      if not set(ipolicy[constants.IPOLICY_DTS]).issubset(
922
        set(enabled_disk_templates)):
923
        ipolicy[constants.IPOLICY_DTS] = list(
924
          set(ipolicy[constants.IPOLICY_DTS]) & set(enabled_disk_templates))
925

    
926
  @classmethod
927
  def CheckParameterSyntax(cls, ipolicy, check_std):
928
    """ Check the instance policy for validity.
929

930
    @type ipolicy: dict
931
    @param ipolicy: dictionary with min/max/std specs and policies
932
    @type check_std: bool
933
    @param check_std: Whether to check std value or just assume compliance
934
    @raise errors.ConfigurationError: when the policy is not legal
935

936
    """
937
    InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
938
    if constants.IPOLICY_DTS in ipolicy:
939
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
940
    for key in constants.IPOLICY_PARAMETERS:
941
      if key in ipolicy:
942
        InstancePolicy.CheckParameter(key, ipolicy[key])
943
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
944
    if wrong_keys:
945
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
946
                                      utils.CommaJoin(wrong_keys))
947

    
948
  @classmethod
949
  def _CheckIncompleteSpec(cls, spec, keyname):
950
    missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
951
    if missing_params:
952
      msg = ("Missing instance specs parameters for %s: %s" %
953
             (keyname, utils.CommaJoin(missing_params)))
954
      raise errors.ConfigurationError(msg)
955

    
956
  @classmethod
957
  def CheckISpecSyntax(cls, ipolicy, check_std):
958
    """Check the instance policy specs for validity.
959

960
    @type ipolicy: dict
961
    @param ipolicy: dictionary with min/max/std specs
962
    @type check_std: bool
963
    @param check_std: Whether to check std value or just assume compliance
964
    @raise errors.ConfigurationError: when specs are not valid
965

966
    """
967
    if constants.ISPECS_MINMAX not in ipolicy:
968
      # Nothing to check
969
      return
970

    
971
    if check_std and constants.ISPECS_STD not in ipolicy:
972
      msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
973
      raise errors.ConfigurationError(msg)
974
    stdspec = ipolicy.get(constants.ISPECS_STD)
975
    if check_std:
976
      InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)
977

    
978
    if not ipolicy[constants.ISPECS_MINMAX]:
979
      raise errors.ConfigurationError("Empty minmax specifications")
980
    std_is_good = False
981
    for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
982
      missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
983
      if missing:
984
        msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
985
        raise errors.ConfigurationError(msg)
986
      for (key, spec) in minmaxspecs.items():
987
        InstancePolicy._CheckIncompleteSpec(spec, key)
988

    
989
      spec_std_ok = True
990
      for param in constants.ISPECS_PARAMETERS:
991
        par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
992
                                                           param, check_std)
993
        spec_std_ok = spec_std_ok and par_std_ok
994
      std_is_good = std_is_good or spec_std_ok
995
    if not std_is_good:
996
      raise errors.ConfigurationError("Invalid std specifications")
997

    
998
  @classmethod
999
  def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
1000
    """Check the instance policy specs for validity on a given key.
1001

1002
    We check if the instance specs makes sense for a given key, that is
1003
    if minmaxspecs[min][name] <= stdspec[name] <= minmaxspec[max][name].
1004

1005
    @type minmaxspecs: dict
1006
    @param minmaxspecs: dictionary with min and max instance spec
1007
    @type stdspec: dict
1008
    @param stdspec: dictionary with standard instance spec
1009
    @type name: string
1010
    @param name: what are the limits for
1011
    @type check_std: bool
1012
    @param check_std: Whether to check std value or just assume compliance
1013
    @rtype: bool
1014
    @return: C{True} when specs are valid, C{False} when standard spec for the
1015
        given name is not valid
1016
    @raise errors.ConfigurationError: when min/max specs for the given name
1017
        are not valid
1018

1019
    """
1020
    minspec = minmaxspecs[constants.ISPECS_MIN]
1021
    maxspec = minmaxspecs[constants.ISPECS_MAX]
1022
    min_v = minspec[name]
1023
    max_v = maxspec[name]
1024

    
1025
    if min_v > max_v:
1026
      err = ("Invalid specification of min/max values for %s: %s/%s" %
1027
             (name, min_v, max_v))
1028
      raise errors.ConfigurationError(err)
1029
    elif check_std:
1030
      std_v = stdspec.get(name, min_v)
1031
      return std_v >= min_v and std_v <= max_v
1032
    else:
1033
      return True
1034

    
1035
  @classmethod
1036
  def CheckDiskTemplates(cls, disk_templates):
1037
    """Checks the disk templates for validity.
1038

1039
    """
1040
    if not disk_templates:
1041
      raise errors.ConfigurationError("Instance policy must contain" +
1042
                                      " at least one disk template")
1043
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1044
    if wrong:
1045
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
1046
                                      utils.CommaJoin(wrong))
1047

    
1048
  @classmethod
1049
  def CheckParameter(cls, key, value):
1050
    """Checks a parameter.
1051

1052
    Currently we expect all parameters to be float values.
1053

1054
    """
1055
    try:
1056
      float(value)
1057
    except (TypeError, ValueError), err:
1058
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
1059
                                      " '%s', error: %s" % (key, value, err))
1060

    
1061

    
1062
class Instance(TaggableObject):
1063
  """Config object representing an instance."""
1064
  __slots__ = [
1065
    "name",
1066
    "primary_node",
1067
    "os",
1068
    "hypervisor",
1069
    "hvparams",
1070
    "beparams",
1071
    "osparams",
1072
    "admin_state",
1073
    "nics",
1074
    "disks",
1075
    "disk_template",
1076
    "disks_active",
1077
    "network_port",
1078
    "serial_no",
1079
    ] + _TIMESTAMPS + _UUID
1080

    
1081
  def _ComputeSecondaryNodes(self):
1082
    """Compute the list of secondary nodes.
1083

1084
    This is a simple wrapper over _ComputeAllNodes.
1085

1086
    """
1087
    all_nodes = set(self._ComputeAllNodes())
1088
    all_nodes.discard(self.primary_node)
1089
    return tuple(all_nodes)
1090

    
1091
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1092
                             "List of names of secondary nodes")
1093

    
1094
  def _ComputeAllNodes(self):
1095
    """Compute the list of all nodes.
1096

1097
    Since the data is already there (in the drbd disks), keeping it as
1098
    a separate normal attribute is redundant and if not properly
1099
    synchronised can cause problems. Thus it's better to compute it
1100
    dynamically.
1101

1102
    """
1103
    def _Helper(nodes, device):
1104
      """Recursively computes nodes given a top device."""
1105
      if device.dev_type in constants.DTS_DRBD:
1106
        nodea, nodeb = device.logical_id[:2]
1107
        nodes.add(nodea)
1108
        nodes.add(nodeb)
1109
      if device.children:
1110
        for child in device.children:
1111
          _Helper(nodes, child)
1112

    
1113
    all_nodes = set()
1114
    all_nodes.add(self.primary_node)
1115
    for device in self.disks:
1116
      _Helper(all_nodes, device)
1117
    return tuple(all_nodes)
1118

    
1119
  all_nodes = property(_ComputeAllNodes, None, None,
1120
                       "List of names of all the nodes of the instance")
1121

    
1122
  def MapLVsByNode(self, lvmap=None, devs=None, node_uuid=None):
1123
    """Provide a mapping of nodes to LVs this instance owns.
1124

1125
    This function figures out what logical volumes should belong on
1126
    which nodes, recursing through a device tree.
1127

1128
    @type lvmap: dict
1129
    @param lvmap: optional dictionary to receive the
1130
        'node' : ['lv', ...] data.
1131
    @type devs: list of L{Disk}
1132
    @param devs: disks to get the LV name for. If None, all disk of this
1133
        instance are used.
1134
    @type node_uuid: string
1135
    @param node_uuid: UUID of the node to get the LV names for. If None, the
1136
        primary node of this instance is used.
1137
    @return: None if lvmap arg is given, otherwise, a dictionary of
1138
        the form { 'node_uuid' : ['volume1', 'volume2', ...], ... };
1139
        volumeN is of the form "vg_name/lv_name", compatible with
1140
        GetVolumeList()
1141

1142
    """
1143
    if node_uuid is None:
1144
      node_uuid = self.primary_node
1145

    
1146
    if lvmap is None:
1147
      lvmap = {
1148
        node_uuid: [],
1149
        }
1150
      ret = lvmap
1151
    else:
1152
      if not node_uuid in lvmap:
1153
        lvmap[node_uuid] = []
1154
      ret = None
1155

    
1156
    if not devs:
1157
      devs = self.disks
1158

    
1159
    for dev in devs:
1160
      if dev.dev_type == constants.DT_PLAIN:
1161
        lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1162

    
1163
      elif dev.dev_type in constants.DTS_DRBD:
1164
        if dev.children:
1165
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1166
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1167

    
1168
      elif dev.children:
1169
        self.MapLVsByNode(lvmap, dev.children, node_uuid)
1170

    
1171
    return ret
1172

    
1173
  def FindDisk(self, idx):
1174
    """Find a disk given having a specified index.
1175

1176
    This is just a wrapper that does validation of the index.
1177

1178
    @type idx: int
1179
    @param idx: the disk index
1180
    @rtype: L{Disk}
1181
    @return: the corresponding disk
1182
    @raise errors.OpPrereqError: when the given index is not valid
1183

1184
    """
1185
    try:
1186
      idx = int(idx)
1187
      return self.disks[idx]
1188
    except (TypeError, ValueError), err:
1189
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1190
                                 errors.ECODE_INVAL)
1191
    except IndexError:
1192
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1193
                                 " 0 to %d" % (idx, len(self.disks) - 1),
1194
                                 errors.ECODE_INVAL)
1195

    
1196
  def ToDict(self):
1197
    """Instance-specific conversion to standard python types.
1198

1199
    This replaces the children lists of objects with lists of standard
1200
    python types.
1201

1202
    """
1203
    bo = super(Instance, self).ToDict()
1204

    
1205
    for attr in "nics", "disks":
1206
      alist = bo.get(attr, None)
1207
      if alist:
1208
        nlist = outils.ContainerToDicts(alist)
1209
      else:
1210
        nlist = []
1211
      bo[attr] = nlist
1212
    return bo
1213

    
1214
  @classmethod
1215
  def FromDict(cls, val):
1216
    """Custom function for instances.
1217

1218
    """
1219
    if "admin_state" not in val:
1220
      if val.get("admin_up", False):
1221
        val["admin_state"] = constants.ADMINST_UP
1222
      else:
1223
        val["admin_state"] = constants.ADMINST_DOWN
1224
    if "admin_up" in val:
1225
      del val["admin_up"]
1226
    obj = super(Instance, cls).FromDict(val)
1227
    obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
1228
    obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
1229
    return obj
1230

    
1231
  def UpgradeConfig(self):
1232
    """Fill defaults for missing configuration values.
1233

1234
    """
1235
    for nic in self.nics:
1236
      nic.UpgradeConfig()
1237
    for disk in self.disks:
1238
      disk.UpgradeConfig()
1239
    if self.hvparams:
1240
      for key in constants.HVC_GLOBALS:
1241
        try:
1242
          del self.hvparams[key]
1243
        except KeyError:
1244
          pass
1245
    if self.osparams is None:
1246
      self.osparams = {}
1247
    UpgradeBeParams(self.beparams)
1248
    if self.disks_active is None:
1249
      self.disks_active = self.admin_state == constants.ADMINST_UP
1250

    
1251

    
1252
class OS(ConfigObject):
1253
  """Config object representing an operating system.
1254

1255
  @type supported_parameters: list
1256
  @ivar supported_parameters: a list of tuples, name and description,
1257
      containing the supported parameters by this OS
1258

1259
  @type VARIANT_DELIM: string
1260
  @cvar VARIANT_DELIM: the variant delimiter
1261

1262
  """
1263
  __slots__ = [
1264
    "name",
1265
    "path",
1266
    "api_versions",
1267
    "create_script",
1268
    "export_script",
1269
    "import_script",
1270
    "rename_script",
1271
    "verify_script",
1272
    "supported_variants",
1273
    "supported_parameters",
1274
    ]
1275

    
1276
  VARIANT_DELIM = "+"
1277

    
1278
  @classmethod
1279
  def SplitNameVariant(cls, name):
1280
    """Splits the name into the proper name and variant.
1281

1282
    @param name: the OS (unprocessed) name
1283
    @rtype: list
1284
    @return: a list of two elements; if the original name didn't
1285
        contain a variant, it's returned as an empty string
1286

1287
    """
1288
    nv = name.split(cls.VARIANT_DELIM, 1)
1289
    if len(nv) == 1:
1290
      nv.append("")
1291
    return nv
1292

    
1293
  @classmethod
1294
  def GetName(cls, name):
1295
    """Returns the proper name of the os (without the variant).
1296

1297
    @param name: the OS (unprocessed) name
1298

1299
    """
1300
    return cls.SplitNameVariant(name)[0]
1301

    
1302
  @classmethod
1303
  def GetVariant(cls, name):
1304
    """Returns the variant the os (without the base name).
1305

1306
    @param name: the OS (unprocessed) name
1307

1308
    """
1309
    return cls.SplitNameVariant(name)[1]
1310

    
1311

    
1312
class ExtStorage(ConfigObject):
1313
  """Config object representing an External Storage Provider.
1314

1315
  """
1316
  __slots__ = [
1317
    "name",
1318
    "path",
1319
    "create_script",
1320
    "remove_script",
1321
    "grow_script",
1322
    "attach_script",
1323
    "detach_script",
1324
    "setinfo_script",
1325
    "verify_script",
1326
    "supported_parameters",
1327
    ]
1328

    
1329

    
1330
class NodeHvState(ConfigObject):
1331
  """Hypvervisor state on a node.
1332

1333
  @ivar mem_total: Total amount of memory
1334
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1335
    available)
1336
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1337
    rounding
1338
  @ivar mem_inst: Memory used by instances living on node
1339
  @ivar cpu_total: Total node CPU core count
1340
  @ivar cpu_node: Number of CPU cores reserved for the node itself
1341

1342
  """
1343
  __slots__ = [
1344
    "mem_total",
1345
    "mem_node",
1346
    "mem_hv",
1347
    "mem_inst",
1348
    "cpu_total",
1349
    "cpu_node",
1350
    ] + _TIMESTAMPS
1351

    
1352

    
1353
class NodeDiskState(ConfigObject):
1354
  """Disk state on a node.
1355

1356
  """
1357
  __slots__ = [
1358
    "total",
1359
    "reserved",
1360
    "overhead",
1361
    ] + _TIMESTAMPS
1362

    
1363

    
1364
class Node(TaggableObject):
1365
  """Config object representing a node.
1366

1367
  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1368
  @ivar hv_state_static: Hypervisor state overriden by user
1369
  @ivar disk_state: Disk state (e.g. free space)
1370
  @ivar disk_state_static: Disk state overriden by user
1371

1372
  """
1373
  __slots__ = [
1374
    "name",
1375
    "primary_ip",
1376
    "secondary_ip",
1377
    "serial_no",
1378
    "master_candidate",
1379
    "offline",
1380
    "drained",
1381
    "group",
1382
    "master_capable",
1383
    "vm_capable",
1384
    "ndparams",
1385
    "powered",
1386
    "hv_state",
1387
    "hv_state_static",
1388
    "disk_state",
1389
    "disk_state_static",
1390
    ] + _TIMESTAMPS + _UUID
1391

    
1392
  def UpgradeConfig(self):
1393
    """Fill defaults for missing configuration values.
1394

1395
    """
1396
    # pylint: disable=E0203
1397
    # because these are "defined" via slots, not manually
1398
    if self.master_capable is None:
1399
      self.master_capable = True
1400

    
1401
    if self.vm_capable is None:
1402
      self.vm_capable = True
1403

    
1404
    if self.ndparams is None:
1405
      self.ndparams = {}
1406
    # And remove any global parameter
1407
    for key in constants.NDC_GLOBALS:
1408
      if key in self.ndparams:
1409
        logging.warning("Ignoring %s node parameter for node %s",
1410
                        key, self.name)
1411
        del self.ndparams[key]
1412

    
1413
    if self.powered is None:
1414
      self.powered = True
1415

    
1416
  def ToDict(self):
1417
    """Custom function for serializing.
1418

1419
    """
1420
    data = super(Node, self).ToDict()
1421

    
1422
    hv_state = data.get("hv_state", None)
1423
    if hv_state is not None:
1424
      data["hv_state"] = outils.ContainerToDicts(hv_state)
1425

    
1426
    disk_state = data.get("disk_state", None)
1427
    if disk_state is not None:
1428
      data["disk_state"] = \
1429
        dict((key, outils.ContainerToDicts(value))
1430
             for (key, value) in disk_state.items())
1431

    
1432
    return data
1433

    
1434
  @classmethod
1435
  def FromDict(cls, val):
1436
    """Custom function for deserializing.
1437

1438
    """
1439
    obj = super(Node, cls).FromDict(val)
1440

    
1441
    if obj.hv_state is not None:
1442
      obj.hv_state = \
1443
        outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1444

    
1445
    if obj.disk_state is not None:
1446
      obj.disk_state = \
1447
        dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
1448
             for (key, value) in obj.disk_state.items())
1449

    
1450
    return obj
1451

    
1452

    
1453
class NodeGroup(TaggableObject):
1454
  """Config object representing a node group."""
1455
  __slots__ = [
1456
    "name",
1457
    "members",
1458
    "ndparams",
1459
    "diskparams",
1460
    "ipolicy",
1461
    "serial_no",
1462
    "hv_state_static",
1463
    "disk_state_static",
1464
    "alloc_policy",
1465
    "networks",
1466
    ] + _TIMESTAMPS + _UUID
1467

    
1468
  def ToDict(self):
1469
    """Custom function for nodegroup.
1470

1471
    This discards the members object, which gets recalculated and is only kept
1472
    in memory.
1473

1474
    """
1475
    mydict = super(NodeGroup, self).ToDict()
1476
    del mydict["members"]
1477
    return mydict
1478

    
1479
  @classmethod
1480
  def FromDict(cls, val):
1481
    """Custom function for nodegroup.
1482

1483
    The members slot is initialized to an empty list, upon deserialization.
1484

1485
    """
1486
    obj = super(NodeGroup, cls).FromDict(val)
1487
    obj.members = []
1488
    return obj
1489

    
1490
  def UpgradeConfig(self):
1491
    """Fill defaults for missing configuration values.
1492

1493
    """
1494
    if self.ndparams is None:
1495
      self.ndparams = {}
1496

    
1497
    if self.serial_no is None:
1498
      self.serial_no = 1
1499

    
1500
    if self.alloc_policy is None:
1501
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1502

    
1503
    # We only update mtime, and not ctime, since we would not be able
1504
    # to provide a correct value for creation time.
1505
    if self.mtime is None:
1506
      self.mtime = time.time()
1507

    
1508
    if self.diskparams is None:
1509
      self.diskparams = {}
1510
    if self.ipolicy is None:
1511
      self.ipolicy = MakeEmptyIPolicy()
1512

    
1513
    if self.networks is None:
1514
      self.networks = {}
1515

    
1516
  def FillND(self, node):
1517
    """Return filled out ndparams for L{objects.Node}
1518

1519
    @type node: L{objects.Node}
1520
    @param node: A Node object to fill
1521
    @return a copy of the node's ndparams with defaults filled
1522

1523
    """
1524
    return self.SimpleFillND(node.ndparams)
1525

    
1526
  def SimpleFillND(self, ndparams):
1527
    """Fill a given ndparams dict with defaults.
1528

1529
    @type ndparams: dict
1530
    @param ndparams: the dict to fill
1531
    @rtype: dict
1532
    @return: a copy of the passed in ndparams with missing keys filled
1533
        from the node group defaults
1534

1535
    """
1536
    return FillDict(self.ndparams, ndparams)


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "dsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "master_netmask",
    "use_external_mip_script",
    "cluster_name",
    "file_storage_dir",
    "shared_file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "ipolicy",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "ndparams",
    "diskparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    "hv_state_static",
    "disk_state_static",
    "enabled_disk_templates",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.ndparams = UpgradeNDParams(self.ndparams)

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    if self.diskparams:
      self.diskparams = UpgradeDiskParams(self.diskparams)
    else:
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
      if wrongkeys:
        # These keys would be silently removed by FillIPolicy()
        msg = ("Cluster instance policy contains spurious keys: %s" %
               utils.CommaJoin(wrongkeys))
        raise errors.ConfigurationError(msg)
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)

  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()

    if self.tcpudp_port_pool is None:
      tcpudp_port_pool = []
    else:
      tcpudp_port_pool = list(self.tcpudp_port_pool)

    mydict["tcpudp_port_pool"] = tcpudp_port_pool

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)

    if obj.tcpudp_port_pool is None:
      obj.tcpudp_port_pool = set()
    elif not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)

    return obj
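
  # Round-trip note (interpretation, not part of the original file):
  # ToDict() emits tcpudp_port_pool as a plain list, since sets do not
  # serialize cleanly, and FromDict() restores it to a set, so a stored
  # value of [11000, 11001] comes back as set([11000, 11001]).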

  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: The diskparams
    @return: The defaults dict

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type hvparams: dict
    @param hvparams: the dict to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
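
  # Usage sketch (hypothetical parameter values): hvparams are resolved in
  # three layers (cluster-wide defaults, then per-OS overrides from os_hvp,
  # then the caller's dict), with later layers winning on conflicts:
  #
  #   cluster.hvparams == {"kvm": {"kernel_path": "/boot/vmlinuz", "acpi": True}}
  #   cluster.os_hvp == {"debian-image": {"kvm": {"acpi": False}}}
  #   cluster.SimpleFillHV("kvm", "debian-image", {"kernel_path": ""})
  #   # -> {"kernel_path": "", "acpi": False}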

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
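
  # Usage sketch (hypothetical OS name): for a variant such as
  # "debian-image+secure", name_only is "debian-image", so parameters are
  # layered as base OS, then variant, then the caller's os_params, each
  # later layer overriding the previous one.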

  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))
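
  # Resolution order sketch: nodegroup.FillND(node) puts the node's own
  # ndparams on top of the group's, and SimpleFillND() then fills whatever
  # is still missing from the cluster defaults, i.e. node values override
  # group values, which override cluster values.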

  def FillNDGroup(self, nodegroup):
    """Return filled out ndparams for just L{objects.NodeGroup}

    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node group's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.SimpleFillND({}))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of the passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)

  def IsDiskTemplateEnabled(self, disk_template):
    """Checks if a particular disk template is enabled.

    """
    return utils.storage.IsDiskTemplateEnabled(
        disk_template, self.enabled_disk_templates)

  def IsFileStorageEnabled(self):
    """Checks if file storage is enabled.

    """
    return utils.storage.IsFileStorageEnabled(self.enabled_disk_templates)

  def IsSharedFileStorageEnabled(self):
    """Checks if shared file storage is enabled.

    """
    return utils.storage.IsSharedFileStorageEnabled(
        self.enabled_disk_templates)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for the import/export daemon.

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = \
      outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True


class Network(TaggableObject):
  """Object representing a network definition for ganeti.

  """
  __slots__ = [
    "name",
    "serial_no",
    "mac_prefix",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "reservations",
    "ext_reservations",
    ] + _TIMESTAMPS + _UUID

  def HooksDict(self, prefix=""):
    """Export a dictionary used by hooks with a network's information.

    @type prefix: String
    @param prefix: Prefix to prepend to the dict entries

    """
    result = {
      "%sNETWORK_NAME" % prefix: self.name,
      "%sNETWORK_UUID" % prefix: self.uuid,
      "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
    }
    if self.network:
      result["%sNETWORK_SUBNET" % prefix] = self.network
    if self.gateway:
      result["%sNETWORK_GATEWAY" % prefix] = self.gateway
    if self.network6:
      result["%sNETWORK_SUBNET6" % prefix] = self.network6
    if self.gateway6:
      result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
    if self.mac_prefix:
      result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix

    return result
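
  # Example result (hypothetical network, prefix=""): a network named "net1"
  # with subnet 192.0.2.0/24 and gateway 192.0.2.1 would yield roughly
  #   {"NETWORK_NAME": "net1", "NETWORK_UUID": "<uuid>", "NETWORK_TAGS": "",
  #    "NETWORK_SUBNET": "192.0.2.0/24", "NETWORK_GATEWAY": "192.0.2.1"}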

  @classmethod
  def FromDict(cls, val):
    """Custom function for networks.

    Remove deprecated network_type and family.

    """
    if "network_type" in val:
      del val["network_type"]
    if "family" in val:
      del val["family"]
    obj = super(Network, cls).FromDict(val)
    return obj


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
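
  # Round-trip sketch (hypothetical data):
  #   cfp = SerializableConfigParser.Loads("[section]\nkey = value\n")
  #   cfp.get("section", "key")  # -> "value"
  #   cfp.Dumps()                # -> INI text equivalent to the input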


class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  @type lv_list: list of strings
  @ivar lv_list: names of the LVs hosted on the PV
  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    "lv_list",
    ]

  def IsEmpty(self):
    """Is this PV empty?

    """
    return self.size <= (self.free + 1)

  def IsAllocatable(self):
    """Is this PV allocatable?

    """
    return ("a" in self.attributes)