
root / lib / objects.py @ 653bc0f1


1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable=E0203,W0201,R0902
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
# R0902: Allow instances of these objects to have more than 20 attributes
37

    
38
import ConfigParser
39
import re
40
import copy
41
import logging
42
import time
43
from cStringIO import StringIO
44

    
45
from ganeti import errors
46
from ganeti import constants
47
from ganeti import netutils
48
from ganeti import outils
49
from ganeti import utils
50

    
51
from socket import AF_INET
52

    
53

    
54
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
55
           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
56

    
57
_TIMESTAMPS = ["ctime", "mtime"]
58
_UUID = ["uuid"]
59

    
60

    
61
def FillDict(defaults_dict, custom_dict, skip_keys=None):
62
  """Basic function to apply settings on top a default dict.
63

64
  @type defaults_dict: dict
65
  @param defaults_dict: dictionary holding the default values
66
  @type custom_dict: dict
67
  @param custom_dict: dictionary holding customized values
68
  @type skip_keys: list
69
  @param skip_keys: which keys not to fill
70
  @rtype: dict
71
  @return: dict with the 'full' values
72

73
  """
74
  ret_dict = copy.deepcopy(defaults_dict)
75
  ret_dict.update(custom_dict)
76
  if skip_keys:
77
    for k in skip_keys:
78
      if k in ret_dict:
79
        del ret_dict[k]
80
  return ret_dict
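# Example (illustrative sketch, not part of the original module): FillDict
# overlays the custom values on a deep copy of the defaults and then drops
# any skipped keys, e.g.
#   FillDict({"mode": "bridged", "link": "br0"}, {"link": "br1"},
#            skip_keys=["mode"])
#   => {"link": "br1"}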
81

    
82

    
83
def FillIPolicy(default_ipolicy, custom_ipolicy):
84
  """Fills an instance policy with defaults.
85

86
  """
87
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
88
  ret_dict = copy.deepcopy(custom_ipolicy)
89
  for key in default_ipolicy:
90
    if key not in ret_dict:
91
      ret_dict[key] = copy.deepcopy(default_ipolicy[key])
92
    elif key == constants.ISPECS_STD:
93
      ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
94
  return ret_dict
95

    
96

    
97
def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
98
  """Fills the disk parameter defaults.
99

100
  @see: L{FillDict} for parameters and return value
101

102
  """
103
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
104

    
105
  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
106
                             skip_keys=skip_keys))
107
              for dt in constants.DISK_TEMPLATES)
108

    
109

    
110
def UpgradeGroupedParams(target, defaults):
111
  """Update all groups for the target parameter.
112

113
  @type target: dict of dicts
114
  @param target: {group: {parameter: value}}
115
  @type defaults: dict
116
  @param defaults: default parameter values
117

118
  """
119
  if target is None:
120
    target = {constants.PP_DEFAULT: defaults}
121
  else:
122
    for group in target:
123
      target[group] = FillDict(defaults, target[group])
124
  return target
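# Example (illustrative sketch, not part of the original module): each group
# in the target dict is filled from the defaults, and a missing target gets a
# single default group, e.g.
#   UpgradeGroupedParams({"default": {"link": "br1"}},
#                        {"mode": "bridged", "link": "br0"})
#   => {"default": {"mode": "bridged", "link": "br1"}}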
125

    
126

    
127
def UpgradeBeParams(target):
128
  """Update the be parameters dict to the new format.
129

130
  @type target: dict
131
  @param target: "be" parameters dict
132

133
  """
134
  if constants.BE_MEMORY in target:
135
    memory = target[constants.BE_MEMORY]
136
    target[constants.BE_MAXMEM] = memory
137
    target[constants.BE_MINMEM] = memory
138
    del target[constants.BE_MEMORY]
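# Example (illustrative sketch, not part of the original module): the legacy
# single "memory" backend parameter becomes explicit maxmem/minmem values,
# assuming the BE_* constants map to these literal key names:
#   params = {"memory": 512}
#   UpgradeBeParams(params)
#   params == {"maxmem": 512, "minmem": 512}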
139

    
140

    
141
def UpgradeDiskParams(diskparams):
142
  """Upgrade the disk parameters.
143

144
  @type diskparams: dict
145
  @param diskparams: disk parameters to upgrade
146
  @rtype: dict
147
  @return: the upgraded disk parameters dict
148

149
  """
150
  if not diskparams:
151
    result = {}
152
  else:
153
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
154

    
155
  return result
156

    
157

    
158
def UpgradeNDParams(ndparams):
159
  """Upgrade ndparams structure.
160

161
  @type ndparams: dict
162
  @param ndparams: node parameters to upgrade
163
  @rtype: dict
164
  @return: the upgraded node parameters dict
165

166
  """
167
  if ndparams is None:
168
    ndparams = {}
169

    
170
  if (constants.ND_OOB_PROGRAM in ndparams and
171
      ndparams[constants.ND_OOB_PROGRAM] is None):
172
    # will be reset by the line below
173
    del ndparams[constants.ND_OOB_PROGRAM]
174
  return FillDict(constants.NDC_DEFAULTS, ndparams)
175

    
176

    
177
def MakeEmptyIPolicy():
178
  """Create empty IPolicy dictionary.
179

180
  """
181
  return {}
182

    
183

    
184
class ConfigObject(outils.ValidatedSlots):
185
  """A generic config object.
186

187
  It has the following properties:
188

189
    - provides somewhat safe recursive unpickling and pickling for its classes
190
    - unset attributes which are defined in slots are always returned
191
      as None instead of raising an error
192

193
  Classes derived from this must always declare __slots__ (we use many
194
  config objects and the memory reduction is useful)
195

196
  """
197
  __slots__ = []
198

    
199
  def __getattr__(self, name):
200
    if name not in self.GetAllSlots():
201
      raise AttributeError("Invalid object attribute %s.%s" %
202
                           (type(self).__name__, name))
203
    return None
204

    
205
  def __setstate__(self, state):
206
    slots = self.GetAllSlots()
207
    for name in state:
208
      if name in slots:
209
        setattr(self, name, state[name])
210

    
211
  def Validate(self):
212
    """Validates the slots.
213

214
    """
215

    
216
  def ToDict(self):
217
    """Convert to a dict holding only standard python types.
218

219
    The generic routine just dumps all of this object's attributes in
220
    a dict. It does not work if the class has children who are
221
    ConfigObjects themselves (e.g. the nics list in an Instance), in
222
    which case the object should subclass the function in order to
223
    make sure all objects returned are only standard python types.
224

225
    """
226
    result = {}
227
    for name in self.GetAllSlots():
228
      value = getattr(self, name, None)
229
      if value is not None:
230
        result[name] = value
231
    return result
232

    
233
  __getstate__ = ToDict
234

    
235
  @classmethod
236
  def FromDict(cls, val):
237
    """Create an object from a dictionary.
238

239
    This generic routine takes a dict, instantiates a new instance of
240
    the given class, and sets attributes based on the dict content.
241

242
    As for `ToDict`, this does not work if the class has children
243
    who are ConfigObjects themselves (e.g. the nics list in an
244
    Instance), in which case the object should subclass the function
245
    and alter the objects.
246

247
    """
248
    if not isinstance(val, dict):
249
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
250
                                      " expected dict, got %s" % type(val))
251
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
252
    obj = cls(**val_str) # pylint: disable=W0142
253
    return obj
254

    
255
  def Copy(self):
256
    """Makes a deep copy of the current object and its children.
257

258
    """
259
    dict_form = self.ToDict()
260
    clone_obj = self.__class__.FromDict(dict_form)
261
    return clone_obj
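  # Example (illustrative sketch, not part of the original module): ToDict and
  # FromDict round-trip an object through plain python types, which is what
  # Copy relies on; for a hypothetical subclass:
  #   class Example(ConfigObject):
  #     __slots__ = ["alpha", "beta"]
  #   obj = Example(alpha=1)
  #   Example.FromDict(obj.ToDict()) == obj   # unset "beta" reads as None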
262

    
263
  def __repr__(self):
264
    """Implement __repr__ for ConfigObjects."""
265
    return repr(self.ToDict())
266

    
267
  def __eq__(self, other):
268
    """Implement __eq__ for ConfigObjects."""
269
    return isinstance(other, self.__class__) and self.ToDict() == other.ToDict()
270

    
271
  def UpgradeConfig(self):
272
    """Fill defaults for missing configuration values.
273

274
    This method will be called at configuration load time, and its
275
    implementation will be object dependent.
276

277
    """
278
    pass
279

    
280

    
281
class TaggableObject(ConfigObject):
282
  """An generic class supporting tags.
283

284
  """
285
  __slots__ = ["tags"]
286
  VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")
287

    
288
  @classmethod
289
  def ValidateTag(cls, tag):
290
    """Check if a tag is valid.
291

292
    If the tag is invalid, an errors.TagError will be raised. The
293
    function has no return value.
294

295
    """
296
    if not isinstance(tag, basestring):
297
      raise errors.TagError("Invalid tag type (not a string)")
298
    if len(tag) > constants.MAX_TAG_LEN:
299
      raise errors.TagError("Tag too long (>%d characters)" %
300
                            constants.MAX_TAG_LEN)
301
    if not tag:
302
      raise errors.TagError("Tags cannot be empty")
303
    if not cls.VALID_TAG_RE.match(tag):
304
      raise errors.TagError("Tag contains invalid characters")
305

    
306
  def GetTags(self):
307
    """Return the tags list.
308

309
    """
310
    tags = getattr(self, "tags", None)
311
    if tags is None:
312
      tags = self.tags = set()
313
    return tags
314

    
315
  def AddTag(self, tag):
316
    """Add a new tag.
317

318
    """
319
    self.ValidateTag(tag)
320
    tags = self.GetTags()
321
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
322
      raise errors.TagError("Too many tags")
323
    self.GetTags().add(tag)
324

    
325
  def RemoveTag(self, tag):
326
    """Remove a tag.
327

328
    """
329
    self.ValidateTag(tag)
330
    tags = self.GetTags()
331
    try:
332
      tags.remove(tag)
333
    except KeyError:
334
      raise errors.TagError("Tag not found")
335

    
336
  def ToDict(self):
337
    """Taggable-object-specific conversion to standard python types.
338

339
    This replaces the tags set with a list.
340

341
    """
342
    bo = super(TaggableObject, self).ToDict()
343

    
344
    tags = bo.get("tags", None)
345
    if isinstance(tags, set):
346
      bo["tags"] = list(tags)
347
    return bo
348

    
349
  @classmethod
350
  def FromDict(cls, val):
351
    """Custom function for instances.
352

353
    """
354
    obj = super(TaggableObject, cls).FromDict(val)
355
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
356
      obj.tags = set(obj.tags)
357
    return obj
358

    
359

    
360
class MasterNetworkParameters(ConfigObject):
361
  """Network configuration parameters for the master
362

363
  @ivar uuid: master node's UUID
364
  @ivar ip: master IP
365
  @ivar netmask: master netmask
366
  @ivar netdev: master network device
367
  @ivar ip_family: master IP family
368

369
  """
370
  __slots__ = [
371
    "uuid",
372
    "ip",
373
    "netmask",
374
    "netdev",
375
    "ip_family",
376
    ]
377

    
378

    
379
class ConfigData(ConfigObject):
380
  """Top-level config object."""
381
  __slots__ = [
382
    "version",
383
    "cluster",
384
    "nodes",
385
    "nodegroups",
386
    "instances",
387
    "networks",
388
    "serial_no",
389
    ] + _TIMESTAMPS
390

    
391
  def ToDict(self):
392
    """Custom function for top-level config data.
393

394
    This just replaces the list of instances, nodes and the cluster
395
    with standard python types.
396

397
    """
398
    mydict = super(ConfigData, self).ToDict()
399
    mydict["cluster"] = mydict["cluster"].ToDict()
400
    for key in "nodes", "instances", "nodegroups", "networks":
401
      mydict[key] = outils.ContainerToDicts(mydict[key])
402

    
403
    return mydict
404

    
405
  @classmethod
406
  def FromDict(cls, val):
407
    """Custom function for top-level config data
408

409
    """
410
    obj = super(ConfigData, cls).FromDict(val)
411
    obj.cluster = Cluster.FromDict(obj.cluster)
412
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
413
    obj.instances = \
414
      outils.ContainerFromDicts(obj.instances, dict, Instance)
415
    obj.nodegroups = \
416
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
417
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
418
    return obj
419

    
420
  def HasAnyDiskOfType(self, dev_type):
421
    """Check if in there is at disk of the given type in the configuration.
422

423
    @type dev_type: L{constants.DTS_BLOCK}
424
    @param dev_type: the type to look for
425
    @rtype: boolean
426
    @return: boolean indicating if a disk of the given type was found or not
427

428
    """
429
    for instance in self.instances.values():
430
      for disk in instance.disks:
431
        if disk.IsBasedOnDiskType(dev_type):
432
          return True
433
    return False
434

    
435
  def UpgradeConfig(self):
436
    """Fill defaults for missing configuration values.
437

438
    """
439
    self.cluster.UpgradeConfig()
440
    for node in self.nodes.values():
441
      node.UpgradeConfig()
442
    for instance in self.instances.values():
443
      instance.UpgradeConfig()
444
    self._UpgradeEnabledDiskTemplates()
445
    if self.nodegroups is None:
446
      self.nodegroups = {}
447
    for nodegroup in self.nodegroups.values():
448
      nodegroup.UpgradeConfig()
449
      InstancePolicy.UpgradeDiskTemplates(
450
        nodegroup.ipolicy, self.cluster.enabled_disk_templates)
451
    if self.cluster.drbd_usermode_helper is None:
452
      if self.cluster.IsDiskTemplateEnabled(constants.DT_DRBD8):
453
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
454
    if self.networks is None:
455
      self.networks = {}
456
    for network in self.networks.values():
457
      network.UpgradeConfig()
458

    
459
  def _UpgradeEnabledDiskTemplates(self):
460
    """Upgrade the cluster's enabled disk templates by inspecting the currently
461
       enabled and/or used disk templates.
462

463
    """
464
    if not self.cluster.enabled_disk_templates:
465
      template_set = \
466
        set([inst.disk_template for inst in self.instances.values()])
467
      # Add drbd and plain, if lvm is enabled (by specifying a volume group)
468
      if self.cluster.volume_group_name:
469
        template_set.add(constants.DT_DRBD8)
470
        template_set.add(constants.DT_PLAIN)
471
      # Set enabled_disk_templates to the inferred disk templates. Order them
472
      # according to a preference list that is based on Ganeti's history of
473
      # supported disk templates.
474
      self.cluster.enabled_disk_templates = []
475
      for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
476
        if preferred_template in template_set:
477
          self.cluster.enabled_disk_templates.append(preferred_template)
478
          template_set.remove(preferred_template)
479
      self.cluster.enabled_disk_templates.extend(list(template_set))
480
    InstancePolicy.UpgradeDiskTemplates(
481
      self.cluster.ipolicy, self.cluster.enabled_disk_templates)
482

    
483

    
484
class NIC(ConfigObject):
485
  """Config object representing a network card."""
486
  __slots__ = ["name", "mac", "ip", "network",
487
               "nicparams", "netinfo", "pci"] + _UUID
488

    
489
  @classmethod
490
  def CheckParameterSyntax(cls, nicparams):
491
    """Check the given parameters for validity.
492

493
    @type nicparams:  dict
494
    @param nicparams: dictionary with parameter names/values
495
    @raise errors.ConfigurationError: when a parameter is not valid
496

497
    """
498
    mode = nicparams[constants.NIC_MODE]
499
    if (mode not in constants.NIC_VALID_MODES and
500
        mode != constants.VALUE_AUTO):
501
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
502

    
503
    if (mode == constants.NIC_MODE_BRIDGED and
504
        not nicparams[constants.NIC_LINK]):
505
      raise errors.ConfigurationError("Missing bridged NIC link")
506

    
507

    
508
class Disk(ConfigObject):
509
  """Config object representing a block device."""
510
  __slots__ = (["name", "dev_type", "logical_id", "children", "iv_name",
511
                "size", "mode", "params", "spindles", "pci"] + _UUID +
512
               # dynamic_params is special. It depends on the node this instance
513
               # is sent to, and should not be persisted.
514
               ["dynamic_params"])
515

    
516
  def CreateOnSecondary(self):
517
    """Test if this device needs to be created on a secondary node."""
518
    return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
519

    
520
  def AssembleOnSecondary(self):
521
    """Test if this device needs to be assembled on a secondary node."""
522
    return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
523

    
524
  def OpenOnSecondary(self):
525
    """Test if this device needs to be opened on a secondary node."""
526
    return self.dev_type in (constants.DT_PLAIN,)
527

    
528
  def StaticDevPath(self):
529
    """Return the device path if this device type has a static one.
530

531
    Some devices (LVM for example) live always at the same /dev/ path,
532
    irrespective of their status. For such devices, we return this
533
    path, for others we return None.
534

535
    @warning: The path returned is not a normalized pathname; callers
536
        should check that it is a valid path.
537

538
    """
539
    if self.dev_type == constants.DT_PLAIN:
540
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
541
    elif self.dev_type == constants.DT_BLOCK:
542
      return self.logical_id[1]
543
    elif self.dev_type == constants.DT_RBD:
544
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
545
    return None
546

    
547
  def ChildrenNeeded(self):
548
    """Compute the needed number of children for activation.
549

550
    This method will return either -1 (all children) or a positive
551
    number denoting the minimum number of children needed for
552
    activation (only mirrored devices will usually return >=0).
553

554
    Currently, only DRBD8 supports diskless activation (therefore we
555
    return 0), for all other we keep the previous semantics and return
556
    -1.
557

558
    """
559
    if self.dev_type == constants.DT_DRBD8:
560
      return 0
561
    return -1
562

    
563
  def IsBasedOnDiskType(self, dev_type):
564
    """Check if the disk or its children are based on the given type.
565

566
    @type dev_type: L{constants.DTS_BLOCK}
567
    @param dev_type: the type to look for
568
    @rtype: boolean
569
    @return: boolean indicating if a device of the given type was found or not
570

571
    """
572
    if self.children:
573
      for child in self.children:
574
        if child.IsBasedOnDiskType(dev_type):
575
          return True
576
    return self.dev_type == dev_type
577

    
578
  def GetNodes(self, node_uuid):
579
    """This function returns the nodes this device lives on.
580

581
    Given the node on which the parent of the device lives (or, in
582
    case of a top-level device, the primary node of the devices'
583
    instance), this function will return a list of nodes on which this
584
    device needs to (or can) be assembled.
585

586
    """
587
    if self.dev_type in [constants.DT_PLAIN, constants.DT_FILE,
588
                         constants.DT_BLOCK, constants.DT_RBD,
589
                         constants.DT_EXT, constants.DT_SHARED_FILE,
590
                         constants.DT_GLUSTER]:
591
      result = [node_uuid]
592
    elif self.dev_type in constants.DTS_DRBD:
593
      result = [self.logical_id[0], self.logical_id[1]]
594
      if node_uuid not in result:
595
        raise errors.ConfigurationError("DRBD device passed unknown node")
596
    else:
597
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
598
    return result
599

    
600
  def ComputeNodeTree(self, parent_node_uuid):
601
    """Compute the node/disk tree for this disk and its children.
602

603
    This method, given the node on which the parent disk lives, will
604
    return the list of all (node UUID, disk) pairs which describe the disk
605
    tree in the most compact way. For example, a drbd/lvm stack
606
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
607
    which represents all the top-level devices on the nodes.
608

609
    """
610
    my_nodes = self.GetNodes(parent_node_uuid)
611
    result = [(node, self) for node in my_nodes]
612
    if not self.children:
613
      # leaf device
614
      return result
615
    for node in my_nodes:
616
      for child in self.children:
617
        child_result = child.ComputeNodeTree(node)
618
        if len(child_result) == 1:
619
          # child (and all its descendants) is simple, doesn't split
620
          # over multiple hosts, so we don't need to describe it, our
621
          # own entry for this node describes it completely
622
          continue
623
        else:
624
          # check if child nodes differ from my nodes; note that
625
          # subdisk can differ from the child itself, and be instead
626
          # one of its descendants
627
          for subnode, subdisk in child_result:
628
            if subnode not in my_nodes:
629
              result.append((subnode, subdisk))
630
            # otherwise child is under our own node, so we ignore this
631
            # entry (but probably the other results in the list will
632
            # be different)
633
    return result
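  # Example (illustrative sketch, not part of the original module): for a
  # DRBD8 disk backed by two local LVs, ComputeNodeTree(primary_uuid)
  # collapses the tree to the top-level device on each node, conceptually
  #   [(primary_uuid, drbd_disk), (secondary_uuid, drbd_disk)]
  # because every LV child lives on a node already covered by the DRBD entry.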
634

    
635
  def ComputeGrowth(self, amount):
636
    """Compute the per-VG growth requirements.
637

638
    This only works for VG-based disks.
639

640
    @type amount: integer
641
    @param amount: the desired increase in (user-visible) disk space
642
    @rtype: dict
643
    @return: a dictionary of volume-groups and the required size
644

645
    """
646
    if self.dev_type == constants.DT_PLAIN:
647
      return {self.logical_id[0]: amount}
648
    elif self.dev_type == constants.DT_DRBD8:
649
      if self.children:
650
        return self.children[0].ComputeGrowth(amount)
651
      else:
652
        return {}
653
    else:
654
      # Other disk types do not require VG space
655
      return {}
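  # Example (illustrative sketch, not part of the original module): growing a
  # DRBD8 disk consumes volume-group space only through its data LV child, so
  # with a plain child in volume group "xenvg" the result is conceptually
  # {"xenvg": amount}, while non-VG-based disk types report no requirements.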
656

    
657
  def RecordGrow(self, amount):
658
    """Update the size of this disk after growth.
659

660
    This method recurses over the disk's children and updates their
661
    size correspondingly. The method needs to be kept in sync with the
662
    actual algorithms from bdev.
663

664
    """
665
    if self.dev_type in (constants.DT_PLAIN, constants.DT_FILE,
666
                         constants.DT_RBD, constants.DT_EXT,
667
                         constants.DT_SHARED_FILE, constants.DT_GLUSTER):
668
      self.size += amount
669
    elif self.dev_type == constants.DT_DRBD8:
670
      if self.children:
671
        self.children[0].RecordGrow(amount)
672
      self.size += amount
673
    else:
674
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
675
                                   " disk type %s" % self.dev_type)
676

    
677
  def Update(self, size=None, mode=None, spindles=None):
678
    """Apply changes to size, spindles and mode.
679

680
    """
681
    if self.dev_type == constants.DT_DRBD8:
682
      if self.children:
683
        self.children[0].Update(size=size, mode=mode)
684
    else:
685
      assert not self.children
686

    
687
    if size is not None:
688
      self.size = size
689
    if mode is not None:
690
      self.mode = mode
691
    if spindles is not None:
692
      self.spindles = spindles
693

    
694
  def UnsetSize(self):
695
    """Sets recursively the size to zero for the disk and its children.
696

697
    """
698
    if self.children:
699
      for child in self.children:
700
        child.UnsetSize()
701
    self.size = 0
702

    
703
  def UpdateDynamicDiskParams(self, target_node_uuid, nodes_ip):
704
    """Updates the dynamic disk params for the given node.
705

706
    This is mainly used for drbd, which needs ip/port configuration.
707

708
    Arguments:
709
      - target_node_uuid: the node UUID we wish to configure for
710
      - nodes_ip: a mapping of node name to ip
711

712
    The target_node must exist in nodes_ip, and should be one of the
713
    nodes in the logical ID if this device is a DRBD device.
714

715
    """
716
    if self.children:
717
      for child in self.children:
718
        child.UpdateDynamicDiskParams(target_node_uuid, nodes_ip)
719

    
720
    dyn_disk_params = {}
721
    if self.logical_id is not None and self.dev_type in constants.DTS_DRBD:
722
      pnode_uuid, snode_uuid, _, pminor, sminor, _ = self.logical_id
723
      if target_node_uuid not in (pnode_uuid, snode_uuid):
724
        # disk object is being sent to neither the primary nor the secondary
725
        # node. reset the dynamic parameters, the target node is not
726
        # supposed to use them.
727
        self.dynamic_params = dyn_disk_params
728
        return
729

    
730
      pnode_ip = nodes_ip.get(pnode_uuid, None)
731
      snode_ip = nodes_ip.get(snode_uuid, None)
732
      if pnode_ip is None or snode_ip is None:
733
        raise errors.ConfigurationError("Can't find primary or secondary node"
734
                                        " for %s" % str(self))
735
      if pnode_uuid == target_node_uuid:
736
        dyn_disk_params[constants.DDP_LOCAL_IP] = pnode_ip
737
        dyn_disk_params[constants.DDP_REMOTE_IP] = snode_ip
738
        dyn_disk_params[constants.DDP_LOCAL_MINOR] = pminor
739
        dyn_disk_params[constants.DDP_REMOTE_MINOR] = sminor
740
      else: # it must be secondary, we tested above
741
        dyn_disk_params[constants.DDP_LOCAL_IP] = snode_ip
742
        dyn_disk_params[constants.DDP_REMOTE_IP] = pnode_ip
743
        dyn_disk_params[constants.DDP_LOCAL_MINOR] = sminor
744
        dyn_disk_params[constants.DDP_REMOTE_MINOR] = pminor
745

    
746
    self.dynamic_params = dyn_disk_params
747

    
748
  # pylint: disable=W0221
749
  def ToDict(self, include_dynamic_params=False):
750
    """Disk-specific conversion to standard python types.
751

752
    This replaces the children lists of objects with lists of
753
    standard python types.
754

755
    """
756
    bo = super(Disk, self).ToDict()
757
    if not include_dynamic_params and "dynamic_params" in bo:
758
      del bo["dynamic_params"]
759

    
760
    for attr in ("children",):
761
      alist = bo.get(attr, None)
762
      if alist:
763
        bo[attr] = outils.ContainerToDicts(alist)
764
    return bo
765

    
766
  @classmethod
767
  def FromDict(cls, val):
768
    """Custom function for Disks
769

770
    """
771
    obj = super(Disk, cls).FromDict(val)
772
    if obj.children:
773
      obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
774
    if obj.logical_id and isinstance(obj.logical_id, list):
775
      obj.logical_id = tuple(obj.logical_id)
776
    if obj.dev_type in constants.DTS_DRBD:
777
      # we need a tuple of length six here
778
      if len(obj.logical_id) < 6:
779
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
780
    return obj
781

    
782
  def __str__(self):
783
    """Custom str() formatter for disks.
784

785
    """
786
    if self.dev_type == constants.DT_PLAIN:
787
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
788
    elif self.dev_type in constants.DTS_DRBD:
789
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
790
      val = "<DRBD8("
791

    
792
      val += ("hosts=%s/%d-%s/%d, port=%s, " %
793
              (node_a, minor_a, node_b, minor_b, port))
794
      if self.children and self.children.count(None) == 0:
795
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
796
      else:
797
        val += "no local storage"
798
    else:
799
      val = ("<Disk(type=%s, logical_id=%s, children=%s" %
800
             (self.dev_type, self.logical_id, self.children))
801
    if self.iv_name is None:
802
      val += ", not visible"
803
    else:
804
      val += ", visible as /dev/%s" % self.iv_name
805
    if self.spindles is not None:
806
      val += ", spindles=%s" % self.spindles
807
    if isinstance(self.size, int):
808
      val += ", size=%dm)>" % self.size
809
    else:
810
      val += ", size='%s')>" % (self.size,)
811
    return val
812

    
813
  def Verify(self):
814
    """Checks that this disk is correctly configured.
815

816
    """
817
    all_errors = []
818
    if self.mode not in constants.DISK_ACCESS_SET:
819
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
820
    return all_errors
821

    
822
  def UpgradeConfig(self):
823
    """Fill defaults for missing configuration values.
824

825
    """
826
    if self.children:
827
      for child in self.children:
828
        child.UpgradeConfig()
829

    
830
    # FIXME: Make this configurable in Ganeti 2.7
831
    # Params should be an empty dict that gets filled any time needed
832
    # In case of ext template we allow arbitrary params that should not
833
    # be overridden during a config reload/upgrade.
834
    if not self.params or not isinstance(self.params, dict):
835
      self.params = {}
836

    
837
    # add here config upgrade for this disk
838

    
839
    # map of legacy device types (mapping differing LD constants to new
840
    # DT constants)
841
    LEG_DEV_TYPE_MAP = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
842
    if self.dev_type in LEG_DEV_TYPE_MAP:
843
      self.dev_type = LEG_DEV_TYPE_MAP[self.dev_type]
844

    
845
  @staticmethod
846
  def ComputeLDParams(disk_template, disk_params):
847
    """Computes Logical Disk parameters from Disk Template parameters.
848

849
    @type disk_template: string
850
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
851
    @type disk_params: dict
852
    @param disk_params: disk template parameters;
853
                        dict(template_name -> parameters)
854
    @rtype: list(dict)
855
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
856
      contains the LD parameters of the node. The tree is flattened in-order.
857

858
    """
859
    if disk_template not in constants.DISK_TEMPLATES:
860
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
861

    
862
    assert disk_template in disk_params
863

    
864
    result = list()
865
    dt_params = disk_params[disk_template]
866

    
867
    if disk_template == constants.DT_DRBD8:
868
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_DRBD8], {
869
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
870
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
871
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
872
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
873
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
874
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
875
        constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL],
876
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
877
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
878
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
879
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
880
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
881
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
882
        }))
883

    
884
      # data LV
885
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
886
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
887
        }))
888

    
889
      # metadata LV
890
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
891
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
892
        }))
893

    
894
    else:
895
      defaults = constants.DISK_LD_DEFAULTS[disk_template]
896
      values = {}
897
      for field in defaults:
898
        values[field] = dt_params[field]
899
      result.append(FillDict(defaults, values))
900

    
901
    return result
902

    
903

    
904
class InstancePolicy(ConfigObject):
905
  """Config object representing instance policy limits dictionary.
906

907
  Note that this object is not actually used in the config, it's just
908
  used as a placeholder for a few functions.
909

910
  """
911
  @classmethod
912
  def UpgradeDiskTemplates(cls, ipolicy, enabled_disk_templates):
913
    """Upgrades the ipolicy configuration."""
914
    if constants.IPOLICY_DTS in ipolicy:
915
      if not set(ipolicy[constants.IPOLICY_DTS]).issubset(
916
        set(enabled_disk_templates)):
917
        ipolicy[constants.IPOLICY_DTS] = list(
918
          set(ipolicy[constants.IPOLICY_DTS]) & set(enabled_disk_templates))
919

    
920
  @classmethod
921
  def CheckParameterSyntax(cls, ipolicy, check_std):
922
    """ Check the instance policy for validity.
923

924
    @type ipolicy: dict
925
    @param ipolicy: dictionary with min/max/std specs and policies
926
    @type check_std: bool
927
    @param check_std: Whether to check std value or just assume compliance
928
    @raise errors.ConfigurationError: when the policy is not legal
929

930
    """
931
    InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
932
    if constants.IPOLICY_DTS in ipolicy:
933
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
934
    for key in constants.IPOLICY_PARAMETERS:
935
      if key in ipolicy:
936
        InstancePolicy.CheckParameter(key, ipolicy[key])
937
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
938
    if wrong_keys:
939
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
940
                                      utils.CommaJoin(wrong_keys))
941

    
942
  @classmethod
943
  def _CheckIncompleteSpec(cls, spec, keyname):
944
    missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
945
    if missing_params:
946
      msg = ("Missing instance specs parameters for %s: %s" %
947
             (keyname, utils.CommaJoin(missing_params)))
948
      raise errors.ConfigurationError(msg)
949

    
950
  @classmethod
951
  def CheckISpecSyntax(cls, ipolicy, check_std):
952
    """Check the instance policy specs for validity.
953

954
    @type ipolicy: dict
955
    @param ipolicy: dictionary with min/max/std specs
956
    @type check_std: bool
957
    @param check_std: Whether to check std value or just assume compliance
958
    @raise errors.ConfigurationError: when specs are not valid
959

960
    """
961
    if constants.ISPECS_MINMAX not in ipolicy:
962
      # Nothing to check
963
      return
964

    
965
    if check_std and constants.ISPECS_STD not in ipolicy:
966
      msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
967
      raise errors.ConfigurationError(msg)
968
    stdspec = ipolicy.get(constants.ISPECS_STD)
969
    if check_std:
970
      InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)
971

    
972
    if not ipolicy[constants.ISPECS_MINMAX]:
973
      raise errors.ConfigurationError("Empty minmax specifications")
974
    std_is_good = False
975
    for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
976
      missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
977
      if missing:
978
        msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
979
        raise errors.ConfigurationError(msg)
980
      for (key, spec) in minmaxspecs.items():
981
        InstancePolicy._CheckIncompleteSpec(spec, key)
982

    
983
      spec_std_ok = True
984
      for param in constants.ISPECS_PARAMETERS:
985
        par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
986
                                                           param, check_std)
987
        spec_std_ok = spec_std_ok and par_std_ok
988
      std_is_good = std_is_good or spec_std_ok
989
    if not std_is_good:
990
      raise errors.ConfigurationError("Invalid std specifications")
991

    
992
  @classmethod
993
  def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
994
    """Check the instance policy specs for validity on a given key.
995

996
    We check if the instance specs make sense for a given key, that is
997
    if minmaxspecs[min][name] <= stdspec[name] <= minmaxspecs[max][name].
998

999
    @type minmaxspecs: dict
1000
    @param minmaxspecs: dictionary with min and max instance spec
1001
    @type stdspec: dict
1002
    @param stdspec: dictionary with standard instance spec
1003
    @type name: string
1004
    @param name: what are the limits for
1005
    @type check_std: bool
1006
    @param check_std: Whether to check std value or just assume compliance
1007
    @rtype: bool
1008
    @return: C{True} when specs are valid, C{False} when standard spec for the
1009
        given name is not valid
1010
    @raise errors.ConfigurationError: when min/max specs for the given name
1011
        are not valid
1012

1013
    """
1014
    minspec = minmaxspecs[constants.ISPECS_MIN]
1015
    maxspec = minmaxspecs[constants.ISPECS_MAX]
1016
    min_v = minspec[name]
1017
    max_v = maxspec[name]
1018

    
1019
    if min_v > max_v:
1020
      err = ("Invalid specification of min/max values for %s: %s/%s" %
1021
             (name, min_v, max_v))
1022
      raise errors.ConfigurationError(err)
1023
    elif check_std:
1024
      std_v = stdspec.get(name, min_v)
1025
      return std_v >= min_v and std_v <= max_v
1026
    else:
1027
      return True
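  # Example (illustrative sketch, not part of the original module): for each
  # parameter the check enforces min <= max (raising ConfigurationError
  # otherwise) and, when check_std is requested, reports whether
  # min <= std <= max holds, e.g. min=1, std=2, max=4 gives True.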
1028

    
1029
  @classmethod
1030
  def CheckDiskTemplates(cls, disk_templates):
1031
    """Checks the disk templates for validity.
1032

1033
    """
1034
    if not disk_templates:
1035
      raise errors.ConfigurationError("Instance policy must contain" +
1036
                                      " at least one disk template")
1037
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1038
    if wrong:
1039
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
1040
                                      utils.CommaJoin(wrong))
1041

    
1042
  @classmethod
1043
  def CheckParameter(cls, key, value):
1044
    """Checks a parameter.
1045

1046
    Currently we expect all parameters to be float values.
1047

1048
    """
1049
    try:
1050
      float(value)
1051
    except (TypeError, ValueError), err:
1052
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
1053
                                      " '%s', error: %s" % (key, value, err))
1054

    
1055

    
1056
class Instance(TaggableObject):
1057
  """Config object representing an instance."""
1058
  __slots__ = [
1059
    "name",
1060
    "primary_node",
1061
    "os",
1062
    "hypervisor",
1063
    "hvparams",
1064
    "beparams",
1065
    "osparams",
1066
    "admin_state",
1067
    "nics",
1068
    "disks",
1069
    "disk_template",
1070
    "disks_active",
1071
    "network_port",
1072
    "serial_no",
1073
    ] + _TIMESTAMPS + _UUID
1074

    
1075
  def _ComputeSecondaryNodes(self):
1076
    """Compute the list of secondary nodes.
1077

1078
    This is a simple wrapper over _ComputeAllNodes.
1079

1080
    """
1081
    all_nodes = set(self._ComputeAllNodes())
1082
    all_nodes.discard(self.primary_node)
1083
    return tuple(all_nodes)
1084

    
1085
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1086
                             "List of names of secondary nodes")
1087

    
1088
  def _ComputeAllNodes(self):
1089
    """Compute the list of all nodes.
1090

1091
    Since the data is already there (in the drbd disks), keeping it as
1092
    a separate normal attribute is redundant and if not properly
1093
    synchronised can cause problems. Thus it's better to compute it
1094
    dynamically.
1095

1096
    """
1097
    def _Helper(nodes, device):
1098
      """Recursively computes nodes given a top device."""
1099
      if device.dev_type in constants.DTS_DRBD:
1100
        nodea, nodeb = device.logical_id[:2]
1101
        nodes.add(nodea)
1102
        nodes.add(nodeb)
1103
      if device.children:
1104
        for child in device.children:
1105
          _Helper(nodes, child)
1106

    
1107
    all_nodes = set()
1108
    for device in self.disks:
1109
      _Helper(all_nodes, device)
1110
    # ensure that the primary node is always the first
1111
    all_nodes.discard(self.primary_node)
1112
    return (self.primary_node, ) + tuple(all_nodes)
1113

    
1114
  all_nodes = property(_ComputeAllNodes, None, None,
1115
                       "List of names of all the nodes of the instance")
1116

    
1117
  def MapLVsByNode(self, lvmap=None, devs=None, node_uuid=None):
1118
    """Provide a mapping of nodes to LVs this instance owns.
1119

1120
    This function figures out what logical volumes should belong on
1121
    which nodes, recursing through a device tree.
1122

1123
    @type lvmap: dict
1124
    @param lvmap: optional dictionary to receive the
1125
        'node' : ['lv', ...] data.
1126
    @type devs: list of L{Disk}
1127
    @param devs: disks to get the LV name for. If None, all disk of this
1128
        instance are used.
1129
    @type node_uuid: string
1130
    @param node_uuid: UUID of the node to get the LV names for. If None, the
1131
        primary node of this instance is used.
1132
    @return: None if lvmap arg is given, otherwise, a dictionary of
1133
        the form { 'node_uuid' : ['volume1', 'volume2', ...], ... };
1134
        volumeN is of the form "vg_name/lv_name", compatible with
1135
        GetVolumeList()
1136

1137
    """
1138
    if node_uuid is None:
1139
      node_uuid = self.primary_node
1140

    
1141
    if lvmap is None:
1142
      lvmap = {
1143
        node_uuid: [],
1144
        }
1145
      ret = lvmap
1146
    else:
1147
      if not node_uuid in lvmap:
1148
        lvmap[node_uuid] = []
1149
      ret = None
1150

    
1151
    if not devs:
1152
      devs = self.disks
1153

    
1154
    for dev in devs:
1155
      if dev.dev_type == constants.DT_PLAIN:
1156
        lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1157

    
1158
      elif dev.dev_type in constants.DTS_DRBD:
1159
        if dev.children:
1160
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1161
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1162

    
1163
      elif dev.children:
1164
        self.MapLVsByNode(lvmap, dev.children, node_uuid)
1165

    
1166
    return ret
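  # Example (illustrative sketch, not part of the original module): for an
  # instance with a single plain disk in volume group "xenvg" the mapping is
  # conceptually
  #   {primary_node_uuid: ["xenvg/<lv_name>"]}
  # while DRBD disks contribute their backing LVs on both of their nodes.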
1167

    
1168
  def FindDisk(self, idx):
1169
    """Find a disk given having a specified index.
1170

1171
    This is just a wrapper that does validation of the index.
1172

1173
    @type idx: int
1174
    @param idx: the disk index
1175
    @rtype: L{Disk}
1176
    @return: the corresponding disk
1177
    @raise errors.OpPrereqError: when the given index is not valid
1178

1179
    """
1180
    try:
1181
      idx = int(idx)
1182
      return self.disks[idx]
1183
    except (TypeError, ValueError), err:
1184
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1185
                                 errors.ECODE_INVAL)
1186
    except IndexError:
1187
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1188
                                 " 0 to %d" % (idx, len(self.disks) - 1),
1189
                                 errors.ECODE_INVAL)
1190

    
1191
  def ToDict(self):
1192
    """Instance-specific conversion to standard python types.
1193

1194
    This replaces the children lists of objects with lists of standard
1195
    python types.
1196

1197
    """
1198
    bo = super(Instance, self).ToDict()
1199

    
1200
    for attr in "nics", "disks":
1201
      alist = bo.get(attr, None)
1202
      if alist:
1203
        nlist = outils.ContainerToDicts(alist)
1204
      else:
1205
        nlist = []
1206
      bo[attr] = nlist
1207
    return bo
1208

    
1209
  @classmethod
1210
  def FromDict(cls, val):
1211
    """Custom function for instances.
1212

1213
    """
1214
    if "admin_state" not in val:
1215
      if val.get("admin_up", False):
1216
        val["admin_state"] = constants.ADMINST_UP
1217
      else:
1218
        val["admin_state"] = constants.ADMINST_DOWN
1219
    if "admin_up" in val:
1220
      del val["admin_up"]
1221
    obj = super(Instance, cls).FromDict(val)
1222
    obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
1223
    obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
1224
    return obj
1225

    
1226
  def UpgradeConfig(self):
1227
    """Fill defaults for missing configuration values.
1228

1229
    """
1230
    for nic in self.nics:
1231
      nic.UpgradeConfig()
1232
    for disk in self.disks:
1233
      disk.UpgradeConfig()
1234
    if self.hvparams:
1235
      for key in constants.HVC_GLOBALS:
1236
        try:
1237
          del self.hvparams[key]
1238
        except KeyError:
1239
          pass
1240
    if self.osparams is None:
1241
      self.osparams = {}
1242
    UpgradeBeParams(self.beparams)
1243
    if self.disks_active is None:
1244
      self.disks_active = self.admin_state == constants.ADMINST_UP
1245

    
1246

    
1247
class OS(ConfigObject):
1248
  """Config object representing an operating system.
1249

1250
  @type supported_parameters: list
1251
  @ivar supported_parameters: a list of tuples, name and description,
1252
      containing the supported parameters by this OS
1253

1254
  @type VARIANT_DELIM: string
1255
  @cvar VARIANT_DELIM: the variant delimiter
1256

1257
  """
1258
  __slots__ = [
1259
    "name",
1260
    "path",
1261
    "api_versions",
1262
    "create_script",
1263
    "export_script",
1264
    "import_script",
1265
    "rename_script",
1266
    "verify_script",
1267
    "supported_variants",
1268
    "supported_parameters",
1269
    ]
1270

    
1271
  VARIANT_DELIM = "+"
1272

    
1273
  @classmethod
1274
  def SplitNameVariant(cls, name):
1275
    """Splits the name into the proper name and variant.
1276

1277
    @param name: the OS (unprocessed) name
1278
    @rtype: list
1279
    @return: a list of two elements; if the original name didn't
1280
        contain a variant, it's returned as an empty string
1281

1282
    """
1283
    nv = name.split(cls.VARIANT_DELIM, 1)
1284
    if len(nv) == 1:
1285
      nv.append("")
1286
    return nv
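  # Example (illustrative sketch, not part of the original module):
  #   OS.SplitNameVariant("debootstrap+default") => ["debootstrap", "default"]
  #   OS.SplitNameVariant("debootstrap")         => ["debootstrap", ""]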
1287

    
1288
  @classmethod
1289
  def GetName(cls, name):
1290
    """Returns the proper name of the os (without the variant).
1291

1292
    @param name: the OS (unprocessed) name
1293

1294
    """
1295
    return cls.SplitNameVariant(name)[0]
1296

    
1297
  @classmethod
1298
  def GetVariant(cls, name):
1299
    """Returns the variant the os (without the base name).
1300

1301
    @param name: the OS (unprocessed) name
1302

1303
    """
1304
    return cls.SplitNameVariant(name)[1]
1305

    
1306

    
1307
class ExtStorage(ConfigObject):
1308
  """Config object representing an External Storage Provider.
1309

1310
  """
1311
  __slots__ = [
1312
    "name",
1313
    "path",
1314
    "create_script",
1315
    "remove_script",
1316
    "grow_script",
1317
    "attach_script",
1318
    "detach_script",
1319
    "setinfo_script",
1320
    "verify_script",
1321
    "supported_parameters",
1322
    ]
1323

    
1324

    
1325
class NodeHvState(ConfigObject):
1326
  """Hypvervisor state on a node.
1327

1328
  @ivar mem_total: Total amount of memory
1329
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1330
    available)
1331
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1332
    rounding
1333
  @ivar mem_inst: Memory used by instances living on node
1334
  @ivar cpu_total: Total node CPU core count
1335
  @ivar cpu_node: Number of CPU cores reserved for the node itself
1336

1337
  """
1338
  __slots__ = [
1339
    "mem_total",
1340
    "mem_node",
1341
    "mem_hv",
1342
    "mem_inst",
1343
    "cpu_total",
1344
    "cpu_node",
1345
    ] + _TIMESTAMPS
1346

    
1347

    
1348
class NodeDiskState(ConfigObject):
1349
  """Disk state on a node.
1350

1351
  """
1352
  __slots__ = [
1353
    "total",
1354
    "reserved",
1355
    "overhead",
1356
    ] + _TIMESTAMPS
1357

    
1358

    
1359
class Node(TaggableObject):
1360
  """Config object representing a node.
1361

1362
  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1363
  @ivar hv_state_static: Hypervisor state overridden by user
1364
  @ivar disk_state: Disk state (e.g. free space)
1365
  @ivar disk_state_static: Disk state overridden by user
1366

1367
  """
1368
  __slots__ = [
1369
    "name",
1370
    "primary_ip",
1371
    "secondary_ip",
1372
    "serial_no",
1373
    "master_candidate",
1374
    "offline",
1375
    "drained",
1376
    "group",
1377
    "master_capable",
1378
    "vm_capable",
1379
    "ndparams",
1380
    "powered",
1381
    "hv_state",
1382
    "hv_state_static",
1383
    "disk_state",
1384
    "disk_state_static",
1385
    ] + _TIMESTAMPS + _UUID
1386

    
1387
  def UpgradeConfig(self):
1388
    """Fill defaults for missing configuration values.
1389

1390
    """
1391
    # pylint: disable=E0203
1392
    # because these are "defined" via slots, not manually
1393
    if self.master_capable is None:
1394
      self.master_capable = True
1395

    
1396
    if self.vm_capable is None:
1397
      self.vm_capable = True
1398

    
1399
    if self.ndparams is None:
1400
      self.ndparams = {}
1401
    # And remove any global parameter
1402
    for key in constants.NDC_GLOBALS:
1403
      if key in self.ndparams:
1404
        logging.warning("Ignoring %s node parameter for node %s",
1405
                        key, self.name)
1406
        del self.ndparams[key]
1407

    
1408
    if self.powered is None:
1409
      self.powered = True
1410

    
1411
  def ToDict(self):
1412
    """Custom function for serializing.
1413

1414
    """
1415
    data = super(Node, self).ToDict()
1416

    
1417
    hv_state = data.get("hv_state", None)
1418
    if hv_state is not None:
1419
      data["hv_state"] = outils.ContainerToDicts(hv_state)
1420

    
1421
    disk_state = data.get("disk_state", None)
1422
    if disk_state is not None:
1423
      data["disk_state"] = \
1424
        dict((key, outils.ContainerToDicts(value))
1425
             for (key, value) in disk_state.items())
1426

    
1427
    return data
1428

    
1429
  @classmethod
1430
  def FromDict(cls, val):
1431
    """Custom function for deserializing.
1432

1433
    """
1434
    obj = super(Node, cls).FromDict(val)
1435

    
1436
    if obj.hv_state is not None:
1437
      obj.hv_state = \
1438
        outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1439

    
1440
    if obj.disk_state is not None:
1441
      obj.disk_state = \
1442
        dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
1443
             for (key, value) in obj.disk_state.items())
1444

    
1445
    return obj
1446

    
1447

    
1448
class NodeGroup(TaggableObject):
1449
  """Config object representing a node group."""
1450
  __slots__ = [
1451
    "name",
1452
    "members",
1453
    "ndparams",
1454
    "diskparams",
1455
    "ipolicy",
1456
    "serial_no",
1457
    "hv_state_static",
1458
    "disk_state_static",
1459
    "alloc_policy",
1460
    "networks",
1461
    ] + _TIMESTAMPS + _UUID
1462

    
1463
  def ToDict(self):
1464
    """Custom function for nodegroup.
1465

1466
    This discards the members object, which gets recalculated and is only kept
1467
    in memory.
1468

1469
    """
1470
    mydict = super(NodeGroup, self).ToDict()
1471
    del mydict["members"]
1472
    return mydict
1473

    
1474
  @classmethod
1475
  def FromDict(cls, val):
1476
    """Custom function for nodegroup.
1477

1478
    The members slot is initialized to an empty list, upon deserialization.
1479

1480
    """
1481
    obj = super(NodeGroup, cls).FromDict(val)
1482
    obj.members = []
1483
    return obj
1484

    
1485
  def UpgradeConfig(self):
1486
    """Fill defaults for missing configuration values.
1487

1488
    """
1489
    if self.ndparams is None:
1490
      self.ndparams = {}
1491

    
1492
    if self.serial_no is None:
1493
      self.serial_no = 1
1494

    
1495
    if self.alloc_policy is None:
1496
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1497

    
1498
    # We only update mtime, and not ctime, since we would not be able
1499
    # to provide a correct value for creation time.
1500
    if self.mtime is None:
1501
      self.mtime = time.time()
1502

    
1503
    if self.diskparams is None:
1504
      self.diskparams = {}
1505
    if self.ipolicy is None:
1506
      self.ipolicy = MakeEmptyIPolicy()
1507

    
1508
    if self.networks is None:
1509
      self.networks = {}
1510

    
1511
  def FillND(self, node):
1512
    """Return filled out ndparams for L{objects.Node}
1513

1514
    @type node: L{objects.Node}
1515
    @param node: A Node object to fill
1516
    @return: a copy of the node's ndparams with defaults filled
1517

1518
    """
1519
    return self.SimpleFillND(node.ndparams)
1520

    
1521
  def SimpleFillND(self, ndparams):
1522
    """Fill a given ndparams dict with defaults.
1523

1524
    @type ndparams: dict
1525
    @param ndparams: the dict to fill
1526
    @rtype: dict
1527
    @return: a copy of the passed in ndparams with missing keys filled
1528
        from the node group defaults
1529

1530
    """
1531
    return FillDict(self.ndparams, ndparams)
1532

    
1533

    
1534
class Cluster(TaggableObject):
1535
  """Config object representing the cluster."""
1536
  __slots__ = [
1537
    "serial_no",
1538
    "rsahostkeypub",
1539
    "dsahostkeypub",
1540
    "highest_used_port",
1541
    "tcpudp_port_pool",
1542
    "mac_prefix",
1543
    "volume_group_name",
1544
    "reserved_lvs",
1545
    "drbd_usermode_helper",
1546
    "default_bridge",
1547
    "default_hypervisor",
1548
    "master_node",
1549
    "master_ip",
1550
    "master_netdev",
1551
    "master_netmask",
1552
    "use_external_mip_script",
1553
    "cluster_name",
1554
    "file_storage_dir",
1555
    "shared_file_storage_dir",
1556
    "gluster_storage_dir",
1557
    "enabled_hypervisors",
1558
    "hvparams",
1559
    "ipolicy",
1560
    "os_hvp",
1561
    "beparams",
1562
    "osparams",
1563
    "nicparams",
1564
    "ndparams",
1565
    "diskparams",
1566
    "candidate_pool_size",
1567
    "modify_etc_hosts",
1568
    "modify_ssh_setup",
1569
    "maintain_node_health",
1570
    "uid_pool",
1571
    "default_iallocator",
1572
    "default_iallocator_params",
1573
    "hidden_os",
1574
    "blacklisted_os",
1575
    "primary_ip_family",
1576
    "prealloc_wipe_disks",
1577
    "hv_state_static",
1578
    "disk_state_static",
1579
    "enabled_disk_templates",
1580
    "candidate_certs",
1581
    "max_running_jobs",
1582
    ] + _TIMESTAMPS + _UUID
1583

    
1584
  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in constants.HYPER_TYPES:
        try:
          existing_params = self.hvparams[hypervisor]
        except KeyError:
          existing_params = {}
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], existing_params)

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.ndparams = UpgradeNDParams(self.ndparams)

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    if self.default_iallocator_params is None:
      self.default_iallocator_params = {}

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    # gluster_storage_dir added in 2.11
    if self.gluster_storage_dir is None:
      self.gluster_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    if self.diskparams:
      self.diskparams = UpgradeDiskParams(self.diskparams)
    else:
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
      if wrongkeys:
        # These keys would be silently removed by FillIPolicy()
        msg = ("Cluster instance policy contains spurious keys: %s" %
               utils.CommaJoin(wrongkeys))
        raise errors.ConfigurationError(msg)
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)

    if self.candidate_certs is None:
      self.candidate_certs = {}

    if self.max_running_jobs is None:
      self.max_running_jobs = constants.LUXID_MAXIMAL_RUNNING_JOBS_DEFAULT

  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()

    if self.tcpudp_port_pool is None:
      tcpudp_port_pool = []
    else:
      tcpudp_port_pool = list(self.tcpudp_port_pool)

    mydict["tcpudp_port_pool"] = tcpudp_port_pool

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)

    if obj.tcpudp_port_pool is None:
      obj.tcpudp_port_pool = set()
    elif not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)

    return obj
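
  # Illustrative round trip (hypothetical values): a cluster whose
  # tcpudp_port_pool is set([11000, 11001]) is serialized by ToDict() with a
  # plain list, e.g. [11000, 11001], and FromDict() turns it back into a set,
  # so serialized configurations never carry Python set objects.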
1751

    
1752
  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: The diskparams
    @return: The defaults dict

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict
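
  # Illustrative sketch (hypothetical values): with cluster-wide hvparams
  # {"xen-pvm": {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/xvda1"}}
  # and os_hvp {"debian": {"xen-pvm": {"root_path": "/dev/xvda2"}}},
  # GetHVDefaults("xen-pvm", os_name="debian") returns
  # {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/xvda2"}; the per-OS
  # value wins because it is applied last in the fill stack.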
  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
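
  # Note: with skip_globals=True, SimpleFillHV leaves out the keys listed in
  # constants.HVC_GLOBALS both when computing the defaults and when merging
  # the given hvparams, so per-instance values cannot override cluster-global
  # hypervisor settings.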
  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
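
  # Illustrative precedence (hypothetical values): for os_name "debian+ext4",
  # SimpleFillOS starts from self.osparams.get("debian"), overlays
  # self.osparams.get("debian+ext4") and finally overlays the passed
  # os_params, so the most specific source wins for every key.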
  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

  def FillNDGroup(self, nodegroup):
    """Return filled out ndparams for just L{objects.NodeGroup}

    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node group's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.SimpleFillND({}))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)
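
  # Illustrative layering (hypothetical values): FillND(node, nodegroup)
  # resolves parameters as node over group over cluster; e.g. with cluster
  # ndparams {"oob_program": None, "spindle_count": 1}, group ndparams
  # {"spindle_count": 2} and empty node ndparams, the result is
  # {"oob_program": None, "spindle_count": 2}.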
  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)

  def IsDiskTemplateEnabled(self, disk_template):
    """Checks if a particular disk template is enabled.

    """
    return utils.storage.IsDiskTemplateEnabled(
        disk_template, self.enabled_disk_templates)

  def IsFileStorageEnabled(self):
    """Checks if file storage is enabled.

    """
    return utils.storage.IsFileStorageEnabled(self.enabled_disk_templates)

  def IsSharedFileStorageEnabled(self):
    """Checks if shared file storage is enabled.

    """
    return utils.storage.IsSharedFileStorageEnabled(
        self.enabled_disk_templates)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon.

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = \
      outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True
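
  # Illustrative object (hypothetical values) that passes Validate(): an SSH
  # console needs instance, kind, host, user and command, e.g.
  #   InstanceConsole(instance="inst1.example.com", kind=constants.CONS_SSH,
  #                   host="node1.example.com", user="root",
  #                   command=["ssh", "-t", "node1.example.com"]),
  # while message, port and display may stay unset for this kind.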


class Network(TaggableObject):
  """Object representing a network definition for ganeti.

  """
  __slots__ = [
    "name",
    "serial_no",
    "mac_prefix",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "reservations",
    "ext_reservations",
    ] + _TIMESTAMPS + _UUID

  def HooksDict(self, prefix=""):
    """Export a dictionary used by hooks with a network's information.

    @type prefix: String
    @param prefix: Prefix to prepend to the dict entries

    """
    result = {
      "%sNETWORK_NAME" % prefix: self.name,
      "%sNETWORK_UUID" % prefix: self.uuid,
      "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
    }
    if self.network:
      result["%sNETWORK_SUBNET" % prefix] = self.network
    if self.gateway:
      result["%sNETWORK_GATEWAY" % prefix] = self.gateway
    if self.network6:
      result["%sNETWORK_SUBNET6" % prefix] = self.network6
    if self.gateway6:
      result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
    if self.mac_prefix:
      result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix

    return result
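
  # Illustrative output (hypothetical values): for a network named "net1"
  # with subnet "10.0.0.0/24" and prefix "GANETI_", HooksDict returns entries
  # such as "GANETI_NETWORK_NAME": "net1" and
  # "GANETI_NETWORK_SUBNET": "10.0.0.0/24"; the gateway, IPv6 and MAC prefix
  # entries are added only when those fields are set.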
  @classmethod
  def FromDict(cls, val):
    """Custom function for networks.

    Remove deprecated network_type and family.

    """
    if "network_type" in val:
      del val["network_type"]
    if "family" in val:
      del val["family"]
    obj = super(Network, cls).FromDict(val)
    return obj


# need to inherit object in order to use super()
class SerializableConfigParser(ConfigParser.SafeConfigParser, object):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp

  def get(self, section, option, **kwargs):
    value = None
    try:
      value = super(SerializableConfigParser, self).get(section, option,
                                                        **kwargs)
      if value.lower() == constants.VALUE_NONE:
        value = None
    except ConfigParser.NoOptionError:
      # Per-disk/NIC name, network and vlan options are optional; if they are
      # missing, leave the value as None instead of failing.
      r = re.compile(r"(disk|nic)\d+_name|nic\d+_(network|vlan)")
      match = r.match(option)
      if not match:
        raise

    return value
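
  # Illustrative round trip (hypothetical section and option names):
  #   cfg = SerializableConfigParser.Loads("[export]\nversion = 0\n")
  #   text = cfg.Dumps()  # back to the INI-style string representation
  # get() additionally maps the literal string constants.VALUE_NONE to None
  # and tolerates missing per-disk/NIC name, network and vlan options.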


class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  @type lv_list: list of strings
  @ivar lv_list: names of the LVs hosted on the PV
  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    "lv_list",
    ]

  def IsEmpty(self):
    """Is this PV empty?

    """
    return self.size <= (self.free + 1)

  def IsAllocatable(self):
    """Is this PV allocatable?

    """
    return ("a" in self.attributes)