Statistics
| Branch: | Tag: | Revision:

root / lib / objects.py @ 5e450b04

History | View | Annotate | Download (66.4 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable=E0203,W0201,R0902
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
# R0902: Allow instances of these objects to have more than 20 attributes
37

    
38
import ConfigParser
39
import re
40
import copy
41
import logging
42
import time
43
from cStringIO import StringIO
44

    
45
from ganeti import errors
46
from ganeti import constants
47
from ganeti import netutils
48
from ganeti import outils
49
from ganeti import utils
50

    
51
from socket import AF_INET
52

    
53

    
54
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
55
           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
56

    
57
_TIMESTAMPS = ["ctime", "mtime"]
58
_UUID = ["uuid"]
59

    
60

    
61
def FillDict(defaults_dict, custom_dict, skip_keys=None):
62
  """Basic function to apply settings on top a default dict.
63

64
  @type defaults_dict: dict
65
  @param defaults_dict: dictionary holding the default values
66
  @type custom_dict: dict
67
  @param custom_dict: dictionary holding customized value
68
  @type skip_keys: list
69
  @param skip_keys: which keys not to fill
70
  @rtype: dict
71
  @return: dict with the 'full' values
72

73
  """
74
  ret_dict = copy.deepcopy(defaults_dict)
75
  ret_dict.update(custom_dict)
76
  if skip_keys:
77
    for k in skip_keys:
78
      try:
79
        del ret_dict[k]
80
      except KeyError:
81
        pass
82
  return ret_dict
83

    
84

    
85
def FillIPolicy(default_ipolicy, custom_ipolicy):
86
  """Fills an instance policy with defaults.
87

88
  """
89
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
90
  ret_dict = copy.deepcopy(custom_ipolicy)
91
  for key in default_ipolicy:
92
    if key not in ret_dict:
93
      ret_dict[key] = copy.deepcopy(default_ipolicy[key])
94
    elif key == constants.ISPECS_STD:
95
      ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
96
  return ret_dict
97

    
98

    
99
def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
100
  """Fills the disk parameter defaults.
101

102
  @see: L{FillDict} for parameters and return value
103

104
  """
105
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
106

    
107
  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
108
                             skip_keys=skip_keys))
109
              for dt in constants.DISK_TEMPLATES)
110

    
111

    
112
def UpgradeGroupedParams(target, defaults):
113
  """Update all groups for the target parameter.
114

115
  @type target: dict of dicts
116
  @param target: {group: {parameter: value}}
117
  @type defaults: dict
118
  @param defaults: default parameter values
119

120
  """
121
  if target is None:
122
    target = {constants.PP_DEFAULT: defaults}
123
  else:
124
    for group in target:
125
      target[group] = FillDict(defaults, target[group])
126
  return target
127

    
128

    
129
def UpgradeBeParams(target):
130
  """Update the be parameters dict to the new format.
131

132
  @type target: dict
133
  @param target: "be" parameters dict
134

135
  """
136
  if constants.BE_MEMORY in target:
137
    memory = target[constants.BE_MEMORY]
138
    target[constants.BE_MAXMEM] = memory
139
    target[constants.BE_MINMEM] = memory
140
    del target[constants.BE_MEMORY]
141

    
142

    
143
def UpgradeDiskParams(diskparams):
144
  """Upgrade the disk parameters.
145

146
  @type diskparams: dict
147
  @param diskparams: disk parameters to upgrade
148
  @rtype: dict
149
  @return: the upgraded disk parameters dict
150

151
  """
152
  if not diskparams:
153
    result = {}
154
  else:
155
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
156

    
157
  return result
158

    
159

    
160
def UpgradeNDParams(ndparams):
161
  """Upgrade ndparams structure.
162

163
  @type ndparams: dict
164
  @param ndparams: disk parameters to upgrade
165
  @rtype: dict
166
  @return: the upgraded node parameters dict
167

168
  """
169
  if ndparams is None:
170
    ndparams = {}
171

    
172
  if (constants.ND_OOB_PROGRAM in ndparams and
173
      ndparams[constants.ND_OOB_PROGRAM] is None):
174
    # will be reset by the line below
175
    del ndparams[constants.ND_OOB_PROGRAM]
176
  return FillDict(constants.NDC_DEFAULTS, ndparams)
177

    
178

    
179
def MakeEmptyIPolicy():
180
  """Create empty IPolicy dictionary.
181

182
  """
183
  return {}
184

    
185

    
186
class ConfigObject(outils.ValidatedSlots):
187
  """A generic config object.
188

189
  It has the following properties:
190

191
    - provides somewhat safe recursive unpickling and pickling for its classes
192
    - unset attributes which are defined in slots are always returned
193
      as None instead of raising an error
194

195
  Classes derived from this must always declare __slots__ (we use many
196
  config objects and the memory reduction is useful)
197

198
  """
199
  __slots__ = []
200

    
201
  def __getattr__(self, name):
202
    if name not in self.GetAllSlots():
203
      raise AttributeError("Invalid object attribute %s.%s" %
204
                           (type(self).__name__, name))
205
    return None
206

    
207
  def __setstate__(self, state):
208
    slots = self.GetAllSlots()
209
    for name in state:
210
      if name in slots:
211
        setattr(self, name, state[name])
212

    
213
  def Validate(self):
214
    """Validates the slots.
215

216
    """
217

    
218
  def ToDict(self):
219
    """Convert to a dict holding only standard python types.
220

221
    The generic routine just dumps all of this object's attributes in
222
    a dict. It does not work if the class has children who are
223
    ConfigObjects themselves (e.g. the nics list in an Instance), in
224
    which case the object should subclass the function in order to
225
    make sure all objects returned are only standard python types.
226

227
    """
228
    result = {}
229
    for name in self.GetAllSlots():
230
      value = getattr(self, name, None)
231
      if value is not None:
232
        result[name] = value
233
    return result
234

    
235
  __getstate__ = ToDict
236

    
237
  @classmethod
238
  def FromDict(cls, val):
239
    """Create an object from a dictionary.
240

241
    This generic routine takes a dict, instantiates a new instance of
242
    the given class, and sets attributes based on the dict content.
243

244
    As for `ToDict`, this does not work if the class has children
245
    who are ConfigObjects themselves (e.g. the nics list in an
246
    Instance), in which case the object should subclass the function
247
    and alter the objects.
248

249
    """
250
    if not isinstance(val, dict):
251
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
252
                                      " expected dict, got %s" % type(val))
253
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
254
    obj = cls(**val_str) # pylint: disable=W0142
255
    return obj
256

    
257
  def Copy(self):
258
    """Makes a deep copy of the current object and its children.
259

260
    """
261
    dict_form = self.ToDict()
262
    clone_obj = self.__class__.FromDict(dict_form)
263
    return clone_obj
264

    
265
  def __repr__(self):
266
    """Implement __repr__ for ConfigObjects."""
267
    return repr(self.ToDict())
268

    
269
  def __eq__(self, other):
270
    """Implement __eq__ for ConfigObjects."""
271
    return isinstance(other, self.__class__) and self.ToDict() == other.ToDict()
272

    
273
  def UpgradeConfig(self):
274
    """Fill defaults for missing configuration values.
275

276
    This method will be called at configuration load time, and its
277
    implementation will be object dependent.
278

279
    """
280
    pass
281

    
282

    
283
class TaggableObject(ConfigObject):
284
  """An generic class supporting tags.
285

286
  """
287
  __slots__ = ["tags"]
288
  VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")
289

    
290
  @classmethod
291
  def ValidateTag(cls, tag):
292
    """Check if a tag is valid.
293

294
    If the tag is invalid, an errors.TagError will be raised. The
295
    function has no return value.
296

297
    """
298
    if not isinstance(tag, basestring):
299
      raise errors.TagError("Invalid tag type (not a string)")
300
    if len(tag) > constants.MAX_TAG_LEN:
301
      raise errors.TagError("Tag too long (>%d characters)" %
302
                            constants.MAX_TAG_LEN)
303
    if not tag:
304
      raise errors.TagError("Tags cannot be empty")
305
    if not cls.VALID_TAG_RE.match(tag):
306
      raise errors.TagError("Tag contains invalid characters")
307

    
308
  def GetTags(self):
309
    """Return the tags list.
310

311
    """
312
    tags = getattr(self, "tags", None)
313
    if tags is None:
314
      tags = self.tags = set()
315
    return tags
316

    
317
  def AddTag(self, tag):
318
    """Add a new tag.
319

320
    """
321
    self.ValidateTag(tag)
322
    tags = self.GetTags()
323
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
324
      raise errors.TagError("Too many tags")
325
    self.GetTags().add(tag)
326

    
327
  def RemoveTag(self, tag):
328
    """Remove a tag.
329

330
    """
331
    self.ValidateTag(tag)
332
    tags = self.GetTags()
333
    try:
334
      tags.remove(tag)
335
    except KeyError:
336
      raise errors.TagError("Tag not found")
337

    
338
  def ToDict(self):
339
    """Taggable-object-specific conversion to standard python types.
340

341
    This replaces the tags set with a list.
342

343
    """
344
    bo = super(TaggableObject, self).ToDict()
345

    
346
    tags = bo.get("tags", None)
347
    if isinstance(tags, set):
348
      bo["tags"] = list(tags)
349
    return bo
350

    
351
  @classmethod
352
  def FromDict(cls, val):
353
    """Custom function for instances.
354

355
    """
356
    obj = super(TaggableObject, cls).FromDict(val)
357
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
358
      obj.tags = set(obj.tags)
359
    return obj
360

    
361

    
362
class MasterNetworkParameters(ConfigObject):
363
  """Network configuration parameters for the master
364

365
  @ivar uuid: master nodes UUID
366
  @ivar ip: master IP
367
  @ivar netmask: master netmask
368
  @ivar netdev: master network device
369
  @ivar ip_family: master IP family
370

371
  """
372
  __slots__ = [
373
    "uuid",
374
    "ip",
375
    "netmask",
376
    "netdev",
377
    "ip_family",
378
    ]
379

    
380

    
381
class ConfigData(ConfigObject):
382
  """Top-level config object."""
383
  __slots__ = [
384
    "version",
385
    "cluster",
386
    "nodes",
387
    "nodegroups",
388
    "instances",
389
    "networks",
390
    "serial_no",
391
    ] + _TIMESTAMPS
392

    
393
  def ToDict(self):
394
    """Custom function for top-level config data.
395

396
    This just replaces the list of instances, nodes and the cluster
397
    with standard python types.
398

399
    """
400
    mydict = super(ConfigData, self).ToDict()
401
    mydict["cluster"] = mydict["cluster"].ToDict()
402
    for key in "nodes", "instances", "nodegroups", "networks":
403
      mydict[key] = outils.ContainerToDicts(mydict[key])
404

    
405
    return mydict
406

    
407
  @classmethod
408
  def FromDict(cls, val):
409
    """Custom function for top-level config data
410

411
    """
412
    obj = super(ConfigData, cls).FromDict(val)
413
    obj.cluster = Cluster.FromDict(obj.cluster)
414
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
415
    obj.instances = \
416
      outils.ContainerFromDicts(obj.instances, dict, Instance)
417
    obj.nodegroups = \
418
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
419
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
420
    return obj
421

    
422
  def HasAnyDiskOfType(self, dev_type):
423
    """Check if in there is at disk of the given type in the configuration.
424

425
    @type dev_type: L{constants.DTS_BLOCK}
426
    @param dev_type: the type to look for
427
    @rtype: boolean
428
    @return: boolean indicating if a disk of the given type was found or not
429

430
    """
431
    for instance in self.instances.values():
432
      for disk in instance.disks:
433
        if disk.IsBasedOnDiskType(dev_type):
434
          return True
435
    return False
436

    
437
  def UpgradeConfig(self):
438
    """Fill defaults for missing configuration values.
439

440
    """
441
    self.cluster.UpgradeConfig()
442
    for node in self.nodes.values():
443
      node.UpgradeConfig()
444
    for instance in self.instances.values():
445
      instance.UpgradeConfig()
446
    self._UpgradeEnabledDiskTemplates()
447
    if self.nodegroups is None:
448
      self.nodegroups = {}
449
    for nodegroup in self.nodegroups.values():
450
      nodegroup.UpgradeConfig()
451
      InstancePolicy.UpgradeDiskTemplates(
452
        nodegroup.ipolicy, self.cluster.enabled_disk_templates)
453
    if self.cluster.drbd_usermode_helper is None:
454
      if self.cluster.IsDiskTemplateEnabled(constants.DT_DRBD8):
455
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
456
    if self.networks is None:
457
      self.networks = {}
458
    for network in self.networks.values():
459
      network.UpgradeConfig()
460

    
461
  def _UpgradeEnabledDiskTemplates(self):
462
    """Upgrade the cluster's enabled disk templates by inspecting the currently
463
       enabled and/or used disk templates.
464

465
    """
466
    if not self.cluster.enabled_disk_templates:
467
      template_set = \
468
        set([inst.disk_template for inst in self.instances.values()])
469
      # Add drbd and plain, if lvm is enabled (by specifying a volume group)
470
      if self.cluster.volume_group_name:
471
        template_set.add(constants.DT_DRBD8)
472
        template_set.add(constants.DT_PLAIN)
473
      # Set enabled_disk_templates to the inferred disk templates. Order them
474
      # according to a preference list that is based on Ganeti's history of
475
      # supported disk templates.
476
      self.cluster.enabled_disk_templates = []
477
      for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
478
        if preferred_template in template_set:
479
          self.cluster.enabled_disk_templates.append(preferred_template)
480
          template_set.remove(preferred_template)
481
      self.cluster.enabled_disk_templates.extend(list(template_set))
482
    InstancePolicy.UpgradeDiskTemplates(
483
      self.cluster.ipolicy, self.cluster.enabled_disk_templates)
484

    
485

    
486
class NIC(ConfigObject):
487
  """Config object representing a network card."""
488
  __slots__ = ["name", "mac", "ip", "network",
489
               "nicparams", "netinfo", "pci"] + _UUID
490

    
491
  @classmethod
492
  def CheckParameterSyntax(cls, nicparams):
493
    """Check the given parameters for validity.
494

495
    @type nicparams:  dict
496
    @param nicparams: dictionary with parameter names/value
497
    @raise errors.ConfigurationError: when a parameter is not valid
498

499
    """
500
    mode = nicparams[constants.NIC_MODE]
501
    if (mode not in constants.NIC_VALID_MODES and
502
        mode != constants.VALUE_AUTO):
503
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
504

    
505
    if (mode == constants.NIC_MODE_BRIDGED and
506
        not nicparams[constants.NIC_LINK]):
507
      raise errors.ConfigurationError("Missing bridged NIC link")
508

    
509

    
510
class Disk(ConfigObject):
511
  """Config object representing a block device."""
512
  __slots__ = (["name", "dev_type", "logical_id", "children", "iv_name",
513
                "size", "mode", "params", "spindles", "pci"] + _UUID +
514
               # dynamic_params is special. It depends on the node this instance
515
               # is sent to, and should not be persisted.
516
               ["dynamic_params"])
517

    
518
  def CreateOnSecondary(self):
519
    """Test if this device needs to be created on a secondary node."""
520
    return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
521

    
522
  def AssembleOnSecondary(self):
523
    """Test if this device needs to be assembled on a secondary node."""
524
    return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
525

    
526
  def OpenOnSecondary(self):
527
    """Test if this device needs to be opened on a secondary node."""
528
    return self.dev_type in (constants.DT_PLAIN,)
529

    
530
  def StaticDevPath(self):
531
    """Return the device path if this device type has a static one.
532

533
    Some devices (LVM for example) live always at the same /dev/ path,
534
    irrespective of their status. For such devices, we return this
535
    path, for others we return None.
536

537
    @warning: The path returned is not a normalized pathname; callers
538
        should check that it is a valid path.
539

540
    """
541
    if self.dev_type == constants.DT_PLAIN:
542
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
543
    elif self.dev_type == constants.DT_BLOCK:
544
      return self.logical_id[1]
545
    elif self.dev_type == constants.DT_RBD:
546
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
547
    return None
548

    
549
  def ChildrenNeeded(self):
550
    """Compute the needed number of children for activation.
551

552
    This method will return either -1 (all children) or a positive
553
    number denoting the minimum number of children needed for
554
    activation (only mirrored devices will usually return >=0).
555

556
    Currently, only DRBD8 supports diskless activation (therefore we
557
    return 0), for all other we keep the previous semantics and return
558
    -1.
559

560
    """
561
    if self.dev_type == constants.DT_DRBD8:
562
      return 0
563
    return -1
564

    
565
  def IsBasedOnDiskType(self, dev_type):
566
    """Check if the disk or its children are based on the given type.
567

568
    @type dev_type: L{constants.DTS_BLOCK}
569
    @param dev_type: the type to look for
570
    @rtype: boolean
571
    @return: boolean indicating if a device of the given type was found or not
572

573
    """
574
    if self.children:
575
      for child in self.children:
576
        if child.IsBasedOnDiskType(dev_type):
577
          return True
578
    return self.dev_type == dev_type
579

    
580
  def GetNodes(self, node_uuid):
581
    """This function returns the nodes this device lives on.
582

583
    Given the node on which the parent of the device lives on (or, in
584
    case of a top-level device, the primary node of the devices'
585
    instance), this function will return a list of nodes on which this
586
    devices needs to (or can) be assembled.
587

588
    """
589
    if self.dev_type in [constants.DT_PLAIN, constants.DT_FILE,
590
                         constants.DT_BLOCK, constants.DT_RBD,
591
                         constants.DT_EXT, constants.DT_SHARED_FILE]:
592
      result = [node_uuid]
593
    elif self.dev_type in constants.DTS_DRBD:
594
      result = [self.logical_id[0], self.logical_id[1]]
595
      if node_uuid not in result:
596
        raise errors.ConfigurationError("DRBD device passed unknown node")
597
    else:
598
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
599
    return result
600

    
601
  def ComputeNodeTree(self, parent_node_uuid):
602
    """Compute the node/disk tree for this disk and its children.
603

604
    This method, given the node on which the parent disk lives, will
605
    return the list of all (node UUID, disk) pairs which describe the disk
606
    tree in the most compact way. For example, a drbd/lvm stack
607
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
608
    which represents all the top-level devices on the nodes.
609

610
    """
611
    my_nodes = self.GetNodes(parent_node_uuid)
612
    result = [(node, self) for node in my_nodes]
613
    if not self.children:
614
      # leaf device
615
      return result
616
    for node in my_nodes:
617
      for child in self.children:
618
        child_result = child.ComputeNodeTree(node)
619
        if len(child_result) == 1:
620
          # child (and all its descendants) is simple, doesn't split
621
          # over multiple hosts, so we don't need to describe it, our
622
          # own entry for this node describes it completely
623
          continue
624
        else:
625
          # check if child nodes differ from my nodes; note that
626
          # subdisk can differ from the child itself, and be instead
627
          # one of its descendants
628
          for subnode, subdisk in child_result:
629
            if subnode not in my_nodes:
630
              result.append((subnode, subdisk))
631
            # otherwise child is under our own node, so we ignore this
632
            # entry (but probably the other results in the list will
633
            # be different)
634
    return result
635

    
636
  def ComputeGrowth(self, amount):
637
    """Compute the per-VG growth requirements.
638

639
    This only works for VG-based disks.
640

641
    @type amount: integer
642
    @param amount: the desired increase in (user-visible) disk space
643
    @rtype: dict
644
    @return: a dictionary of volume-groups and the required size
645

646
    """
647
    if self.dev_type == constants.DT_PLAIN:
648
      return {self.logical_id[0]: amount}
649
    elif self.dev_type == constants.DT_DRBD8:
650
      if self.children:
651
        return self.children[0].ComputeGrowth(amount)
652
      else:
653
        return {}
654
    else:
655
      # Other disk types do not require VG space
656
      return {}
657

    
658
  def RecordGrow(self, amount):
659
    """Update the size of this disk after growth.
660

661
    This method recurses over the disks's children and updates their
662
    size correspondigly. The method needs to be kept in sync with the
663
    actual algorithms from bdev.
664

665
    """
666
    if self.dev_type in (constants.DT_PLAIN, constants.DT_FILE,
667
                         constants.DT_RBD, constants.DT_EXT,
668
                         constants.DT_SHARED_FILE):
669
      self.size += amount
670
    elif self.dev_type == constants.DT_DRBD8:
671
      if self.children:
672
        self.children[0].RecordGrow(amount)
673
      self.size += amount
674
    else:
675
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
676
                                   " disk type %s" % self.dev_type)
677

    
678
  def Update(self, size=None, mode=None, spindles=None):
679
    """Apply changes to size, spindles and mode.
680

681
    """
682
    if self.dev_type == constants.DT_DRBD8:
683
      if self.children:
684
        self.children[0].Update(size=size, mode=mode)
685
    else:
686
      assert not self.children
687

    
688
    if size is not None:
689
      self.size = size
690
    if mode is not None:
691
      self.mode = mode
692
    if spindles is not None:
693
      self.spindles = spindles
694

    
695
  def UnsetSize(self):
696
    """Sets recursively the size to zero for the disk and its children.
697

698
    """
699
    if self.children:
700
      for child in self.children:
701
        child.UnsetSize()
702
    self.size = 0
703

    
704
  def UpdateDynamicDiskParams(self, target_node_uuid, nodes_ip):
705
    """Updates the dynamic disk params for the given node.
706

707
    This is mainly used for drbd, which needs ip/port configuration.
708

709
    Arguments:
710
      - target_node_uuid: the node UUID we wish to configure for
711
      - nodes_ip: a mapping of node name to ip
712

713
    The target_node must exist in nodes_ip, and should be one of the
714
    nodes in the logical ID if this device is a DRBD device.
715

716
    """
717
    if self.children:
718
      for child in self.children:
719
        child.UpdateDynamicDiskParams(target_node_uuid, nodes_ip)
720

    
721
    dyn_disk_params = {}
722
    if self.logical_id is not None and self.dev_type in constants.DTS_DRBD:
723
      pnode_uuid, snode_uuid, _, pminor, sminor, _ = self.logical_id
724
      if target_node_uuid not in (pnode_uuid, snode_uuid):
725
        # disk object is being sent to neither the primary nor the secondary
726
        # node. reset the dynamic parameters, the target node is not
727
        # supposed to use them.
728
        self.dynamic_params = dyn_disk_params
729
        return
730

    
731
      pnode_ip = nodes_ip.get(pnode_uuid, None)
732
      snode_ip = nodes_ip.get(snode_uuid, None)
733
      if pnode_ip is None or snode_ip is None:
734
        raise errors.ConfigurationError("Can't find primary or secondary node"
735
                                        " for %s" % str(self))
736
      if pnode_uuid == target_node_uuid:
737
        dyn_disk_params[constants.DDP_LOCAL_IP] = pnode_ip
738
        dyn_disk_params[constants.DDP_REMOTE_IP] = snode_ip
739
        dyn_disk_params[constants.DDP_LOCAL_MINOR] = pminor
740
        dyn_disk_params[constants.DDP_REMOTE_MINOR] = sminor
741
      else: # it must be secondary, we tested above
742
        dyn_disk_params[constants.DDP_LOCAL_IP] = snode_ip
743
        dyn_disk_params[constants.DDP_REMOTE_IP] = pnode_ip
744
        dyn_disk_params[constants.DDP_LOCAL_MINOR] = sminor
745
        dyn_disk_params[constants.DDP_REMOTE_MINOR] = pminor
746

    
747
    self.dynamic_params = dyn_disk_params
748

    
749
  # pylint: disable=W0221
750
  def ToDict(self, include_dynamic_params=False):
751
    """Disk-specific conversion to standard python types.
752

753
    This replaces the children lists of objects with lists of
754
    standard python types.
755

756
    """
757
    bo = super(Disk, self).ToDict()
758
    if not include_dynamic_params and "dynamic_params" in bo:
759
      del bo["dynamic_params"]
760

    
761
    for attr in ("children",):
762
      alist = bo.get(attr, None)
763
      if alist:
764
        bo[attr] = outils.ContainerToDicts(alist)
765
    return bo
766

    
767
  @classmethod
768
  def FromDict(cls, val):
769
    """Custom function for Disks
770

771
    """
772
    if "physical_id" in val:
773
      del val["physical_id"]
774
    obj = super(Disk, cls).FromDict(val)
775
    if obj.children:
776
      obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
777
    if obj.logical_id and isinstance(obj.logical_id, list):
778
      obj.logical_id = tuple(obj.logical_id)
779
    if obj.dev_type in constants.DTS_DRBD:
780
      # we need a tuple of length six here
781
      if len(obj.logical_id) < 6:
782
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
783
    return obj
784

    
785
  def __str__(self):
786
    """Custom str() formatter for disks.
787

788
    """
789
    if self.dev_type == constants.DT_PLAIN:
790
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
791
    elif self.dev_type in constants.DTS_DRBD:
792
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
793
      val = "<DRBD8("
794

    
795
      val += ("hosts=%s/%d-%s/%d, port=%s, " %
796
              (node_a, minor_a, node_b, minor_b, port))
797
      if self.children and self.children.count(None) == 0:
798
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
799
      else:
800
        val += "no local storage"
801
    else:
802
      val = ("<Disk(type=%s, logical_id=%s, children=%s" %
803
             (self.dev_type, self.logical_id, self.children))
804
    if self.iv_name is None:
805
      val += ", not visible"
806
    else:
807
      val += ", visible as /dev/%s" % self.iv_name
808
    if self.spindles is not None:
809
      val += ", spindles=%s" % self.spindles
810
    if isinstance(self.size, int):
811
      val += ", size=%dm)>" % self.size
812
    else:
813
      val += ", size='%s')>" % (self.size,)
814
    return val
815

    
816
  def Verify(self):
817
    """Checks that this disk is correctly configured.
818

819
    """
820
    all_errors = []
821
    if self.mode not in constants.DISK_ACCESS_SET:
822
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
823
    return all_errors
824

    
825
  def UpgradeConfig(self):
826
    """Fill defaults for missing configuration values.
827

828
    """
829
    if self.children:
830
      for child in self.children:
831
        child.UpgradeConfig()
832

    
833
    # FIXME: Make this configurable in Ganeti 2.7
834
    # Params should be an empty dict that gets filled any time needed
835
    # In case of ext template we allow arbitrary params that should not
836
    # be overrided during a config reload/upgrade.
837
    if not self.params or not isinstance(self.params, dict):
838
      self.params = {}
839

    
840
    # add here config upgrade for this disk
841

    
842
    # map of legacy device types (mapping differing LD constants to new
843
    # DT constants)
844
    LEG_DEV_TYPE_MAP = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
845
    if self.dev_type in LEG_DEV_TYPE_MAP:
846
      self.dev_type = LEG_DEV_TYPE_MAP[self.dev_type]
847

    
848
  @staticmethod
849
  def ComputeLDParams(disk_template, disk_params):
850
    """Computes Logical Disk parameters from Disk Template parameters.
851

852
    @type disk_template: string
853
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
854
    @type disk_params: dict
855
    @param disk_params: disk template parameters;
856
                        dict(template_name -> parameters
857
    @rtype: list(dict)
858
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
859
      contains the LD parameters of the node. The tree is flattened in-order.
860

861
    """
862
    if disk_template not in constants.DISK_TEMPLATES:
863
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
864

    
865
    assert disk_template in disk_params
866

    
867
    result = list()
868
    dt_params = disk_params[disk_template]
869
    if disk_template == constants.DT_DRBD8:
870
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_DRBD8], {
871
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
872
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
873
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
874
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
875
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
876
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
877
        constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL],
878
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
879
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
880
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
881
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
882
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
883
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
884
        }))
885

    
886
      # data LV
887
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
888
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
889
        }))
890

    
891
      # metadata LV
892
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
893
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
894
        }))
895

    
896
    elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
897
      result.append(constants.DISK_LD_DEFAULTS[disk_template])
898

    
899
    elif disk_template == constants.DT_PLAIN:
900
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
901
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
902
        }))
903

    
904
    elif disk_template == constants.DT_BLOCK:
905
      result.append(constants.DISK_LD_DEFAULTS[constants.DT_BLOCK])
906

    
907
    elif disk_template == constants.DT_RBD:
908
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_RBD], {
909
        constants.LDP_POOL: dt_params[constants.RBD_POOL],
910
        constants.LDP_ACCESS: dt_params[constants.RBD_ACCESS],
911
        }))
912

    
913
    elif disk_template == constants.DT_EXT:
914
      result.append(constants.DISK_LD_DEFAULTS[constants.DT_EXT])
915

    
916
    return result
917

    
918

    
919
class InstancePolicy(ConfigObject):
920
  """Config object representing instance policy limits dictionary.
921

922
  Note that this object is not actually used in the config, it's just
923
  used as a placeholder for a few functions.
924

925
  """
926
  @classmethod
927
  def UpgradeDiskTemplates(cls, ipolicy, enabled_disk_templates):
928
    """Upgrades the ipolicy configuration."""
929
    if constants.IPOLICY_DTS in ipolicy:
930
      if not set(ipolicy[constants.IPOLICY_DTS]).issubset(
931
        set(enabled_disk_templates)):
932
        ipolicy[constants.IPOLICY_DTS] = list(
933
          set(ipolicy[constants.IPOLICY_DTS]) & set(enabled_disk_templates))
934

    
935
  @classmethod
936
  def CheckParameterSyntax(cls, ipolicy, check_std):
937
    """ Check the instance policy for validity.
938

939
    @type ipolicy: dict
940
    @param ipolicy: dictionary with min/max/std specs and policies
941
    @type check_std: bool
942
    @param check_std: Whether to check std value or just assume compliance
943
    @raise errors.ConfigurationError: when the policy is not legal
944

945
    """
946
    InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
947
    if constants.IPOLICY_DTS in ipolicy:
948
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
949
    for key in constants.IPOLICY_PARAMETERS:
950
      if key in ipolicy:
951
        InstancePolicy.CheckParameter(key, ipolicy[key])
952
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
953
    if wrong_keys:
954
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
955
                                      utils.CommaJoin(wrong_keys))
956

    
957
  @classmethod
958
  def _CheckIncompleteSpec(cls, spec, keyname):
959
    missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
960
    if missing_params:
961
      msg = ("Missing instance specs parameters for %s: %s" %
962
             (keyname, utils.CommaJoin(missing_params)))
963
      raise errors.ConfigurationError(msg)
964

    
965
  @classmethod
966
  def CheckISpecSyntax(cls, ipolicy, check_std):
967
    """Check the instance policy specs for validity.
968

969
    @type ipolicy: dict
970
    @param ipolicy: dictionary with min/max/std specs
971
    @type check_std: bool
972
    @param check_std: Whether to check std value or just assume compliance
973
    @raise errors.ConfigurationError: when specs are not valid
974

975
    """
976
    if constants.ISPECS_MINMAX not in ipolicy:
977
      # Nothing to check
978
      return
979

    
980
    if check_std and constants.ISPECS_STD not in ipolicy:
981
      msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
982
      raise errors.ConfigurationError(msg)
983
    stdspec = ipolicy.get(constants.ISPECS_STD)
984
    if check_std:
985
      InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)
986

    
987
    if not ipolicy[constants.ISPECS_MINMAX]:
988
      raise errors.ConfigurationError("Empty minmax specifications")
989
    std_is_good = False
990
    for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
991
      missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
992
      if missing:
993
        msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
994
        raise errors.ConfigurationError(msg)
995
      for (key, spec) in minmaxspecs.items():
996
        InstancePolicy._CheckIncompleteSpec(spec, key)
997

    
998
      spec_std_ok = True
999
      for param in constants.ISPECS_PARAMETERS:
1000
        par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
1001
                                                           param, check_std)
1002
        spec_std_ok = spec_std_ok and par_std_ok
1003
      std_is_good = std_is_good or spec_std_ok
1004
    if not std_is_good:
1005
      raise errors.ConfigurationError("Invalid std specifications")
1006

    
1007
  @classmethod
1008
  def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
1009
    """Check the instance policy specs for validity on a given key.
1010

1011
    We check if the instance specs makes sense for a given key, that is
1012
    if minmaxspecs[min][name] <= stdspec[name] <= minmaxspec[max][name].
1013

1014
    @type minmaxspecs: dict
1015
    @param minmaxspecs: dictionary with min and max instance spec
1016
    @type stdspec: dict
1017
    @param stdspec: dictionary with standard instance spec
1018
    @type name: string
1019
    @param name: what are the limits for
1020
    @type check_std: bool
1021
    @param check_std: Whether to check std value or just assume compliance
1022
    @rtype: bool
1023
    @return: C{True} when specs are valid, C{False} when standard spec for the
1024
        given name is not valid
1025
    @raise errors.ConfigurationError: when min/max specs for the given name
1026
        are not valid
1027

1028
    """
1029
    minspec = minmaxspecs[constants.ISPECS_MIN]
1030
    maxspec = minmaxspecs[constants.ISPECS_MAX]
1031
    min_v = minspec[name]
1032
    max_v = maxspec[name]
1033

    
1034
    if min_v > max_v:
1035
      err = ("Invalid specification of min/max values for %s: %s/%s" %
1036
             (name, min_v, max_v))
1037
      raise errors.ConfigurationError(err)
1038
    elif check_std:
1039
      std_v = stdspec.get(name, min_v)
1040
      return std_v >= min_v and std_v <= max_v
1041
    else:
1042
      return True
1043

    
1044
  @classmethod
1045
  def CheckDiskTemplates(cls, disk_templates):
1046
    """Checks the disk templates for validity.
1047

1048
    """
1049
    if not disk_templates:
1050
      raise errors.ConfigurationError("Instance policy must contain" +
1051
                                      " at least one disk template")
1052
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1053
    if wrong:
1054
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
1055
                                      utils.CommaJoin(wrong))
1056

    
1057
  @classmethod
1058
  def CheckParameter(cls, key, value):
1059
    """Checks a parameter.
1060

1061
    Currently we expect all parameters to be float values.
1062

1063
    """
1064
    try:
1065
      float(value)
1066
    except (TypeError, ValueError), err:
1067
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
1068
                                      " '%s', error: %s" % (key, value, err))
1069

    
1070

    
1071
class Instance(TaggableObject):
1072
  """Config object representing an instance."""
1073
  __slots__ = [
1074
    "name",
1075
    "primary_node",
1076
    "os",
1077
    "hypervisor",
1078
    "hvparams",
1079
    "beparams",
1080
    "osparams",
1081
    "admin_state",
1082
    "nics",
1083
    "disks",
1084
    "disk_template",
1085
    "disks_active",
1086
    "network_port",
1087
    "serial_no",
1088
    ] + _TIMESTAMPS + _UUID
1089

    
1090
  def _ComputeSecondaryNodes(self):
1091
    """Compute the list of secondary nodes.
1092

1093
    This is a simple wrapper over _ComputeAllNodes.
1094

1095
    """
1096
    all_nodes = set(self._ComputeAllNodes())
1097
    all_nodes.discard(self.primary_node)
1098
    return tuple(all_nodes)
1099

    
1100
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1101
                             "List of names of secondary nodes")
1102

    
1103
  def _ComputeAllNodes(self):
1104
    """Compute the list of all nodes.
1105

1106
    Since the data is already there (in the drbd disks), keeping it as
1107
    a separate normal attribute is redundant and if not properly
1108
    synchronised can cause problems. Thus it's better to compute it
1109
    dynamically.
1110

1111
    """
1112
    def _Helper(nodes, device):
1113
      """Recursively computes nodes given a top device."""
1114
      if device.dev_type in constants.DTS_DRBD:
1115
        nodea, nodeb = device.logical_id[:2]
1116
        nodes.add(nodea)
1117
        nodes.add(nodeb)
1118
      if device.children:
1119
        for child in device.children:
1120
          _Helper(nodes, child)
1121

    
1122
    all_nodes = set()
1123
    for device in self.disks:
1124
      _Helper(all_nodes, device)
1125
    # ensure that the primary node is always the first
1126
    all_nodes.discard(self.primary_node)
1127
    return (self.primary_node, ) + tuple(all_nodes)
1128

    
1129
  all_nodes = property(_ComputeAllNodes, None, None,
1130
                       "List of names of all the nodes of the instance")
1131

    
1132
  def MapLVsByNode(self, lvmap=None, devs=None, node_uuid=None):
1133
    """Provide a mapping of nodes to LVs this instance owns.
1134

1135
    This function figures out what logical volumes should belong on
1136
    which nodes, recursing through a device tree.
1137

1138
    @type lvmap: dict
1139
    @param lvmap: optional dictionary to receive the
1140
        'node' : ['lv', ...] data.
1141
    @type devs: list of L{Disk}
1142
    @param devs: disks to get the LV name for. If None, all disk of this
1143
        instance are used.
1144
    @type node_uuid: string
1145
    @param node_uuid: UUID of the node to get the LV names for. If None, the
1146
        primary node of this instance is used.
1147
    @return: None if lvmap arg is given, otherwise, a dictionary of
1148
        the form { 'node_uuid' : ['volume1', 'volume2', ...], ... };
1149
        volumeN is of the form "vg_name/lv_name", compatible with
1150
        GetVolumeList()
1151

1152
    """
1153
    if node_uuid is None:
1154
      node_uuid = self.primary_node
1155

    
1156
    if lvmap is None:
1157
      lvmap = {
1158
        node_uuid: [],
1159
        }
1160
      ret = lvmap
1161
    else:
1162
      if not node_uuid in lvmap:
1163
        lvmap[node_uuid] = []
1164
      ret = None
1165

    
1166
    if not devs:
1167
      devs = self.disks
1168

    
1169
    for dev in devs:
1170
      if dev.dev_type == constants.DT_PLAIN:
1171
        lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1172

    
1173
      elif dev.dev_type in constants.DTS_DRBD:
1174
        if dev.children:
1175
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1176
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1177

    
1178
      elif dev.children:
1179
        self.MapLVsByNode(lvmap, dev.children, node_uuid)
1180

    
1181
    return ret
1182

    
1183
  def FindDisk(self, idx):
1184
    """Find a disk given having a specified index.
1185

1186
    This is just a wrapper that does validation of the index.
1187

1188
    @type idx: int
1189
    @param idx: the disk index
1190
    @rtype: L{Disk}
1191
    @return: the corresponding disk
1192
    @raise errors.OpPrereqError: when the given index is not valid
1193

1194
    """
1195
    try:
1196
      idx = int(idx)
1197
      return self.disks[idx]
1198
    except (TypeError, ValueError), err:
1199
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1200
                                 errors.ECODE_INVAL)
1201
    except IndexError:
1202
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1203
                                 " 0 to %d" % (idx, len(self.disks) - 1),
1204
                                 errors.ECODE_INVAL)
1205

    
1206
  def ToDict(self):
1207
    """Instance-specific conversion to standard python types.
1208

1209
    This replaces the children lists of objects with lists of standard
1210
    python types.
1211

1212
    """
1213
    bo = super(Instance, self).ToDict()
1214

    
1215
    for attr in "nics", "disks":
1216
      alist = bo.get(attr, None)
1217
      if alist:
1218
        nlist = outils.ContainerToDicts(alist)
1219
      else:
1220
        nlist = []
1221
      bo[attr] = nlist
1222
    return bo
1223

    
1224
  @classmethod
1225
  def FromDict(cls, val):
1226
    """Custom function for instances.
1227

1228
    """
1229
    if "admin_state" not in val:
1230
      if val.get("admin_up", False):
1231
        val["admin_state"] = constants.ADMINST_UP
1232
      else:
1233
        val["admin_state"] = constants.ADMINST_DOWN
1234
    if "admin_up" in val:
1235
      del val["admin_up"]
1236
    obj = super(Instance, cls).FromDict(val)
1237
    obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
1238
    obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
1239
    return obj
1240

    
1241
  def UpgradeConfig(self):
1242
    """Fill defaults for missing configuration values.
1243

1244
    """
1245
    for nic in self.nics:
1246
      nic.UpgradeConfig()
1247
    for disk in self.disks:
1248
      disk.UpgradeConfig()
1249
    if self.hvparams:
1250
      for key in constants.HVC_GLOBALS:
1251
        try:
1252
          del self.hvparams[key]
1253
        except KeyError:
1254
          pass
1255
    if self.osparams is None:
1256
      self.osparams = {}
1257
    UpgradeBeParams(self.beparams)
1258
    if self.disks_active is None:
1259
      self.disks_active = self.admin_state == constants.ADMINST_UP
1260

    
1261

    
1262
class OS(ConfigObject):
1263
  """Config object representing an operating system.
1264

1265
  @type supported_parameters: list
1266
  @ivar supported_parameters: a list of tuples, name and description,
1267
      containing the supported parameters by this OS
1268

1269
  @type VARIANT_DELIM: string
1270
  @cvar VARIANT_DELIM: the variant delimiter
1271

1272
  """
1273
  __slots__ = [
1274
    "name",
1275
    "path",
1276
    "api_versions",
1277
    "create_script",
1278
    "export_script",
1279
    "import_script",
1280
    "rename_script",
1281
    "verify_script",
1282
    "supported_variants",
1283
    "supported_parameters",
1284
    ]
1285

    
1286
  VARIANT_DELIM = "+"
1287

    
1288
  @classmethod
1289
  def SplitNameVariant(cls, name):
1290
    """Splits the name into the proper name and variant.
1291

1292
    @param name: the OS (unprocessed) name
1293
    @rtype: list
1294
    @return: a list of two elements; if the original name didn't
1295
        contain a variant, it's returned as an empty string
1296

1297
    """
1298
    nv = name.split(cls.VARIANT_DELIM, 1)
1299
    if len(nv) == 1:
1300
      nv.append("")
1301
    return nv
1302

    
1303
  @classmethod
1304
  def GetName(cls, name):
1305
    """Returns the proper name of the os (without the variant).
1306

1307
    @param name: the OS (unprocessed) name
1308

1309
    """
1310
    return cls.SplitNameVariant(name)[0]
1311

    
1312
  @classmethod
1313
  def GetVariant(cls, name):
1314
    """Returns the variant the os (without the base name).
1315

1316
    @param name: the OS (unprocessed) name
1317

1318
    """
1319
    return cls.SplitNameVariant(name)[1]
1320

    
1321

    
1322
class ExtStorage(ConfigObject):
1323
  """Config object representing an External Storage Provider.
1324

1325
  """
1326
  __slots__ = [
1327
    "name",
1328
    "path",
1329
    "create_script",
1330
    "remove_script",
1331
    "grow_script",
1332
    "attach_script",
1333
    "detach_script",
1334
    "setinfo_script",
1335
    "verify_script",
1336
    "snapshot_script",
1337
    "supported_parameters",
1338
    ]
1339

    
1340

    
1341
class NodeHvState(ConfigObject):
1342
  """Hypvervisor state on a node.
1343

1344
  @ivar mem_total: Total amount of memory
1345
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1346
    available)
1347
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1348
    rounding
1349
  @ivar mem_inst: Memory used by instances living on node
1350
  @ivar cpu_total: Total node CPU core count
1351
  @ivar cpu_node: Number of CPU cores reserved for the node itself
1352

1353
  """
1354
  __slots__ = [
1355
    "mem_total",
1356
    "mem_node",
1357
    "mem_hv",
1358
    "mem_inst",
1359
    "cpu_total",
1360
    "cpu_node",
1361
    ] + _TIMESTAMPS
1362

    
1363

    
1364
class NodeDiskState(ConfigObject):
1365
  """Disk state on a node.
1366

1367
  """
1368
  __slots__ = [
1369
    "total",
1370
    "reserved",
1371
    "overhead",
1372
    ] + _TIMESTAMPS
1373

    
1374

    
1375
class Node(TaggableObject):
1376
  """Config object representing a node.
1377

1378
  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1379
  @ivar hv_state_static: Hypervisor state overriden by user
1380
  @ivar disk_state: Disk state (e.g. free space)
1381
  @ivar disk_state_static: Disk state overriden by user
1382

1383
  """
1384
  __slots__ = [
1385
    "name",
1386
    "primary_ip",
1387
    "secondary_ip",
1388
    "serial_no",
1389
    "master_candidate",
1390
    "offline",
1391
    "drained",
1392
    "group",
1393
    "master_capable",
1394
    "vm_capable",
1395
    "ndparams",
1396
    "powered",
1397
    "hv_state",
1398
    "hv_state_static",
1399
    "disk_state",
1400
    "disk_state_static",
1401
    ] + _TIMESTAMPS + _UUID
1402

    
1403
  def UpgradeConfig(self):
1404
    """Fill defaults for missing configuration values.
1405

1406
    """
1407
    # pylint: disable=E0203
1408
    # because these are "defined" via slots, not manually
1409
    if self.master_capable is None:
1410
      self.master_capable = True
1411

    
1412
    if self.vm_capable is None:
1413
      self.vm_capable = True
1414

    
1415
    if self.ndparams is None:
1416
      self.ndparams = {}
1417
    # And remove any global parameter
1418
    for key in constants.NDC_GLOBALS:
1419
      if key in self.ndparams:
1420
        logging.warning("Ignoring %s node parameter for node %s",
1421
                        key, self.name)
1422
        del self.ndparams[key]
1423

    
1424
    if self.powered is None:
1425
      self.powered = True
1426

    
1427
  def ToDict(self):
1428
    """Custom function for serializing.
1429

1430
    """
1431
    data = super(Node, self).ToDict()
1432

    
1433
    hv_state = data.get("hv_state", None)
1434
    if hv_state is not None:
1435
      data["hv_state"] = outils.ContainerToDicts(hv_state)
1436

    
1437
    disk_state = data.get("disk_state", None)
1438
    if disk_state is not None:
1439
      data["disk_state"] = \
1440
        dict((key, outils.ContainerToDicts(value))
1441
             for (key, value) in disk_state.items())
1442

    
1443
    return data
1444

    
1445
  @classmethod
1446
  def FromDict(cls, val):
1447
    """Custom function for deserializing.
1448

1449
    """
1450
    obj = super(Node, cls).FromDict(val)
1451

    
1452
    if obj.hv_state is not None:
1453
      obj.hv_state = \
1454
        outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1455

    
1456
    if obj.disk_state is not None:
1457
      obj.disk_state = \
1458
        dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
1459
             for (key, value) in obj.disk_state.items())
1460

    
1461
    return obj
1462

    
1463

    
1464
class NodeGroup(TaggableObject):
1465
  """Config object representing a node group."""
1466
  __slots__ = [
1467
    "name",
1468
    "members",
1469
    "ndparams",
1470
    "diskparams",
1471
    "ipolicy",
1472
    "serial_no",
1473
    "hv_state_static",
1474
    "disk_state_static",
1475
    "alloc_policy",
1476
    "networks",
1477
    ] + _TIMESTAMPS + _UUID
1478

    
1479
  def ToDict(self):
1480
    """Custom function for nodegroup.
1481

1482
    This discards the members object, which gets recalculated and is only kept
1483
    in memory.
1484

1485
    """
1486
    mydict = super(NodeGroup, self).ToDict()
1487
    del mydict["members"]
1488
    return mydict
1489

    
1490
  @classmethod
1491
  def FromDict(cls, val):
1492
    """Custom function for nodegroup.
1493

1494
    The members slot is initialized to an empty list, upon deserialization.
1495

1496
    """
1497
    obj = super(NodeGroup, cls).FromDict(val)
1498
    obj.members = []
1499
    return obj
1500

    
1501
  def UpgradeConfig(self):
1502
    """Fill defaults for missing configuration values.
1503

1504
    """
1505
    if self.ndparams is None:
1506
      self.ndparams = {}
1507

    
1508
    if self.serial_no is None:
1509
      self.serial_no = 1
1510

    
1511
    if self.alloc_policy is None:
1512
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1513

    
1514
    # We only update mtime, and not ctime, since we would not be able
1515
    # to provide a correct value for creation time.
1516
    if self.mtime is None:
1517
      self.mtime = time.time()
1518

    
1519
    if self.diskparams is None:
1520
      self.diskparams = {}
1521
    if self.ipolicy is None:
1522
      self.ipolicy = MakeEmptyIPolicy()
1523

    
1524
    if self.networks is None:
1525
      self.networks = {}
1526

    
1527
  def FillND(self, node):
1528
    """Return filled out ndparams for L{objects.Node}
1529

1530
    @type node: L{objects.Node}
1531
    @param node: A Node object to fill
1532
    @return a copy of the node's ndparams with defaults filled
1533

1534
    """
1535
    return self.SimpleFillND(node.ndparams)
1536

    
1537
  def SimpleFillND(self, ndparams):
1538
    """Fill a given ndparams dict with defaults.
1539

1540
    @type ndparams: dict
1541
    @param ndparams: the dict to fill
1542
    @rtype: dict
1543
    @return: a copy of the passed in ndparams with missing keys filled
1544
        from the node group defaults
1545

1546
    """
1547
    return FillDict(self.ndparams, ndparams)


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "dsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "master_netmask",
    "use_external_mip_script",
    "cluster_name",
    "file_storage_dir",
    "shared_file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "ipolicy",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "ndparams",
    "diskparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    "hv_state_static",
    "disk_state_static",
    "enabled_disk_templates",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in constants.HYPER_TYPES:
        try:
          existing_params = self.hvparams[hypervisor]
        except KeyError:
          existing_params = {}
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], existing_params)

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.ndparams = UpgradeNDParams(self.ndparams)

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    if self.diskparams:
      self.diskparams = UpgradeDiskParams(self.diskparams)
    else:
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
      if wrongkeys:
        # These keys would be silently removed by FillIPolicy()
        msg = ("Cluster instance policy contains spurious keys: %s" %
               utils.CommaJoin(wrongkeys))
        raise errors.ConfigurationError(msg)
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
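
    # Illustrative sketch (not part of the original code): a config written by
    # an old release with nicparams missing but default_bridge set ends up,
    # after UpgradeConfig(), with nicparams filled from constants.NICC_DEFAULTS
    # and the old bridge migrated to the default NIC link, roughly:
    #
    #   cluster.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] == "xen-br0"
    #
    # where "xen-br0" stands for whatever default_bridge used to contain.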

  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()

    if self.tcpudp_port_pool is None:
      tcpudp_port_pool = []
    else:
      tcpudp_port_pool = list(self.tcpudp_port_pool)

    mydict["tcpudp_port_pool"] = tcpudp_port_pool

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)

    if obj.tcpudp_port_pool is None:
      obj.tcpudp_port_pool = set()
    elif not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)

    return obj
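
  # Illustrative sketch (not part of the original code): tcpudp_port_pool is a
  # set in memory but is serialized as a plain list (the serialized config has
  # no set type), so ToDict()/FromDict() round-trip it.  With hypothetical
  # ports:
  #
  #   cluster.tcpudp_port_pool = set([11006, 11007])
  #   Cluster.FromDict(cluster.ToDict()).tcpudp_port_pool == set([11006, 11007])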

  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: The diskparams
    @return: The defaults dict

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict
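
  # Illustrative sketch (not part of the original code): the fill stack means
  # OS-specific hypervisor parameters (os_hvp) override the cluster-wide
  # hvparams for that hypervisor.  With hypothetical values:
  #
  #   hvparams = {"xen-pvm": {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/xvda1"}}
  #   os_hvp = {"debian": {"xen-pvm": {"root_path": "/dev/xvda2"}}}
  #   GetHVDefaults("xen-pvm", os_name="debian")
  #   # -> {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/xvda2"}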

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
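
  # Illustrative sketch (not part of the original code): only the
  # constants.PP_DEFAULT group of the cluster's beparams acts as the default
  # layer; explicitly passed keys win.  With hypothetical values:
  #
  #   self.beparams = {constants.PP_DEFAULT: {"vcpus": 1, "auto_balance": True}}
  #   self.SimpleFillBE({"vcpus": 4})
  #   # -> {"vcpus": 4, "auto_balance": True}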

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
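
  # Illustrative sketch (not part of the original code): for a variant such as
  # "debian+minimal" the layers are the base OS ("debian"), then the full
  # variant name ("debian+minimal"), then the explicitly given os_params, with
  # later layers overriding earlier ones.  With a hypothetical "mirror" key:
  #
  #   self.osparams = {"debian": {"mirror": "a"}, "debian+minimal": {"mirror": "b"}}
  #   self.SimpleFillOS("debian+minimal", {})  # -> {"mirror": "b"}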

  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))
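
  # Illustrative sketch (not part of the original code): the full ndparams
  # resolution order is cluster defaults, overridden by node group values,
  # overridden by node-specific values, i.e.:
  #
  #   cluster.FillND(node, nodegroup)
  #   == FillDict(cluster.ndparams,
  #               FillDict(nodegroup.ndparams, node.ndparams))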

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)

  def IsDiskTemplateEnabled(self, disk_template):
    """Checks if a particular disk template is enabled.

    """
    return utils.storage.IsDiskTemplateEnabled(
        disk_template, self.enabled_disk_templates)

  def IsFileStorageEnabled(self):
    """Checks if file storage is enabled.

    """
    return utils.storage.IsFileStorageEnabled(self.enabled_disk_templates)

  def IsSharedFileStorageEnabled(self):
    """Checks if shared file storage is enabled.

    """
    return utils.storage.IsSharedFileStorageEnabled(
        self.enabled_disk_templates)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = \
      outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True


class Network(TaggableObject):
  """Object representing a network definition for ganeti.

  """
  __slots__ = [
    "name",
    "serial_no",
    "mac_prefix",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "reservations",
    "ext_reservations",
    ] + _TIMESTAMPS + _UUID

  def HooksDict(self, prefix=""):
    """Export a dictionary used by hooks with a network's information.

    @type prefix: String
    @param prefix: Prefix to prepend to the dict entries

    """
    result = {
      "%sNETWORK_NAME" % prefix: self.name,
      "%sNETWORK_UUID" % prefix: self.uuid,
      "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
    }
    if self.network:
      result["%sNETWORK_SUBNET" % prefix] = self.network
    if self.gateway:
      result["%sNETWORK_GATEWAY" % prefix] = self.gateway
    if self.network6:
      result["%sNETWORK_SUBNET6" % prefix] = self.network6
    if self.gateway6:
      result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
    if self.mac_prefix:
      result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix

    return result
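
  # Illustrative sketch (not part of the original code): for a hypothetical
  # network named "net1" with subnet "10.0.0.0/24" and prefix "GANETI_", the
  # resulting dict would contain, among others:
  #
  #   {"GANETI_NETWORK_NAME": "net1",
  #    "GANETI_NETWORK_UUID": "<uuid>",
  #    "GANETI_NETWORK_SUBNET": "10.0.0.0/24",
  #    "GANETI_NETWORK_TAGS": "<space-separated tags>"}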

  @classmethod
  def FromDict(cls, val):
    """Custom function for networks.

    Remove deprecated network_type and family.

    """
    if "network_type" in val:
      del val["network_type"]
    if "family" in val:
      del val["family"]
    obj = super(Network, cls).FromDict(val)
    return obj


# need to inherit object in order to use super()
class SerializableConfigParser(ConfigParser.SafeConfigParser, object):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp

  def get(self, section, option, **kwargs):
    # Returns None both for a literal "none" value and for optional
    # per-disk/per-NIC options (name, network, vlan) that are absent.
    value = None
    try:
      value = super(SerializableConfigParser, self).get(section, option,
                                                        **kwargs)
      if value.lower() == constants.VALUE_NONE:
        value = None
    except ConfigParser.NoOptionError:
      r = re.compile(r"(disk|nic)\d+_name|nic\d+_(network|vlan)")
      match = r.match(option)
      if match:
        pass
      else:
        raise

    return value
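
  # Illustrative sketch (not part of the original code): Dumps()/Loads() allow
  # round-tripping the parser through a plain string, e.g. with a hypothetical
  # INI snippet:
  #
  #   cfp = SerializableConfigParser.Loads("[export]\nos = debian\n")
  #   cfp.get("export", "os")   # -> "debian"
  #   data = cfp.Dumps()        # serialized back to a string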


class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  @type lv_list: list of strings
  @ivar lv_list: names of the LVs hosted on the PV
  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    "lv_list"
    ]

  def IsEmpty(self):
    """Is this PV empty?

    """
    return self.size <= (self.free + 1)

  def IsAllocatable(self):
    """Is this PV allocatable?

    """
    return ("a" in self.attributes)
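
# Illustrative sketch (not part of the original code), with hypothetical
# values: a PV of size 10240.0 MiB with 10239.5 MiB free counts as empty
# (the 1 MiB slack absorbs metadata rounding), and attributes containing
# "a" mark it allocatable:
#
#   pv = LvmPvInfo(name="/dev/sdb1", vg_name="xenvg", size=10240.0,
#                  free=10239.5, attributes="a--", lv_list=[])
#   pv.IsEmpty()        # -> True
#   pv.IsAllocatable()  # -> True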