lib/objects.py @ 0c3d9c7c

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable=E0203,W0201,R0902
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
# R0902: Allow instances of these objects to have more than 20 attributes
37

    
38
import ConfigParser
39
import re
40
import copy
41
import logging
42
import time
43
from cStringIO import StringIO
44

    
45
from ganeti import errors
46
from ganeti import constants
47
from ganeti import netutils
48
from ganeti import outils
49
from ganeti import utils
50

    
51
from socket import AF_INET
52

    
53

    
54
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
55
           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
56

    
57
_TIMESTAMPS = ["ctime", "mtime"]
58
_UUID = ["uuid"]
59

    
60

    
61
def FillDict(defaults_dict, custom_dict, skip_keys=None):
62
  """Basic function to apply settings on top a default dict.
63

64
  @type defaults_dict: dict
65
  @param defaults_dict: dictionary holding the default values
66
  @type custom_dict: dict
67
  @param custom_dict: dictionary holding the customized values
68
  @type skip_keys: list
69
  @param skip_keys: which keys not to fill
70
  @rtype: dict
71
  @return: dict with the 'full' values
72

73
  """
74
  ret_dict = copy.deepcopy(defaults_dict)
75
  ret_dict.update(custom_dict)
76
  if skip_keys:
77
    for k in skip_keys:
78
      try:
79
        del ret_dict[k]
80
      except KeyError:
81
        pass
82
  return ret_dict
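# Illustrative example (editor's note, not part of the original module):
# custom values win, defaults fill the gaps, and skipped keys are dropped:
#   FillDict({"a": 1, "b": 2}, {"b": 3})                   ==>  {"a": 1, "b": 3}
#   FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"])  ==>  {"b": 3}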
83

    
84

    
85
def FillIPolicy(default_ipolicy, custom_ipolicy):
86
  """Fills an instance policy with defaults.
87

88
  """
89
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
90
  ret_dict = copy.deepcopy(custom_ipolicy)
91
  for key in default_ipolicy:
92
    if key not in ret_dict:
93
      ret_dict[key] = copy.deepcopy(default_ipolicy[key])
94
    elif key == constants.ISPECS_STD:
95
      ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
96
  return ret_dict
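# Editor's note: keys present in custom_ipolicy take precedence, missing keys
# are deep-copied from default_ipolicy, and the "std" specs sub-dict
# (constants.ISPECS_STD) is merged key by key via FillDict rather than
# replaced as a whole.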
97

    
98

    
99
def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
100
  """Fills the disk parameter defaults.
101

102
  @see: L{FillDict} for parameters and return value
103

104
  """
105
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
106

    
107
  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
108
                             skip_keys=skip_keys))
109
              for dt in constants.DISK_TEMPLATES)
110

    
111

    
112
def UpgradeGroupedParams(target, defaults):
113
  """Update all groups for the target parameter.
114

115
  @type target: dict of dicts
116
  @param target: {group: {parameter: value}}
117
  @type defaults: dict
118
  @param defaults: default parameter values
119

120
  """
121
  if target is None:
122
    target = {constants.PP_DEFAULT: defaults}
123
  else:
124
    for group in target:
125
      target[group] = FillDict(defaults, target[group])
126
  return target
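# Illustrative example (editor's sketch, assuming constants.PP_DEFAULT is
# the string "default"):
#   UpgradeGroupedParams(None, {"x": 1})
#     ==>  {"default": {"x": 1}}
#   UpgradeGroupedParams({"default": {"x": 2}, "other": {}}, {"x": 1})
#     ==>  {"default": {"x": 2}, "other": {"x": 1}}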
127

    
128

    
129
def UpgradeBeParams(target):
130
  """Update the be parameters dict to the new format.
131

132
  @type target: dict
133
  @param target: "be" parameters dict
134

135
  """
136
  if constants.BE_MEMORY in target:
137
    memory = target[constants.BE_MEMORY]
138
    target[constants.BE_MAXMEM] = memory
139
    target[constants.BE_MINMEM] = memory
140
    del target[constants.BE_MEMORY]
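# Illustrative example (editor's note, assuming the usual constant values
# "memory", "maxmem" and "minmem"): a legacy dict {"memory": 128} is
# rewritten in place to {"maxmem": 128, "minmem": 128}.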
141

    
142

    
143
def UpgradeDiskParams(diskparams):
144
  """Upgrade the disk parameters.
145

146
  @type diskparams: dict
147
  @param diskparams: disk parameters to upgrade
148
  @rtype: dict
149
  @return: the upgraded disk parameters dict
150

151
  """
152
  if not diskparams:
153
    result = {}
154
  else:
155
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
156

    
157
  return result
158

    
159

    
160
def UpgradeNDParams(ndparams):
161
  """Upgrade ndparams structure.
162

163
  @type ndparams: dict
164
  @param ndparams: node parameters to upgrade
165
  @rtype: dict
166
  @return: the upgraded node parameters dict
167

168
  """
169
  if ndparams is None:
170
    ndparams = {}
171

    
172
  if (constants.ND_OOB_PROGRAM in ndparams and
173
      ndparams[constants.ND_OOB_PROGRAM] is None):
174
    # will be reset by the line below
175
    del ndparams[constants.ND_OOB_PROGRAM]
176
  return FillDict(constants.NDC_DEFAULTS, ndparams)
177

    
178

    
179
def MakeEmptyIPolicy():
180
  """Create empty IPolicy dictionary.
181

182
  """
183
  return {}
184

    
185

    
186
class ConfigObject(outils.ValidatedSlots):
187
  """A generic config object.
188

189
  It has the following properties:
190

191
    - provides somewhat safe recursive unpickling and pickling for its classes
192
    - unset attributes which are defined in slots are always returned
193
      as None instead of raising an error
194

195
  Classes derived from this must always declare __slots__ (we use many
196
  config objects and the memory reduction is useful)
197

198
  """
199
  __slots__ = []
200

    
201
  def __getattr__(self, name):
202
    if name not in self.GetAllSlots():
203
      raise AttributeError("Invalid object attribute %s.%s" %
204
                           (type(self).__name__, name))
205
    return None
206

    
207
  def __setstate__(self, state):
208
    slots = self.GetAllSlots()
209
    for name in state:
210
      if name in slots:
211
        setattr(self, name, state[name])
212

    
213
  def Validate(self):
214
    """Validates the slots.
215

216
    """
217

    
218
  def ToDict(self):
219
    """Convert to a dict holding only standard python types.
220

221
    The generic routine just dumps all of this object's attributes in
222
    a dict. It does not work if the class has children who are
223
    ConfigObjects themselves (e.g. the nics list in an Instance), in
224
    which case the object should subclass the function in order to
225
    make sure all objects returned are only standard python types.
226

227
    """
228
    result = {}
229
    for name in self.GetAllSlots():
230
      value = getattr(self, name, None)
231
      if value is not None:
232
        result[name] = value
233
    return result
234

    
235
  __getstate__ = ToDict
236

    
237
  @classmethod
238
  def FromDict(cls, val):
239
    """Create an object from a dictionary.
240

241
    This generic routine takes a dict, instantiates a new instance of
242
    the given class, and sets attributes based on the dict content.
243

244
    As for `ToDict`, this does not work if the class has children
245
    who are ConfigObjects themselves (e.g. the nics list in an
246
    Instance), in which case the object should subclass the function
247
    and alter the objects.
248

249
    """
250
    if not isinstance(val, dict):
251
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
252
                                      " expected dict, got %s" % type(val))
253
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
254
    obj = cls(**val_str) # pylint: disable=W0142
255
    return obj
256

    
257
  def Copy(self):
258
    """Makes a deep copy of the current object and its children.
259

260
    """
261
    dict_form = self.ToDict()
262
    clone_obj = self.__class__.FromDict(dict_form)
263
    return clone_obj
264

    
265
  def __repr__(self):
266
    """Implement __repr__ for ConfigObjects."""
267
    return repr(self.ToDict())
268

    
269
  def __eq__(self, other):
270
    """Implement __eq__ for ConfigObjects."""
271
    return isinstance(other, self.__class__) and self.ToDict() == other.ToDict()
272

    
273
  def UpgradeConfig(self):
274
    """Fill defaults for missing configuration values.
275

276
    This method will be called at configuration load time, and its
277
    implementation will be object dependent.
278

279
    """
280
    pass
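# Illustrative round trip with a hypothetical subclass (editor's sketch,
# not part of the module):
#   class Example(ConfigObject):
#     __slots__ = ["foo", "bar"]
#   Example(foo=1).ToDict()          ==>  {"foo": 1}   (unset slots omitted)
#   Example.FromDict({"foo": 1}).bar ==>  None         (unset slots read as None)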
281

    
282

    
283
class TaggableObject(ConfigObject):
284
  """An generic class supporting tags.
285

286
  """
287
  __slots__ = ["tags"]
288
  VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")
289

    
290
  @classmethod
291
  def ValidateTag(cls, tag):
292
    """Check if a tag is valid.
293

294
    If the tag is invalid, an errors.TagError will be raised. The
295
    function has no return value.
296

297
    """
298
    if not isinstance(tag, basestring):
299
      raise errors.TagError("Invalid tag type (not a string)")
300
    if len(tag) > constants.MAX_TAG_LEN:
301
      raise errors.TagError("Tag too long (>%d characters)" %
302
                            constants.MAX_TAG_LEN)
303
    if not tag:
304
      raise errors.TagError("Tags cannot be empty")
305
    if not cls.VALID_TAG_RE.match(tag):
306
      raise errors.TagError("Tag contains invalid characters")
307

    
308
  def GetTags(self):
309
    """Return the tags list.
310

311
    """
312
    tags = getattr(self, "tags", None)
313
    if tags is None:
314
      tags = self.tags = set()
315
    return tags
316

    
317
  def AddTag(self, tag):
318
    """Add a new tag.
319

320
    """
321
    self.ValidateTag(tag)
322
    tags = self.GetTags()
323
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
324
      raise errors.TagError("Too many tags")
325
    self.GetTags().add(tag)
326

    
327
  def RemoveTag(self, tag):
328
    """Remove a tag.
329

330
    """
331
    self.ValidateTag(tag)
332
    tags = self.GetTags()
333
    try:
334
      tags.remove(tag)
335
    except KeyError:
336
      raise errors.TagError("Tag not found")
337

    
338
  def ToDict(self):
339
    """Taggable-object-specific conversion to standard python types.
340

341
    This replaces the tags set with a list.
342

343
    """
344
    bo = super(TaggableObject, self).ToDict()
345

    
346
    tags = bo.get("tags", None)
347
    if isinstance(tags, set):
348
      bo["tags"] = list(tags)
349
    return bo
350

    
351
  @classmethod
352
  def FromDict(cls, val):
353
    """Custom function for instances.
354

355
    """
356
    obj = super(TaggableObject, cls).FromDict(val)
357
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
358
      obj.tags = set(obj.tags)
359
    return obj
360

    
361

    
362
class MasterNetworkParameters(ConfigObject):
363
  """Network configuration parameters for the master
364

365
  @ivar uuid: master node's UUID
366
  @ivar ip: master IP
367
  @ivar netmask: master netmask
368
  @ivar netdev: master network device
369
  @ivar ip_family: master IP family
370

371
  """
372
  __slots__ = [
373
    "uuid",
374
    "ip",
375
    "netmask",
376
    "netdev",
377
    "ip_family",
378
    ]
379

    
380

    
381
class ConfigData(ConfigObject):
382
  """Top-level config object."""
383
  __slots__ = [
384
    "version",
385
    "cluster",
386
    "nodes",
387
    "nodegroups",
388
    "instances",
389
    "networks",
390
    "serial_no",
391
    ] + _TIMESTAMPS
392

    
393
  def ToDict(self):
394
    """Custom function for top-level config data.
395

396
    This just replaces the list of instances, nodes and the cluster
397
    with standard python types.
398

399
    """
400
    mydict = super(ConfigData, self).ToDict()
401
    mydict["cluster"] = mydict["cluster"].ToDict()
402
    for key in "nodes", "instances", "nodegroups", "networks":
403
      mydict[key] = outils.ContainerToDicts(mydict[key])
404

    
405
    return mydict
406

    
407
  @classmethod
408
  def FromDict(cls, val):
409
    """Custom function for top-level config data
410

411
    """
412
    obj = super(ConfigData, cls).FromDict(val)
413
    obj.cluster = Cluster.FromDict(obj.cluster)
414
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
415
    obj.instances = \
416
      outils.ContainerFromDicts(obj.instances, dict, Instance)
417
    obj.nodegroups = \
418
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
419
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
420
    return obj
421

    
422
  def HasAnyDiskOfType(self, dev_type):
423
    """Check if in there is at disk of the given type in the configuration.
424

425
    @type dev_type: L{constants.DTS_BLOCK}
426
    @param dev_type: the type to look for
427
    @rtype: boolean
428
    @return: boolean indicating if a disk of the given type was found or not
429

430
    """
431
    for instance in self.instances.values():
432
      for disk in instance.disks:
433
        if disk.IsBasedOnDiskType(dev_type):
434
          return True
435
    return False
436

    
437
  def UpgradeConfig(self):
438
    """Fill defaults for missing configuration values.
439

440
    """
441
    self.cluster.UpgradeConfig()
442
    for node in self.nodes.values():
443
      node.UpgradeConfig()
444
    for instance in self.instances.values():
445
      instance.UpgradeConfig()
446
    if self.nodegroups is None:
447
      self.nodegroups = {}
448
    for nodegroup in self.nodegroups.values():
449
      nodegroup.UpgradeConfig()
450
    if self.cluster.drbd_usermode_helper is None:
451
      if self.cluster.IsDiskTemplateEnabled(constants.DT_DRBD8):
452
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
453
    if self.networks is None:
454
      self.networks = {}
455
    for network in self.networks.values():
456
      network.UpgradeConfig()
457
    self._UpgradeEnabledDiskTemplates()
458

    
459
  def _UpgradeEnabledDiskTemplates(self):
460
    """Upgrade the cluster's enabled disk templates by inspecting the currently
461
       enabled and/or used disk templates.
462

463
    """
464
    # enabled_disk_templates in the cluster config were introduced in 2.8.
465
    # Remove this code once upgrading from earlier versions is deprecated.
466
    if not self.cluster.enabled_disk_templates:
467
      template_set = \
468
        set([inst.disk_template for inst in self.instances.values()])
469
      # Add drbd and plain, if lvm is enabled (by specifying a volume group)
470
      if self.cluster.volume_group_name:
471
        template_set.add(constants.DT_DRBD8)
472
        template_set.add(constants.DT_PLAIN)
473
      # Set enabled_disk_templates to the inferred disk templates. Order them
474
      # according to a preference list that is based on Ganeti's history of
475
      # supported disk templates.
476
      self.cluster.enabled_disk_templates = []
477
      for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
478
        if preferred_template in template_set:
479
          self.cluster.enabled_disk_templates.append(preferred_template)
480
          template_set.remove(preferred_template)
481
      self.cluster.enabled_disk_templates.extend(list(template_set))
482

    
483

    
484
class NIC(ConfigObject):
485
  """Config object representing a network card."""
486
  __slots__ = ["name", "mac", "ip", "network", "nicparams", "netinfo"] + _UUID
487

    
488
  @classmethod
489
  def CheckParameterSyntax(cls, nicparams):
490
    """Check the given parameters for validity.
491

492
    @type nicparams:  dict
493
    @param nicparams: dictionary with parameter names/values
494
    @raise errors.ConfigurationError: when a parameter is not valid
495

496
    """
497
    mode = nicparams[constants.NIC_MODE]
498
    if (mode not in constants.NIC_VALID_MODES and
499
        mode != constants.VALUE_AUTO):
500
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
501

    
502
    if (mode == constants.NIC_MODE_BRIDGED and
503
        not nicparams[constants.NIC_LINK]):
504
      raise errors.ConfigurationError("Missing bridged NIC link")
505

    
506

    
507
class Disk(ConfigObject):
508
  """Config object representing a block device."""
509
  __slots__ = (["name", "dev_type", "logical_id", "physical_id", "children", "iv_name",
510
                "size", "mode", "params", "spindles"] + _UUID +
511
               # dynamic_params is special. It depends on the node this instance
512
               # is sent to, and should not be persisted.
513
               ["dynamic_params"])
514

    
515
  def CreateOnSecondary(self):
516
    """Test if this device needs to be created on a secondary node."""
517
    return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
518

    
519
  def AssembleOnSecondary(self):
520
    """Test if this device needs to be assembled on a secondary node."""
521
    return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
522

    
523
  def OpenOnSecondary(self):
524
    """Test if this device needs to be opened on a secondary node."""
525
    return self.dev_type in (constants.DT_PLAIN,)
526

    
527
  def StaticDevPath(self):
528
    """Return the device path if this device type has a static one.
529

530
    Some devices (LVM for example) always live at the same /dev/ path,
531
    irrespective of their status. For such devices, we return this
532
    path, for others we return None.
533

534
    @warning: The path returned is not a normalized pathname; callers
535
        should check that it is a valid path.
536

537
    """
538
    if self.dev_type == constants.DT_PLAIN:
539
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
540
    elif self.dev_type == constants.DT_BLOCK:
541
      return self.logical_id[1]
542
    elif self.dev_type == constants.DT_RBD:
543
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
544
    return None
545

    
546
  def ChildrenNeeded(self):
547
    """Compute the needed number of children for activation.
548

549
    This method will return either -1 (all children) or a positive
550
    number denoting the minimum number of children needed for
551
    activation (only mirrored devices will usually return >=0).
552

553
    Currently, only DRBD8 supports diskless activation (therefore we
554
    return 0), for all other we keep the previous semantics and return
555
    -1.
556

557
    """
558
    if self.dev_type == constants.DT_DRBD8:
559
      return 0
560
    return -1
561

    
562
  def IsBasedOnDiskType(self, dev_type):
563
    """Check if the disk or its children are based on the given type.
564

565
    @type dev_type: L{constants.DTS_BLOCK}
566
    @param dev_type: the type to look for
567
    @rtype: boolean
568
    @return: boolean indicating if a device of the given type was found or not
569

570
    """
571
    if self.children:
572
      for child in self.children:
573
        if child.IsBasedOnDiskType(dev_type):
574
          return True
575
    return self.dev_type == dev_type
576

    
577
  def GetNodes(self, node_uuid):
578
    """This function returns the nodes this device lives on.
579

580
    Given the node on which the parent of the device lives on (or, in
581
    case of a top-level device, the primary node of the devices'
582
    instance), this function will return a list of nodes on which this
583
    device needs to (or can) be assembled.
584

585
    """
586
    if self.dev_type in [constants.DT_PLAIN, constants.DT_FILE,
587
                         constants.DT_BLOCK, constants.DT_RBD,
588
                         constants.DT_EXT, constants.DT_SHARED_FILE]:
589
      result = [node_uuid]
590
    elif self.dev_type in constants.LDS_DRBD:
591
      result = [self.logical_id[0], self.logical_id[1]]
592
      if node_uuid not in result:
593
        raise errors.ConfigurationError("DRBD device passed unknown node")
594
    else:
595
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
596
    return result
597

    
598
  def ComputeNodeTree(self, parent_node_uuid):
599
    """Compute the node/disk tree for this disk and its children.
600

601
    This method, given the node on which the parent disk lives, will
602
    return the list of all (node UUID, disk) pairs which describe the disk
603
    tree in the most compact way. For example, a drbd/lvm stack
604
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
605
    which represents all the top-level devices on the nodes.
606

607
    """
608
    my_nodes = self.GetNodes(parent_node_uuid)
609
    result = [(node, self) for node in my_nodes]
610
    if not self.children:
611
      # leaf device
612
      return result
613
    for node in my_nodes:
614
      for child in self.children:
615
        child_result = child.ComputeNodeTree(node)
616
        if len(child_result) == 1:
617
          # child (and all its descendants) is simple, doesn't split
618
          # over multiple hosts, so we don't need to describe it, our
619
          # own entry for this node describes it completely
620
          continue
621
        else:
622
          # check if child nodes differ from my nodes; note that
623
          # subdisk can differ from the child itself, and be instead
624
          # one of its descendants
625
          for subnode, subdisk in child_result:
626
            if subnode not in my_nodes:
627
              result.append((subnode, subdisk))
628
            # otherwise child is under our own node, so we ignore this
629
            # entry (but probably the other results in the list will
630
            # be different)
631
    return result
632

    
633
  def ComputeGrowth(self, amount):
634
    """Compute the per-VG growth requirements.
635

636
    This only works for VG-based disks.
637

638
    @type amount: integer
639
    @param amount: the desired increase in (user-visible) disk space
640
    @rtype: dict
641
    @return: a dictionary of volume-groups and the required size
642

643
    """
644
    if self.dev_type == constants.DT_PLAIN:
645
      return {self.logical_id[0]: amount}
646
    elif self.dev_type == constants.DT_DRBD8:
647
      if self.children:
648
        return self.children[0].ComputeGrowth(amount)
649
      else:
650
        return {}
651
    else:
652
      # Other disk types do not require VG space
653
      return {}
654

    
655
  def RecordGrow(self, amount):
656
    """Update the size of this disk after growth.
657

658
    This method recurses over the disk's children and updates their
659
    size correspondingly. The method needs to be kept in sync with the
660
    actual algorithms from bdev.
661

662
    """
663
    if self.dev_type in (constants.DT_PLAIN, constants.DT_FILE,
664
                         constants.DT_RBD, constants.DT_EXT,
665
                         constants.DT_SHARED_FILE):
666
      self.size += amount
667
    elif self.dev_type == constants.DT_DRBD8:
668
      if self.children:
669
        self.children[0].RecordGrow(amount)
670
      self.size += amount
671
    else:
672
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
673
                                   " disk type %s" % self.dev_type)
674

    
675
  def Update(self, size=None, mode=None, spindles=None):
676
    """Apply changes to size, spindles and mode.
677

678
    """
679
    if self.dev_type == constants.DT_DRBD8:
680
      if self.children:
681
        self.children[0].Update(size=size, mode=mode)
682
    else:
683
      assert not self.children
684

    
685
    if size is not None:
686
      self.size = size
687
    if mode is not None:
688
      self.mode = mode
689
    if spindles is not None:
690
      self.spindles = spindles
691

    
692
  def UnsetSize(self):
693
    """Sets recursively the size to zero for the disk and its children.
694

695
    """
696
    if self.children:
697
      for child in self.children:
698
        child.UnsetSize()
699
    self.size = 0
700

    
701
  def UpdateDynamicDiskParams(self, target_node_uuid, nodes_ip):
702
    """Updates the dynamic disk params for the given node.
703

704
    This is mainly used for drbd, which needs ip/port configuration.
705

706
    Arguments:
707
      - target_node_uuid: the node UUID we wish to configure for
708
      - nodes_ip: a mapping of node UUID to IP
709

710
    The target_node must exist in nodes_ip, and should be one of the
711
    nodes in the logical ID if this device is a DRBD device.
712

713
    """
714
    if self.children:
715
      for child in self.children:
716
        child.UpdateDynamicDiskParams(target_node_uuid, nodes_ip)
717

    
718
    dyn_disk_params = {}
719
    if self.logical_id is not None and self.dev_type in constants.LDS_DRBD:
720
      pnode_uuid, snode_uuid, _, pminor, sminor, _ = self.logical_id
721
      if target_node_uuid not in (pnode_uuid, snode_uuid):
722
        # disk object is being sent to neither the primary nor the secondary
723
        # node. Reset the dynamic parameters; the target node is not
724
        # supposed to use them.
725
        self.dynamic_params = dyn_disk_params
726
        return
727

    
728
      pnode_ip = nodes_ip.get(pnode_uuid, None)
729
      snode_ip = nodes_ip.get(snode_uuid, None)
730
      if pnode_ip is None or snode_ip is None:
731
        raise errors.ConfigurationError("Can't find primary or secondary node"
732
                                        " for %s" % str(self))
733
      if pnode_uuid == target_node_uuid:
734
        dyn_disk_params[constants.DDP_LOCAL_IP] = pnode_ip
735
        dyn_disk_params[constants.DDP_REMOTE_IP] = snode_ip
736
        dyn_disk_params[constants.DDP_LOCAL_MINOR] = pminor
737
        dyn_disk_params[constants.DDP_REMOTE_MINOR] = sminor
738
      else: # it must be secondary, we tested above
739
        dyn_disk_params[constants.DDP_LOCAL_IP] = snode_ip
740
        dyn_disk_params[constants.DDP_REMOTE_IP] = pnode_ip
741
        dyn_disk_params[constants.DDP_LOCAL_MINOR] = sminor
742
        dyn_disk_params[constants.DDP_REMOTE_MINOR] = pminor
743

    
744
    self.dynamic_params = dyn_disk_params
745

    
746
  def ToDict(self):
747
    """Disk-specific conversion to standard python types.
748

749
    This replaces the children lists of objects with lists of
750
    standard python types.
751

752
    """
753
    bo = super(Disk, self).ToDict()
754

    
755
    for attr in ("children",):
756
      alist = bo.get(attr, None)
757
      if alist:
758
        bo[attr] = outils.ContainerToDicts(alist)
759
    return bo
760

    
761
  @classmethod
762
  def FromDict(cls, val):
763
    """Custom function for Disks
764

765
    """
766
    obj = super(Disk, cls).FromDict(val)
767
    if obj.children:
768
      obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
769
    if obj.logical_id and isinstance(obj.logical_id, list):
770
      obj.logical_id = tuple(obj.logical_id)
771
    if obj.physical_id and isinstance(obj.physical_id, list):
772
      obj.physical_id = tuple(obj.physical_id)
773
    if obj.dev_type in constants.LDS_DRBD:
774
      # we need a tuple of length six here
775
      if len(obj.logical_id) < 6:
776
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
777
    return obj
778

    
779
  def __str__(self):
780
    """Custom str() formatter for disks.
781

782
    """
783
    if self.dev_type == constants.DT_PLAIN:
784
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
785
    elif self.dev_type in constants.LDS_DRBD:
786
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
787
      val = "<DRBD8("
788
      if self.physical_id is None:
789
        phy = "unconfigured"
790
      else:
791
        phy = ("configured as %s:%s %s:%s" %
792
               (self.physical_id[0], self.physical_id[1],
793
                self.physical_id[2], self.physical_id[3]))
794

    
795
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
796
              (node_a, minor_a, node_b, minor_b, port, phy))
797
      if self.children and self.children.count(None) == 0:
798
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
799
      else:
800
        val += "no local storage"
801
    else:
802
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
803
             (self.dev_type, self.logical_id, self.physical_id, self.children))
804
    if self.iv_name is None:
805
      val += ", not visible"
806
    else:
807
      val += ", visible as /dev/%s" % self.iv_name
808
    if self.spindles is not None:
809
      val += ", spindles=%s" % self.spindles
810
    if isinstance(self.size, int):
811
      val += ", size=%dm)>" % self.size
812
    else:
813
      val += ", size='%s')>" % (self.size,)
814
    return val
815

    
816
  def Verify(self):
817
    """Checks that this disk is correctly configured.
818

819
    """
820
    all_errors = []
821
    if self.mode not in constants.DISK_ACCESS_SET:
822
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
823
    return all_errors
824

    
825
  def UpgradeConfig(self):
826
    """Fill defaults for missing configuration values.
827

828
    """
829
    if self.children:
830
      for child in self.children:
831
        child.UpgradeConfig()
832

    
833
    # FIXME: Make this configurable in Ganeti 2.7
834
    self.params = {}
835
    # add here config upgrade for this disk
836

    
837
    # map of legacy device types (mapping differing LD constants to new
838
    # DT constants)
839
    LEG_DEV_TYPE_MAP = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
840
    if self.dev_type in LEG_DEV_TYPE_MAP:
841
      self.dev_type = LEG_DEV_TYPE_MAP[self.dev_type]
842

    
843
  @staticmethod
844
  def ComputeLDParams(disk_template, disk_params):
845
    """Computes Logical Disk parameters from Disk Template parameters.
846

847
    @type disk_template: string
848
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
849
    @type disk_params: dict
850
    @param disk_params: disk template parameters;
851
                        dict(template_name -> parameters
852
    @rtype: list(dict)
853
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
854
      contains the LD parameters of the node. The tree is flattened in-order.
855

856
    """
857
    if disk_template not in constants.DISK_TEMPLATES:
858
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
859

    
860
    assert disk_template in disk_params
861

    
862
    result = list()
863
    dt_params = disk_params[disk_template]
864
    if disk_template == constants.DT_DRBD8:
865
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_DRBD8], {
866
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
867
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
868
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
869
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
870
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
871
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
872
        constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL],
873
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
874
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
875
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
876
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
877
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
878
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
879
        }))
880

    
881
      # data LV
882
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
883
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
884
        }))
885

    
886
      # metadata LV
887
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
888
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
889
        }))
890

    
891
    elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
892
      result.append(constants.DISK_LD_DEFAULTS[disk_template])
893

    
894
    elif disk_template == constants.DT_PLAIN:
895
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
896
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
897
        }))
898

    
899
    elif disk_template == constants.DT_BLOCK:
900
      result.append(constants.DISK_LD_DEFAULTS[constants.DT_BLOCK])
901

    
902
    elif disk_template == constants.DT_RBD:
903
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_RBD], {
904
        constants.LDP_POOL: dt_params[constants.RBD_POOL],
905
        }))
906

    
907
    elif disk_template == constants.DT_EXT:
908
      result.append(constants.DISK_LD_DEFAULTS[constants.DT_EXT])
909

    
910
    return result
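  # Editor's note: for constants.DT_DRBD8 the returned list has three
  # entries, flattened in-order: the DRBD device itself, then the data LV,
  # then the metadata LV; single-level templates return a one-element list.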
911

    
912

    
913
class InstancePolicy(ConfigObject):
914
  """Config object representing instance policy limits dictionary.
915

916
  Note that this object is not actually used in the config, it's just
917
  used as a placeholder for a few functions.
918

919
  """
920
  @classmethod
921
  def CheckParameterSyntax(cls, ipolicy, check_std):
922
    """ Check the instance policy for validity.
923

924
    @type ipolicy: dict
925
    @param ipolicy: dictionary with min/max/std specs and policies
926
    @type check_std: bool
927
    @param check_std: Whether to check std value or just assume compliance
928
    @raise errors.ConfigurationError: when the policy is not legal
929

930
    """
931
    InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
932
    if constants.IPOLICY_DTS in ipolicy:
933
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
934
    for key in constants.IPOLICY_PARAMETERS:
935
      if key in ipolicy:
936
        InstancePolicy.CheckParameter(key, ipolicy[key])
937
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
938
    if wrong_keys:
939
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
940
                                      utils.CommaJoin(wrong_keys))
941

    
942
  @classmethod
943
  def _CheckIncompleteSpec(cls, spec, keyname):
944
    missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
945
    if missing_params:
946
      msg = ("Missing instance specs parameters for %s: %s" %
947
             (keyname, utils.CommaJoin(missing_params)))
948
      raise errors.ConfigurationError(msg)
949

    
950
  @classmethod
951
  def CheckISpecSyntax(cls, ipolicy, check_std):
952
    """Check the instance policy specs for validity.
953

954
    @type ipolicy: dict
955
    @param ipolicy: dictionary with min/max/std specs
956
    @type check_std: bool
957
    @param check_std: Whether to check std value or just assume compliance
958
    @raise errors.ConfigurationError: when specs are not valid
959

960
    """
961
    if constants.ISPECS_MINMAX not in ipolicy:
962
      # Nothing to check
963
      return
964

    
965
    if check_std and constants.ISPECS_STD not in ipolicy:
966
      msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
967
      raise errors.ConfigurationError(msg)
968
    stdspec = ipolicy.get(constants.ISPECS_STD)
969
    if check_std:
970
      InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)
971

    
972
    if not ipolicy[constants.ISPECS_MINMAX]:
973
      raise errors.ConfigurationError("Empty minmax specifications")
974
    std_is_good = False
975
    for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
976
      missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
977
      if missing:
978
        msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
979
        raise errors.ConfigurationError(msg)
980
      for (key, spec) in minmaxspecs.items():
981
        InstancePolicy._CheckIncompleteSpec(spec, key)
982

    
983
      spec_std_ok = True
984
      for param in constants.ISPECS_PARAMETERS:
985
        par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
986
                                                           param, check_std)
987
        spec_std_ok = spec_std_ok and par_std_ok
988
      std_is_good = std_is_good or spec_std_ok
989
    if not std_is_good:
990
      raise errors.ConfigurationError("Invalid std specifications")
991

    
992
  @classmethod
993
  def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
994
    """Check the instance policy specs for validity on a given key.
995

996
    We check if the instance specs make sense for a given key, that is,
997
    if minmaxspecs[min][name] <= stdspec[name] <= minmaxspecs[max][name].
998

999
    @type minmaxspecs: dict
1000
    @param minmaxspecs: dictionary with min and max instance spec
1001
    @type stdspec: dict
1002
    @param stdspec: dictionary with standard instance spec
1003
    @type name: string
1004
    @param name: what are the limits for
1005
    @type check_std: bool
1006
    @param check_std: Whether to check std value or just assume compliance
1007
    @rtype: bool
1008
    @return: C{True} when specs are valid, C{False} when standard spec for the
1009
        given name is not valid
1010
    @raise errors.ConfigurationError: when min/max specs for the given name
1011
        are not valid
1012

1013
    """
1014
    minspec = minmaxspecs[constants.ISPECS_MIN]
1015
    maxspec = minmaxspecs[constants.ISPECS_MAX]
1016
    min_v = minspec[name]
1017
    max_v = maxspec[name]
1018

    
1019
    if min_v > max_v:
1020
      err = ("Invalid specification of min/max values for %s: %s/%s" %
1021
             (name, min_v, max_v))
1022
      raise errors.ConfigurationError(err)
1023
    elif check_std:
1024
      std_v = stdspec.get(name, min_v)
1025
      return std_v >= min_v and std_v <= max_v
1026
    else:
1027
      return True
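  # Illustrative behaviour (editor's note, hypothetical values): with
  # check_std=True, min=1 and max=4, the method returns True for std=2 and
  # False for std=5, while min=4 paired with max=1 raises
  # errors.ConfigurationError.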
1028

    
1029
  @classmethod
1030
  def CheckDiskTemplates(cls, disk_templates):
1031
    """Checks the disk templates for validity.
1032

1033
    """
1034
    if not disk_templates:
1035
      raise errors.ConfigurationError("Instance policy must contain" +
1036
                                      " at least one disk template")
1037
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1038
    if wrong:
1039
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
1040
                                      utils.CommaJoin(wrong))
1041

    
1042
  @classmethod
1043
  def CheckParameter(cls, key, value):
1044
    """Checks a parameter.
1045

1046
    Currently we expect all parameters to be float values.
1047

1048
    """
1049
    try:
1050
      float(value)
1051
    except (TypeError, ValueError), err:
1052
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
1053
                                      " '%s', error: %s" % (key, value, err))
1054

    
1055

    
1056
class Instance(TaggableObject):
1057
  """Config object representing an instance."""
1058
  __slots__ = [
1059
    "name",
1060
    "primary_node",
1061
    "os",
1062
    "hypervisor",
1063
    "hvparams",
1064
    "beparams",
1065
    "osparams",
1066
    "admin_state",
1067
    "nics",
1068
    "disks",
1069
    "disk_template",
1070
    "disks_active",
1071
    "network_port",
1072
    "serial_no",
1073
    ] + _TIMESTAMPS + _UUID
1074

    
1075
  def _ComputeSecondaryNodes(self):
1076
    """Compute the list of secondary nodes.
1077

1078
    This is a simple wrapper over _ComputeAllNodes.
1079

1080
    """
1081
    all_nodes = set(self._ComputeAllNodes())
1082
    all_nodes.discard(self.primary_node)
1083
    return tuple(all_nodes)
1084

    
1085
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1086
                             "List of names of secondary nodes")
1087

    
1088
  def _ComputeAllNodes(self):
1089
    """Compute the list of all nodes.
1090

1091
    Since the data is already there (in the drbd disks), keeping it as
1092
    a separate normal attribute is redundant and if not properly
1093
    synchronised can cause problems. Thus it's better to compute it
1094
    dynamically.
1095

1096
    """
1097
    def _Helper(nodes, device):
1098
      """Recursively computes nodes given a top device."""
1099
      if device.dev_type in constants.LDS_DRBD:
1100
        nodea, nodeb = device.logical_id[:2]
1101
        nodes.add(nodea)
1102
        nodes.add(nodeb)
1103
      if device.children:
1104
        for child in device.children:
1105
          _Helper(nodes, child)
1106

    
1107
    all_nodes = set()
1108
    all_nodes.add(self.primary_node)
1109
    for device in self.disks:
1110
      _Helper(all_nodes, device)
1111
    return tuple(all_nodes)
1112

    
1113
  all_nodes = property(_ComputeAllNodes, None, None,
1114
                       "List of names of all the nodes of the instance")
1115

    
1116
  def MapLVsByNode(self, lvmap=None, devs=None, node_uuid=None):
1117
    """Provide a mapping of nodes to LVs this instance owns.
1118

1119
    This function figures out what logical volumes should belong on
1120
    which nodes, recursing through a device tree.
1121

1122
    @type lvmap: dict
1123
    @param lvmap: optional dictionary to receive the
1124
        'node' : ['lv', ...] data.
1125
    @type devs: list of L{Disk}
1126
    @param devs: disks to get the LV name for. If None, all disk of this
1127
        instance are used.
1128
    @type node_uuid: string
1129
    @param node_uuid: UUID of the node to get the LV names for. If None, the
1130
        primary node of this instance is used.
1131
    @return: None if lvmap arg is given, otherwise, a dictionary of
1132
        the form { 'node_uuid' : ['volume1', 'volume2', ...], ... };
1133
        volumeN is of the form "vg_name/lv_name", compatible with
1134
        GetVolumeList()
1135

1136
    """
1137
    if node_uuid is None:
1138
      node_uuid = self.primary_node
1139

    
1140
    if lvmap is None:
1141
      lvmap = {
1142
        node_uuid: [],
1143
        }
1144
      ret = lvmap
1145
    else:
1146
      if not node_uuid in lvmap:
1147
        lvmap[node_uuid] = []
1148
      ret = None
1149

    
1150
    if not devs:
1151
      devs = self.disks
1152

    
1153
    for dev in devs:
1154
      if dev.dev_type == constants.DT_PLAIN:
1155
        lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1156

    
1157
      elif dev.dev_type in constants.LDS_DRBD:
1158
        if dev.children:
1159
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1160
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1161

    
1162
      elif dev.children:
1163
        self.MapLVsByNode(lvmap, dev.children, node_uuid)
1164

    
1165
    return ret
1166

    
1167
  def FindDisk(self, idx):
1168
    """Find a disk given having a specified index.
1169

1170
    This is just a wrapper that does validation of the index.
1171

1172
    @type idx: int
1173
    @param idx: the disk index
1174
    @rtype: L{Disk}
1175
    @return: the corresponding disk
1176
    @raise errors.OpPrereqError: when the given index is not valid
1177

1178
    """
1179
    try:
1180
      idx = int(idx)
1181
      return self.disks[idx]
1182
    except (TypeError, ValueError), err:
1183
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1184
                                 errors.ECODE_INVAL)
1185
    except IndexError:
1186
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1187
                                 " 0 to %d" % (idx, len(self.disks) - 1),
1188
                                 errors.ECODE_INVAL)
1189

    
1190
  def ToDict(self):
1191
    """Instance-specific conversion to standard python types.
1192

1193
    This replaces the children lists of objects with lists of standard
1194
    python types.
1195

1196
    """
1197
    bo = super(Instance, self).ToDict()
1198

    
1199
    for attr in "nics", "disks":
1200
      alist = bo.get(attr, None)
1201
      if alist:
1202
        nlist = outils.ContainerToDicts(alist)
1203
      else:
1204
        nlist = []
1205
      bo[attr] = nlist
1206
    return bo
1207

    
1208
  @classmethod
1209
  def FromDict(cls, val):
1210
    """Custom function for instances.
1211

1212
    """
1213
    if "admin_state" not in val:
1214
      if val.get("admin_up", False):
1215
        val["admin_state"] = constants.ADMINST_UP
1216
      else:
1217
        val["admin_state"] = constants.ADMINST_DOWN
1218
    if "admin_up" in val:
1219
      del val["admin_up"]
1220
    obj = super(Instance, cls).FromDict(val)
1221
    obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
1222
    obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
1223
    return obj
1224

    
1225
  def UpgradeConfig(self):
1226
    """Fill defaults for missing configuration values.
1227

1228
    """
1229
    for nic in self.nics:
1230
      nic.UpgradeConfig()
1231
    for disk in self.disks:
1232
      disk.UpgradeConfig()
1233
    if self.hvparams:
1234
      for key in constants.HVC_GLOBALS:
1235
        try:
1236
          del self.hvparams[key]
1237
        except KeyError:
1238
          pass
1239
    if self.osparams is None:
1240
      self.osparams = {}
1241
    UpgradeBeParams(self.beparams)
1242
    if self.disks_active is None:
1243
      self.disks_active = self.admin_state == constants.ADMINST_UP
1244

    
1245

    
1246
class OS(ConfigObject):
1247
  """Config object representing an operating system.
1248

1249
  @type supported_parameters: list
1250
  @ivar supported_parameters: a list of (name, description) tuples,
1251
      describing the parameters supported by this OS
1252

1253
  @type VARIANT_DELIM: string
1254
  @cvar VARIANT_DELIM: the variant delimiter
1255

1256
  """
1257
  __slots__ = [
1258
    "name",
1259
    "path",
1260
    "api_versions",
1261
    "create_script",
1262
    "export_script",
1263
    "import_script",
1264
    "rename_script",
1265
    "verify_script",
1266
    "supported_variants",
1267
    "supported_parameters",
1268
    ]
1269

    
1270
  VARIANT_DELIM = "+"
1271

    
1272
  @classmethod
1273
  def SplitNameVariant(cls, name):
1274
    """Splits the name into the proper name and variant.
1275

1276
    @param name: the OS (unprocessed) name
1277
    @rtype: list
1278
    @return: a list of two elements; if the original name didn't
1279
        contain a variant, the variant is returned as an empty string
1280

1281
    """
1282
    nv = name.split(cls.VARIANT_DELIM, 1)
1283
    if len(nv) == 1:
1284
      nv.append("")
1285
    return nv
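  # Illustrative examples (editor's note):
  #   OS.SplitNameVariant("debian+minimal")  ==>  ["debian", "minimal"]
  #   OS.SplitNameVariant("debian")          ==>  ["debian", ""]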
1286

    
1287
  @classmethod
1288
  def GetName(cls, name):
1289
    """Returns the proper name of the os (without the variant).
1290

1291
    @param name: the OS (unprocessed) name
1292

1293
    """
1294
    return cls.SplitNameVariant(name)[0]
1295

    
1296
  @classmethod
1297
  def GetVariant(cls, name):
1298
    """Returns the variant the os (without the base name).
1299

1300
    @param name: the OS (unprocessed) name
1301

1302
    """
1303
    return cls.SplitNameVariant(name)[1]
1304

    
1305

    
1306
class ExtStorage(ConfigObject):
1307
  """Config object representing an External Storage Provider.
1308

1309
  """
1310
  __slots__ = [
1311
    "name",
1312
    "path",
1313
    "create_script",
1314
    "remove_script",
1315
    "grow_script",
1316
    "attach_script",
1317
    "detach_script",
1318
    "setinfo_script",
1319
    "verify_script",
1320
    "supported_parameters",
1321
    ]
1322

    
1323

    
1324
class NodeHvState(ConfigObject):
1325
  """Hypvervisor state on a node.
1326

1327
  @ivar mem_total: Total amount of memory
1328
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1329
    available)
1330
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1331
    rounding
1332
  @ivar mem_inst: Memory used by instances living on node
1333
  @ivar cpu_total: Total node CPU core count
1334
  @ivar cpu_node: Number of CPU cores reserved for the node itself
1335

1336
  """
1337
  __slots__ = [
1338
    "mem_total",
1339
    "mem_node",
1340
    "mem_hv",
1341
    "mem_inst",
1342
    "cpu_total",
1343
    "cpu_node",
1344
    ] + _TIMESTAMPS
1345

    
1346

    
1347
class NodeDiskState(ConfigObject):
1348
  """Disk state on a node.
1349

1350
  """
1351
  __slots__ = [
1352
    "total",
1353
    "reserved",
1354
    "overhead",
1355
    ] + _TIMESTAMPS
1356

    
1357

    
1358
class Node(TaggableObject):
1359
  """Config object representing a node.
1360

1361
  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1362
  @ivar hv_state_static: Hypervisor state overridden by user
1363
  @ivar disk_state: Disk state (e.g. free space)
1364
  @ivar disk_state_static: Disk state overridden by user
1365

1366
  """
1367
  __slots__ = [
1368
    "name",
1369
    "primary_ip",
1370
    "secondary_ip",
1371
    "serial_no",
1372
    "master_candidate",
1373
    "offline",
1374
    "drained",
1375
    "group",
1376
    "master_capable",
1377
    "vm_capable",
1378
    "ndparams",
1379
    "powered",
1380
    "hv_state",
1381
    "hv_state_static",
1382
    "disk_state",
1383
    "disk_state_static",
1384
    ] + _TIMESTAMPS + _UUID
1385

    
1386
  def UpgradeConfig(self):
1387
    """Fill defaults for missing configuration values.
1388

1389
    """
1390
    # pylint: disable=E0203
1391
    # because these are "defined" via slots, not manually
1392
    if self.master_capable is None:
1393
      self.master_capable = True
1394

    
1395
    if self.vm_capable is None:
1396
      self.vm_capable = True
1397

    
1398
    if self.ndparams is None:
1399
      self.ndparams = {}
1400
    # And remove any global parameter
1401
    for key in constants.NDC_GLOBALS:
1402
      if key in self.ndparams:
1403
        logging.warning("Ignoring %s node parameter for node %s",
1404
                        key, self.name)
1405
        del self.ndparams[key]
1406

    
1407
    if self.powered is None:
1408
      self.powered = True
1409

    
1410
  def ToDict(self):
1411
    """Custom function for serializing.
1412

1413
    """
1414
    data = super(Node, self).ToDict()
1415

    
1416
    hv_state = data.get("hv_state", None)
1417
    if hv_state is not None:
1418
      data["hv_state"] = outils.ContainerToDicts(hv_state)
1419

    
1420
    disk_state = data.get("disk_state", None)
1421
    if disk_state is not None:
1422
      data["disk_state"] = \
1423
        dict((key, outils.ContainerToDicts(value))
1424
             for (key, value) in disk_state.items())
1425

    
1426
    return data
1427

    
1428
  @classmethod
1429
  def FromDict(cls, val):
1430
    """Custom function for deserializing.
1431

1432
    """
1433
    obj = super(Node, cls).FromDict(val)
1434

    
1435
    if obj.hv_state is not None:
1436
      obj.hv_state = \
1437
        outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1438

    
1439
    if obj.disk_state is not None:
1440
      obj.disk_state = \
1441
        dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
1442
             for (key, value) in obj.disk_state.items())
1443

    
1444
    return obj
1445

    
1446

    
1447
class NodeGroup(TaggableObject):
1448
  """Config object representing a node group."""
1449
  __slots__ = [
1450
    "name",
1451
    "members",
1452
    "ndparams",
1453
    "diskparams",
1454
    "ipolicy",
1455
    "serial_no",
1456
    "hv_state_static",
1457
    "disk_state_static",
1458
    "alloc_policy",
1459
    "networks",
1460
    ] + _TIMESTAMPS + _UUID
1461

    
1462
  def ToDict(self):
1463
    """Custom function for nodegroup.
1464

1465
    This discards the members object, which gets recalculated and is only kept
1466
    in memory.
1467

1468
    """
1469
    mydict = super(NodeGroup, self).ToDict()
1470
    del mydict["members"]
1471
    return mydict
1472

    
1473
  @classmethod
1474
  def FromDict(cls, val):
1475
    """Custom function for nodegroup.
1476

1477
    The members slot is initialized to an empty list, upon deserialization.
1478

1479
    """
1480
    obj = super(NodeGroup, cls).FromDict(val)
1481
    obj.members = []
1482
    return obj
1483

    
1484
  def UpgradeConfig(self):
1485
    """Fill defaults for missing configuration values.
1486

1487
    """
1488
    if self.ndparams is None:
1489
      self.ndparams = {}
1490

    
1491
    if self.serial_no is None:
1492
      self.serial_no = 1
1493

    
1494
    if self.alloc_policy is None:
1495
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1496

    
1497
    # We only update mtime, and not ctime, since we would not be able
1498
    # to provide a correct value for creation time.
1499
    if self.mtime is None:
1500
      self.mtime = time.time()
1501

    
1502
    if self.diskparams is None:
1503
      self.diskparams = {}
1504
    if self.ipolicy is None:
1505
      self.ipolicy = MakeEmptyIPolicy()
1506

    
1507
    if self.networks is None:
1508
      self.networks = {}
1509

    
1510
  def FillND(self, node):
1511
    """Return filled out ndparams for L{objects.Node}
1512

1513
    @type node: L{objects.Node}
1514
    @param node: A Node object to fill
1515
    @return: a copy of the node's ndparams with defaults filled
1516

1517
    """
1518
    return self.SimpleFillND(node.ndparams)
1519

    
1520
  def SimpleFillND(self, ndparams):
1521
    """Fill a given ndparams dict with defaults.
1522

1523
    @type ndparams: dict
1524
    @param ndparams: the dict to fill
1525
    @rtype: dict
1526
    @return: a copy of the passed in ndparams with missing keys filled
1527
        from the node group defaults
1528

1529
    """
1530
    return FillDict(self.ndparams, ndparams)
1531

    
1532

    
1533
class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "dsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "master_netmask",
    "use_external_mip_script",
    "cluster_name",
    "file_storage_dir",
    "shared_file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "ipolicy",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "ndparams",
    "diskparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    "hv_state_static",
    "disk_state_static",
    "enabled_disk_templates",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.ndparams = UpgradeNDParams(self.ndparams)

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    if self.diskparams:
      self.diskparams = UpgradeDiskParams(self.diskparams)
    else:
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
      if wrongkeys:
        # These keys would be silently removed by FillIPolicy()
        msg = ("Cluster instance policy contains spurious keys: %s" %
               utils.CommaJoin(wrongkeys))
        raise errors.ConfigurationError(msg)
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)

  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()

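    # tcpudp_port_pool is kept as a set in memory (see FromDict below); sets
    # cannot be serialized to JSON, so it is dumped here as a plain list.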
    if self.tcpudp_port_pool is None:
      tcpudp_port_pool = []
    else:
      tcpudp_port_pool = list(self.tcpudp_port_pool)

    mydict["tcpudp_port_pool"] = tcpudp_port_pool

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)

    if obj.tcpudp_port_pool is None:
      obj.tcpudp_port_pool = set()
    elif not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)

    return obj

  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: the diskparams dict to fill
    @return: a copy of the given diskparams with missing keys filled from
        the cluster defaults

    """
    return FillDiskParams(self.diskparams, diskparams)

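  # Illustrative precedence (hypothetical values): with cluster-level
  # hvparams["kvm"] = {"kernel_path": "/boot/vmlinuz", "acpi": True} and
  # os_hvp["debian"] = {"kvm": {"acpi": False}},
  # GetHVDefaults("kvm", "debian") would return
  # {"kernel_path": "/boot/vmlinuz", "acpi": False}: OS-specific settings
  # override the cluster-wide hypervisor defaults.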
  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

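  # Usage sketch (illustrative): SimpleFillHV("kvm", "debian",
  # {"acpi": False}, skip_globals=True) layers the given hvparams on top of
  # GetHVDefaults("kvm", "debian"), while leaving out any parameter listed in
  # constants.HVC_GLOBALS, since those may only be set cluster-wide.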
  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type hvparams: dict
    @param hvparams: the hvparams dict to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance whose hvparams will be filled
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance whose beparams will be filled
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

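  # Illustrative precedence for a variant OS name such as "debian+minimal"
  # (hypothetical name): parameters defined for the base OS "debian" are
  # applied first, then those defined for the full variant name
  # "debian+minimal", and finally the explicitly passed os_params win.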
  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)

  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

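  # ndparams are resolved in three layers: cluster-wide defaults first, then
  # the node group's overrides (via NodeGroup.FillND), then the node's own
  # values; the most specific level wins.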
  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of the passed ipolicy with missing keys filled from
        the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)

  def IsDiskTemplateEnabled(self, disk_template):
    """Checks if a particular disk template is enabled.

    """
    return utils.storage.IsDiskTemplateEnabled(
        disk_template, self.enabled_disk_templates)

  def IsFileStorageEnabled(self):
    """Checks if file storage is enabled.

    """
    return utils.storage.IsFileStorageEnabled(self.enabled_disk_templates)

  def IsSharedFileStorageEnabled(self):
    """Checks if shared file storage is enabled.

    """
    return utils.storage.IsSharedFileStorageEnabled(
        self.enabled_disk_templates)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = \
      outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

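  # Each assertion below reads as "this field must be set unless the console
  # kind is one for which it is meaningless"; e.g. a CONS_MESSAGE console only
  # carries a message, so host, port, user, command and display may be unset.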
  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True


class Network(TaggableObject):
  """Object representing a network definition for ganeti.

  """
  __slots__ = [
    "name",
    "serial_no",
    "mac_prefix",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "reservations",
    "ext_reservations",
    ] + _TIMESTAMPS + _UUID

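  # Illustrative result (hypothetical values) for prefix="NIC0_": a network
  # named "net1" with subnet "192.0.2.0/24" and gateway "192.0.2.1" yields
  # NIC0_NETWORK_NAME, NIC0_NETWORK_UUID, NIC0_NETWORK_TAGS,
  # NIC0_NETWORK_SUBNET and NIC0_NETWORK_GATEWAY entries; attributes that are
  # unset on the network are simply omitted from the dict.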
  def HooksDict(self, prefix=""):
    """Export a dictionary used by hooks with a network's information.

    @type prefix: string
    @param prefix: Prefix to prepend to the dict entries

    """
    result = {
      "%sNETWORK_NAME" % prefix: self.name,
      "%sNETWORK_UUID" % prefix: self.uuid,
      "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
    }
    if self.network:
      result["%sNETWORK_SUBNET" % prefix] = self.network
    if self.gateway:
      result["%sNETWORK_GATEWAY" % prefix] = self.gateway
    if self.network6:
      result["%sNETWORK_SUBNET6" % prefix] = self.network6
    if self.gateway6:
      result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
    if self.mac_prefix:
      result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix

    return result

  @classmethod
  def FromDict(cls, val):
    """Custom function for networks.

    Remove deprecated network_type and family.

    """
    if "network_type" in val:
      del val["network_type"]
    if "family" in val:
      del val["family"]
    obj = super(Network, cls).FromDict(val)
    return obj


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
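  # Typical round trip (illustrative): text = cfg.Dumps() renders the
  # INI-style content as a string, and SerializableConfigParser.Loads(text)
  # parses it back into a new instance.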
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp


class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  @type lv_list: list of strings
  @ivar lv_list: names of the LVs hosted on the PV
  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    "lv_list"
    ]

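  # IsEmpty below treats a PV as empty when its used space is at most 1 MiB;
  # the small slack presumably absorbs rounding in the sizes reported by LVM.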
  def IsEmpty(self):
    """Is this PV empty?

    """
    return self.size <= (self.free + 1)

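  # The attributes string is presumably the attribute field reported by LVM's
  # "pvs" command; an "a" anywhere in it marks the PV as allocatable here.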
  def IsAllocatable(self):
    """Is this PV allocatable?

    """
    return ("a" in self.attributes)