#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable=E0203,W0201,R0902

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

# R0902: Allow instances of these objects to have more than 20 attributes

import ConfigParser
import re
import copy
import logging
import time
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants
from ganeti import netutils
from ganeti import outils
from ganeti import utils

from socket import AF_INET


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]


def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
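
# Illustration (not part of the original module): FillDict overlays the custom
# values on a deep copy of the defaults and can drop selected keys, e.g.:
#
#   >>> FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"])
#   {'b': 3}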


def FillIPolicy(default_ipolicy, custom_ipolicy):
  """Fills an instance policy with defaults.

  """
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
  ret_dict = copy.deepcopy(custom_ipolicy)
  for key in default_ipolicy:
    if key not in ret_dict:
      ret_dict[key] = copy.deepcopy(default_ipolicy[key])
    elif key == constants.ISPECS_STD:
      ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
  return ret_dict


def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
  """Fills the disk parameter defaults.

  @see: L{FillDict} for parameters and return value

  """
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES

  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
                             skip_keys=skip_keys))
              for dt in constants.DISK_TEMPLATES)


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target


def UpgradeBeParams(target):
  """Update the be parameters dict to the new format.

  @type target: dict
  @param target: "be" parameters dict

  """
  if constants.BE_MEMORY in target:
    memory = target[constants.BE_MEMORY]
    target[constants.BE_MAXMEM] = memory
    target[constants.BE_MINMEM] = memory
    del target[constants.BE_MEMORY]
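
# Illustration (not part of the original module): the legacy single "memory"
# backend parameter is split into explicit maximum/minimum values, e.g. an
# input of {BE_MEMORY: 512} becomes {BE_MAXMEM: 512, BE_MINMEM: 512} after
# UpgradeBeParams, with the old key removed.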


def UpgradeDiskParams(diskparams):
  """Upgrade the disk parameters.

  @type diskparams: dict
  @param diskparams: disk parameters to upgrade
  @rtype: dict
  @return: the upgraded disk parameters dict

  """
  if not diskparams:
    result = {}
  else:
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)

  return result


def UpgradeNDParams(ndparams):
  """Upgrade ndparams structure.

  @type ndparams: dict
  @param ndparams: node parameters to upgrade
  @rtype: dict
  @return: the upgraded node parameters dict

  """
  if ndparams is None:
    ndparams = {}

  if (constants.ND_OOB_PROGRAM in ndparams and
      ndparams[constants.ND_OOB_PROGRAM] is None):
    # will be reset by the line below
    del ndparams[constants.ND_OOB_PROGRAM]
  return FillDict(constants.NDC_DEFAULTS, ndparams)


def MakeEmptyIPolicy():
  """Create empty IPolicy dictionary.

  """
  return {}


class ConfigObject(outils.ValidatedSlots):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __getattr__(self, name):
    if name not in self.GetAllSlots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    slots = self.GetAllSlots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  def Validate(self):
    """Validates the slots.

    """

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self.GetAllSlots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable=W0142
    return obj

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def __eq__(self, other):
    """Implement __eq__ for ConfigObjects."""
    return isinstance(other, self.__class__) and self.ToDict() == other.ToDict()

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
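
# Illustration (not part of the original module): any subclass can be
# round-tripped through plain dicts, which is how these objects travel to and
# from external parties. The values below are made up:
#
#   >>> nic = NIC(mac="aa:00:00:35:35:35", ip=None)
#   >>> NIC.FromDict(nic.ToDict()).mac
#   'aa:00:00:35:35:35'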


class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
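
# Illustration (not part of the original module): tags are kept as a set in
# memory and serialized as a list; invalid tags are rejected up front. The
# values below are made up:
#
#   >>> inst = Instance(name="inst1.example.com")
#   >>> inst.AddTag("env:prod")
#   >>> sorted(inst.ToDict()["tags"])
#   ['env:prod']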


class MasterNetworkParameters(ConfigObject):
  """Network configuration parameters for the master

  @ivar uuid: master node's UUID
  @ivar ip: master IP
  @ivar netmask: master netmask
  @ivar netdev: master network device
  @ivar ip_family: master IP family

  """
  __slots__ = [
    "uuid",
    "ip",
    "netmask",
    "netdev",
    "ip_family",
    ]


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = [
    "version",
    "cluster",
    "nodes",
    "nodegroups",
    "instances",
    "networks",
    "serial_no",
    ] + _TIMESTAMPS

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances", "nodegroups", "networks":
      mydict[key] = outils.ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = \
      outils.ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = \
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if there is at least one disk of the given type in the
    configuration.

    @type dev_type: L{constants.DTS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      if self.cluster.IsDiskTemplateEnabled(constants.DT_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
    if self.networks is None:
      self.networks = {}
    for network in self.networks.values():
      network.UpgradeConfig()
    self._UpgradeEnabledDiskTemplates()

  def _UpgradeEnabledDiskTemplates(self):
    """Upgrade the cluster's enabled disk templates by inspecting the currently
       enabled and/or used disk templates.

    """
    # enabled_disk_templates in the cluster config were introduced in 2.8.
    # Remove this code once upgrading from earlier versions is deprecated.
    if not self.cluster.enabled_disk_templates:
      template_set = \
        set([inst.disk_template for inst in self.instances.values()])
      # Add drbd and plain, if lvm is enabled (by specifying a volume group)
      if self.cluster.volume_group_name:
        template_set.add(constants.DT_DRBD8)
        template_set.add(constants.DT_PLAIN)
      # Set enabled_disk_templates to the inferred disk templates. Order them
      # according to a preference list that is based on Ganeti's history of
      # supported disk templates.
      self.cluster.enabled_disk_templates = []
      for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
        if preferred_template in template_set:
          self.cluster.enabled_disk_templates.append(preferred_template)
          template_set.remove(preferred_template)
      self.cluster.enabled_disk_templates.extend(list(template_set))


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["name", "mac", "ip", "network", "nicparams", "netinfo"] + _UUID

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams:  dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    mode = nicparams[constants.NIC_MODE]
    if (mode not in constants.NIC_VALID_MODES and
        mode != constants.VALUE_AUTO):
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)

    if (mode == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      raise errors.ConfigurationError("Missing bridged NIC link")


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = (["name", "dev_type", "logical_id", "physical_id",
                "children", "iv_name", "size", "mode", "params", "spindles"] +
               _UUID)

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.DT_PLAIN,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.DT_PLAIN:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    elif self.dev_type == constants.DT_BLOCK:
      return self.logical_id[1]
    elif self.dev_type == constants.DT_RBD:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.DT_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.DTS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node_uuid):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.DT_PLAIN, constants.DT_FILE,
                         constants.DT_BLOCK, constants.DT_RBD,
                         constants.DT_EXT, constants.DT_SHARED_FILE]:
      result = [node_uuid]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node_uuid not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node_uuid):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node UUID, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node_uuid)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def ComputeGrowth(self, amount):
    """Compute the per-VG growth requirements.

    This only works for VG-based disks.

    @type amount: integer
    @param amount: the desired increase in (user-visible) disk space
    @rtype: dict
    @return: a dictionary of volume-groups and the required size

    """
    if self.dev_type == constants.DT_PLAIN:
      return {self.logical_id[0]: amount}
    elif self.dev_type == constants.DT_DRBD8:
      if self.children:
        return self.children[0].ComputeGrowth(amount)
      else:
        return {}
    else:
      # Other disk types do not require VG space
      return {}

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type in (constants.DT_PLAIN, constants.DT_FILE,
                         constants.DT_RBD, constants.DT_EXT,
                         constants.DT_SHARED_FILE):
      self.size += amount
    elif self.dev_type == constants.DT_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def Update(self, size=None, mode=None, spindles=None):
    """Apply changes to size, spindles and mode.

    """
    if self.dev_type == constants.DT_DRBD8:
      if self.children:
        self.children[0].Update(size=size, mode=mode)
    else:
      assert not self.children

    if size is not None:
      self.size = size
    if mode is not None:
      self.mode = mode
    if spindles is not None:
      self.spindles = spindles

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node_uuid, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node_uuid: the node UUID we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node_uuid, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode_uuid, snode_uuid, port, pminor, sminor, secret = self.logical_id
      if target_node_uuid not in (pnode_uuid, snode_uuid):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node_uuid)
      pnode_ip = nodes_ip.get(pnode_uuid, None)
      snode_ip = nodes_ip.get(snode_uuid, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode_uuid == target_node_uuid:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = outils.ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.DT_PLAIN:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if self.spindles is not None:
      val += ", spindles=%s" % self.spindles
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()

    # FIXME: Make this configurable in Ganeti 2.7
    self.params = {}
    # add here config upgrade for this disk

    # map of legacy device types (mapping differing LD constants to new
    # DT constants)
    LEG_DEV_TYPE_MAP = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
    if self.dev_type in LEG_DEV_TYPE_MAP:
      self.dev_type = LEG_DEV_TYPE_MAP[self.dev_type]

  @staticmethod
  def ComputeLDParams(disk_template, disk_params):
    """Computes Logical Disk parameters from Disk Template parameters.

    @type disk_template: string
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
    @type disk_params: dict
    @param disk_params: disk template parameters;
                        dict(template_name -> parameters)
    @rtype: list(dict)
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
      contains the LD parameters of the node. The tree is flattened in-order.

    """
    if disk_template not in constants.DISK_TEMPLATES:
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)

    assert disk_template in disk_params

    result = list()
    dt_params = disk_params[disk_template]
    if disk_template == constants.DT_DRBD8:
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_DRBD8], {
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
        constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL],
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
        }))

      # data LV
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
        }))

      # metadata LV
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
        }))

    elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
      result.append(constants.DISK_LD_DEFAULTS[disk_template])

    elif disk_template == constants.DT_PLAIN:
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
        }))

    elif disk_template == constants.DT_BLOCK:
      result.append(constants.DISK_LD_DEFAULTS[constants.DT_BLOCK])

    elif disk_template == constants.DT_RBD:
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_RBD], {
        constants.LDP_POOL: dt_params[constants.RBD_POOL],
        }))

    elif disk_template == constants.DT_EXT:
      result.append(constants.DISK_LD_DEFAULTS[constants.DT_EXT])

    return result
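
# Illustration (not part of the original module): a DRBD8 disk is described by
# its logical_id of (primary_uuid, secondary_uuid, port, primary_minor,
# secondary_minor, secret); for the DRBD template, ComputeLDParams returns
# three parameter dicts, one for the DRBD device itself and one each for the
# underlying data and metadata logical volumes.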


class InstancePolicy(ConfigObject):
  """Config object representing instance policy limits dictionary.

  Note that this object is not actually used in the config, it's just
  used as a placeholder for a few functions.

  """
  @classmethod
  def CheckParameterSyntax(cls, ipolicy, check_std):
    """Check the instance policy for validity.

    @type ipolicy: dict
    @param ipolicy: dictionary with min/max/std specs and policies
    @type check_std: bool
    @param check_std: Whether to check std value or just assume compliance
    @raise errors.ConfigurationError: when the policy is not legal

    """
    InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
    if constants.IPOLICY_DTS in ipolicy:
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
    for key in constants.IPOLICY_PARAMETERS:
      if key in ipolicy:
        InstancePolicy.CheckParameter(key, ipolicy[key])
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
    if wrong_keys:
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
                                      utils.CommaJoin(wrong_keys))

  @classmethod
  def _CheckIncompleteSpec(cls, spec, keyname):
    missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
    if missing_params:
      msg = ("Missing instance specs parameters for %s: %s" %
             (keyname, utils.CommaJoin(missing_params)))
      raise errors.ConfigurationError(msg)

  @classmethod
  def CheckISpecSyntax(cls, ipolicy, check_std):
    """Check the instance policy specs for validity.

    @type ipolicy: dict
    @param ipolicy: dictionary with min/max/std specs
    @type check_std: bool
    @param check_std: Whether to check std value or just assume compliance
    @raise errors.ConfigurationError: when specs are not valid

    """
    if constants.ISPECS_MINMAX not in ipolicy:
      # Nothing to check
      return

    if check_std and constants.ISPECS_STD not in ipolicy:
      msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
      raise errors.ConfigurationError(msg)
    stdspec = ipolicy.get(constants.ISPECS_STD)
    if check_std:
      InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)

    if not ipolicy[constants.ISPECS_MINMAX]:
      raise errors.ConfigurationError("Empty minmax specifications")
    std_is_good = False
    for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
      missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
      if missing:
        msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
        raise errors.ConfigurationError(msg)
      for (key, spec) in minmaxspecs.items():
        InstancePolicy._CheckIncompleteSpec(spec, key)

      spec_std_ok = True
      for param in constants.ISPECS_PARAMETERS:
        par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
                                                           param, check_std)
        spec_std_ok = spec_std_ok and par_std_ok
      std_is_good = std_is_good or spec_std_ok
    if not std_is_good:
      raise errors.ConfigurationError("Invalid std specifications")

  @classmethod
  def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
    """Check the instance policy specs for validity on a given key.

    We check if the instance specs make sense for a given key, that is
    if minmaxspecs[min][name] <= stdspec[name] <= minmaxspec[max][name].

    @type minmaxspecs: dict
    @param minmaxspecs: dictionary with min and max instance spec
    @type stdspec: dict
    @param stdspec: dictionary with standard instance spec
    @type name: string
    @param name: what are the limits for
    @type check_std: bool
    @param check_std: Whether to check std value or just assume compliance
    @rtype: bool
    @return: C{True} when specs are valid, C{False} when standard spec for the
        given name is not valid
    @raise errors.ConfigurationError: when min/max specs for the given name
        are not valid

    """
    minspec = minmaxspecs[constants.ISPECS_MIN]
    maxspec = minmaxspecs[constants.ISPECS_MAX]
    min_v = minspec[name]
    max_v = maxspec[name]

    if min_v > max_v:
      err = ("Invalid specification of min/max values for %s: %s/%s" %
             (name, min_v, max_v))
      raise errors.ConfigurationError(err)
    elif check_std:
      std_v = stdspec.get(name, min_v)
      return std_v >= min_v and std_v <= max_v
    else:
      return True
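
  # Illustration (not part of the original source): for a parameter such as
  # "memory-size", a minmax pair of min=128 and max=1024 accepts a std value
  # of 512, while a std of 2048 would make the standard spec invalid and
  # min=2048/max=1024 would raise a ConfigurationError outright.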

  @classmethod
  def CheckDiskTemplates(cls, disk_templates):
    """Checks the disk templates for validity.

    """
    if not disk_templates:
      raise errors.ConfigurationError("Instance policy must contain" +
                                      " at least one disk template")
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
    if wrong:
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
                                      utils.CommaJoin(wrong))

  @classmethod
  def CheckParameter(cls, key, value):
    """Checks a parameter.

    Currently we expect all parameters to be float values.

    """
    try:
      float(value)
    except (TypeError, ValueError), err:
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
                                      " '%s', error: %s" % (key, value, err))


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "osparams",
    "admin_state",
    "nics",
    "disks",
    "disk_template",
    "disks_active",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of names of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of names of all the nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node_uuid=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @type lvmap: dict
    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.
    @type devs: list of L{Disk}
    @param devs: disks to get the LV name for. If None, all disks of this
        instance are used.
    @type node_uuid: string
    @param node_uuid: UUID of the node to get the LV names for. If None, the
        primary node of this instance is used.
    @return: None if lvmap arg is given, otherwise, a dictionary of
        the form { 'node_uuid' : ['volume1', 'volume2', ...], ... };
        volumeN is of the form "vg_name/lv_name", compatible with
        GetVolumeList()

    """
    if node_uuid is None:
      node_uuid = self.primary_node

    if lvmap is None:
      lvmap = {
        node_uuid: [],
        }
      ret = lvmap
    else:
      if not node_uuid in lvmap:
        lvmap[node_uuid] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.DT_PLAIN:
        lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node_uuid)

    return ret

  def FindDisk(self, idx):
    """Find a disk given its index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except (TypeError, ValueError), err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
                                 errors.ECODE_INVAL)

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = outils.ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    if "admin_state" not in val:
      if val.get("admin_up", False):
        val["admin_state"] = constants.ADMINST_UP
      else:
        val["admin_state"] = constants.ADMINST_DOWN
    if "admin_up" in val:
      del val["admin_up"]
    obj = super(Instance, cls).FromDict(val)
    obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    if self.hvparams:
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass
    if self.osparams is None:
      self.osparams = {}
    UpgradeBeParams(self.beparams)
    if self.disks_active is None:
      self.disks_active = self.admin_state == constants.ADMINST_UP


class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the supported parameters by this OS

  @type VARIANT_DELIM: string
  @cvar VARIANT_DELIM: the variant delimiter

  """
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]

  VARIANT_DELIM = "+"

  @classmethod
  def SplitNameVariant(cls, name):
    """Splits the name into the proper name and variant.

    @param name: the OS (unprocessed) name
    @rtype: list
    @return: a list of two elements; if the original name didn't
        contain a variant, it's returned as an empty string

    """
    nv = name.split(cls.VARIANT_DELIM, 1)
    if len(nv) == 1:
      nv.append("")
    return nv

  @classmethod
  def GetName(cls, name):
    """Returns the proper name of the os (without the variant).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[0]

  @classmethod
  def GetVariant(cls, name):
    """Returns the variant of the os (without the base name).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[1]
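
  # Illustration (not part of the original source): an OS name such as
  # "debootstrap+wheezy" splits on the "+" delimiter, so GetName() returns
  # "debootstrap" and GetVariant() returns "wheezy"; without a delimiter the
  # variant is the empty string.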


class ExtStorage(ConfigObject):
  """Config object representing an External Storage Provider.

  """
  __slots__ = [
    "name",
    "path",
    "create_script",
    "remove_script",
    "grow_script",
    "attach_script",
    "detach_script",
    "setinfo_script",
    "verify_script",
    "supported_parameters",
    ]


class NodeHvState(ConfigObject):
  """Hypervisor state on a node.

  @ivar mem_total: Total amount of memory
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
    available)
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
    rounding
  @ivar mem_inst: Memory used by instances living on node
  @ivar cpu_total: Total node CPU core count
  @ivar cpu_node: Number of CPU cores reserved for the node itself

  """
  __slots__ = [
    "mem_total",
    "mem_node",
    "mem_hv",
    "mem_inst",
    "cpu_total",
    "cpu_node",
    ] + _TIMESTAMPS


class NodeDiskState(ConfigObject):
  """Disk state on a node.

  """
  __slots__ = [
    "total",
    "reserved",
    "overhead",
    ] + _TIMESTAMPS


class Node(TaggableObject):
  """Config object representing a node.

  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
  @ivar hv_state_static: Hypervisor state overridden by user
  @ivar disk_state: Disk state (e.g. free space)
  @ivar disk_state_static: Disk state overridden by user

  """
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    "group",
    "master_capable",
    "vm_capable",
    "ndparams",
    "powered",
    "hv_state",
    "hv_state_static",
    "disk_state",
    "disk_state_static",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.master_capable is None:
      self.master_capable = True

    if self.vm_capable is None:
      self.vm_capable = True

    if self.ndparams is None:
      self.ndparams = {}
    # And remove any global parameter
    for key in constants.NDC_GLOBALS:
      if key in self.ndparams:
        logging.warning("Ignoring %s node parameter for node %s",
                        key, self.name)
        del self.ndparams[key]

    if self.powered is None:
      self.powered = True

  def ToDict(self):
    """Custom function for serializing.

    """
    data = super(Node, self).ToDict()

    hv_state = data.get("hv_state", None)
    if hv_state is not None:
      data["hv_state"] = outils.ContainerToDicts(hv_state)

    disk_state = data.get("disk_state", None)
    if disk_state is not None:
      data["disk_state"] = \
        dict((key, outils.ContainerToDicts(value))
             for (key, value) in disk_state.items())

    return data

  @classmethod
  def FromDict(cls, val):
    """Custom function for deserializing.

    """
    obj = super(Node, cls).FromDict(val)

    if obj.hv_state is not None:
      obj.hv_state = \
        outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)

    if obj.disk_state is not None:
      obj.disk_state = \
        dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
             for (key, value) in obj.disk_state.items())

    return obj


class NodeGroup(TaggableObject):
  """Config object representing a node group."""
  __slots__ = [
    "name",
    "members",
    "ndparams",
    "diskparams",
    "ipolicy",
    "serial_no",
    "hv_state_static",
    "disk_state_static",
    "alloc_policy",
    "networks",
    ] + _TIMESTAMPS + _UUID

  def ToDict(self):
    """Custom function for nodegroup.

    This discards the members object, which gets recalculated and is only kept
    in memory.

    """
    mydict = super(NodeGroup, self).ToDict()
    del mydict["members"]
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for nodegroup.

    The members slot is initialized to an empty list, upon deserialization.

    """
    obj = super(NodeGroup, cls).FromDict(val)
    obj.members = []
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.ndparams is None:
      self.ndparams = {}

    if self.serial_no is None:
      self.serial_no = 1

    if self.alloc_policy is None:
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED

    # We only update mtime, and not ctime, since we would not be able
    # to provide a correct value for creation time.
    if self.mtime is None:
      self.mtime = time.time()

    if self.diskparams is None:
      self.diskparams = {}
    if self.ipolicy is None:
      self.ipolicy = MakeEmptyIPolicy()

    if self.networks is None:
      self.networks = {}

  def FillND(self, node):
    """Return filled out ndparams for L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(node.ndparams)

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the node group defaults

    """
    return FillDict(self.ndparams, ndparams)


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "dsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "master_netmask",
    "use_external_mip_script",
    "cluster_name",
    "file_storage_dir",
    "shared_file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "ipolicy",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "ndparams",
    "diskparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    "hv_state_static",
    "disk_state_static",
    "enabled_disk_templates",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.ndparams = UpgradeNDParams(self.ndparams)

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    if self.diskparams:
      self.diskparams = UpgradeDiskParams(self.diskparams)
    else:
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
      if wrongkeys:
        # These keys would be silently removed by FillIPolicy()
        msg = ("Cluster instance policy contains spurious keys: %s" %
               utils.CommaJoin(wrongkeys))
        raise errors.ConfigurationError(msg)
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)

  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()

    if self.tcpudp_port_pool is None:
      tcpudp_port_pool = []
    else:
      tcpudp_port_pool = list(self.tcpudp_port_pool)

    mydict["tcpudp_port_pool"] = tcpudp_port_pool

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)

    if obj.tcpudp_port_pool is None:
      obj.tcpudp_port_pool = set()
    elif not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)

    return obj
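
  # Serialization sketch (hypothetical values): ToDict() always emits
  # "tcpudp_port_pool" as a plain list (e.g. [11000, 11001]) so the result
  # stays JSON-friendly, while FromDict() turns it back into a set for cheap
  # membership tests; an unset pool round-trips as [] on disk and set() in
  # memory.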

  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @type diskparams: dict
    @param diskparams: the dict to fill
    @rtype: dict
    @return: the cluster disk parameter defaults with the given diskparams
        applied on top

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict
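
  # Layering sketch (hypothetical values): with cluster-wide
  # hvparams["kvm"] = {"kernel_path": "/boot/vmlinuz", "acpi": True} and
  # os_hvp["debian"]["kvm"] = {"acpi": False}, GetHVDefaults("kvm", "debian")
  # returns {"kernel_path": "/boot/vmlinuz", "acpi": False}: the per-OS
  # overrides are applied on top of the hypervisor defaults by the FillDict()
  # loop above.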

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type hvparams: dict
    @param hvparams: the dict to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance whose hvparams should be filled
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
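
  # Example sketch (hypothetical values): if the cluster-wide default beparams
  # are {"maxmem": 128, "vcpus": 1} and an instance only specifies
  # {"vcpus": 2}, SimpleFillBE({"vcpus": 2}) returns
  # {"maxmem": 128, "vcpus": 2}; note that only the constants.PP_DEFAULT group
  # of self.beparams is consulted here.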

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance whose beparams should be filled
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
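
  # Precedence sketch (hypothetical OS name): for os_name "debian+secure",
  # SimpleFillOS starts from self.osparams.get("debian", {}), overlays
  # self.osparams.get("debian+secure", {}) on top, and finally applies the
  # explicitly given os_params, so the most specific source wins for each key.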

  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)

  def IsDiskTemplateEnabled(self, disk_template):
    """Checks if a particular disk template is enabled.

    """
    return utils.storage.IsDiskTemplateEnabled(
        disk_template, self.enabled_disk_templates)

  def IsFileStorageEnabled(self):
    """Checks if file storage is enabled.

    """
    return utils.storage.IsFileStorageEnabled(self.enabled_disk_templates)

  def IsSharedFileStorageEnabled(self):
    """Checks if shared file storage is enabled.

    """
    return utils.storage.IsSharedFileStorageEnabled(
        self.enabled_disk_templates)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = \
      outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True
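
  # Construction sketch (hypothetical host and instance names): a console
  # reachable over SSH would be built roughly as
  #   InstanceConsole(instance="inst1.example.com", kind=constants.CONS_SSH,
  #                   host="node1.example.com", user="root",
  #                   command=["ssh", "node1.example.com"])
  # and must then pass Validate(); message, port and display may stay unset
  # for this kind, as the assertions above allow.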


class Network(TaggableObject):
  """Object representing a network definition for ganeti.

  """
  __slots__ = [
    "name",
    "serial_no",
    "mac_prefix",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "reservations",
    "ext_reservations",
    ] + _TIMESTAMPS + _UUID

  def HooksDict(self, prefix=""):
    """Export a dictionary used by hooks with a network's information.

    @type prefix: String
    @param prefix: Prefix to prepend to the dict entries

    """
    result = {
      "%sNETWORK_NAME" % prefix: self.name,
      "%sNETWORK_UUID" % prefix: self.uuid,
      "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
    }
    if self.network:
      result["%sNETWORK_SUBNET" % prefix] = self.network
    if self.gateway:
      result["%sNETWORK_GATEWAY" % prefix] = self.gateway
    if self.network6:
      result["%sNETWORK_SUBNET6" % prefix] = self.network6
    if self.gateway6:
      result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
    if self.mac_prefix:
      result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix

    return result
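
  # Output sketch (hypothetical values): for a network named "net1" with
  # network "10.0.0.0/24" and gateway "10.0.0.1", HooksDict("NEW_") would
  # contain "NEW_NETWORK_NAME": "net1", "NEW_NETWORK_SUBNET": "10.0.0.0/24"
  # and "NEW_NETWORK_GATEWAY": "10.0.0.1"; optional attributes that are unset
  # are simply left out of the result.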

  @classmethod
  def FromDict(cls, val):
    """Custom function for networks.

    Remove deprecated network_type and family.

    """
    if "network_type" in val:
      del val["network_type"]
    if "family" in val:
      del val["family"]
    obj = super(Network, cls).FromDict(val)
    return obj


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/deserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
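
  # Round-trip sketch (hypothetical section and option names): Dumps() renders
  # the parser to an INI-style string and Loads() parses such a string into a
  # new instance, so SerializableConfigParser.Loads(cfg.Dumps()).get("os",
  # "variant") returns whatever value "variant" had in the original "os"
  # section.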


class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  @type lv_list: list of strings
  @ivar lv_list: names of the LVs hosted on the PV
  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    "lv_list"
    ]

  def IsEmpty(self):
    """Is this PV empty?

    """
    return self.size <= (self.free + 1)

  def IsAllocatable(self):
    """Is this PV allocatable?

    """
    return ("a" in self.attributes)