Statistics
| Branch: | Tag: | Revision:

root / lib / config.py @ da5f09ef

History | View | Annotate | Download (85.6 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Configuration management for Ganeti
23

24
This module provides the interface to the Ganeti cluster configuration.
25

26
The configuration data is stored on every node but is updated on the master
27
only. After each update, the master distributes the data to the other nodes.
28

29
Currently, the data storage format is JSON. YAML was slow and consuming too
30
much memory.
31

32
"""
33

    
34
# pylint: disable=R0904
35
# R0904: Too many public methods
36

    
37
import copy
38
import os
39
import random
40
import logging
41
import time
42
import itertools
43

    
44
from ganeti import errors
45
from ganeti import locking
46
from ganeti import utils
47
from ganeti import constants
48
from ganeti import rpc
49
from ganeti import objects
50
from ganeti import serializer
51
from ganeti import uidpool
52
from ganeti import netutils
53
from ganeti import runtime
54
from ganeti import pathutils
55
from ganeti import network
56

    
57

    
58
_config_lock = locking.SharedLock("ConfigWriter")
59

    
60
# job id used for resource management at config upgrade time
61
_UPGRADE_CONFIG_JID = "jid-cfg-upgrade"
62

    
63

    
64
def _ValidateConfig(data):
  """Verifies that a configuration objects looks valid.

  This only verifies the version of the configuration.

  @raise errors.ConfigurationError: if the version differs from what
      we expect

  """
  if data.version == constants.CONFIG_VERSION:
    return
  raise errors.ConfigVersionMismatch(constants.CONFIG_VERSION, data.version)
75

    
76

    
77
class TemporaryReservationManager:
  """A temporary resource reservation manager.

  This is used to reserve resources in a job, before using them, making sure
  other jobs cannot get them in the meantime.

  """
  def __init__(self):
    # maps execution context id -> set of resources reserved by that context
    self._ec_reserved = {}

  def Reserved(self, resource):
    """Tell whether C{resource} is reserved by any execution context."""
    for holder_reserved in self._ec_reserved.values():
      if resource in holder_reserved:
        return True
    return False

  def Reserve(self, ec_id, resource):
    """Reserve C{resource} on behalf of execution context C{ec_id}.

    @raise errors.ReservationError: if the resource is already reserved

    """
    if self.Reserved(resource):
      raise errors.ReservationError("Duplicate reservation for resource '%s'"
                                    % str(resource))
    if ec_id not in self._ec_reserved:
      self._ec_reserved[ec_id] = set([resource])
    else:
      self._ec_reserved[ec_id].add(resource)

  def DropECReservations(self, ec_id):
    """Drop all reservations held by execution context C{ec_id}."""
    if ec_id in self._ec_reserved:
      del self._ec_reserved[ec_id]

  def GetReserved(self):
    """Return the set of resources reserved by all execution contexts."""
    all_reserved = set()
    for holder_reserved in self._ec_reserved.values():
      all_reserved.update(holder_reserved)
    return all_reserved

  def GetECReserved(self, ec_id):
    """ Used when you want to retrieve all reservations for a specific
        execution context. E.g when committing reserved IPs for a specific
        network.

    """
    ec_reserved = set()
    if ec_id in self._ec_reserved:
      ec_reserved.update(self._ec_reserved[ec_id])
    return ec_reserved

  def Generate(self, existing, generate_one_fn, ec_id):
    """Generate a new resource of this type

    @param existing: iterable of resources that are already taken
    @param generate_one_fn: callable producing one candidate resource
        (or None) per call
    @param ec_id: execution context id to reserve the new resource for
    @raise errors.ConfigurationError: if no free resource could be
        generated within the retry budget

    """
    assert callable(generate_one_fn)

    all_elems = self.GetReserved()
    all_elems.update(existing)
    # Bug fix: the retry counter was previously never decremented, so the
    # loop could only exit via "break" and the "else: raise" branch was
    # unreachable -- exhausting the resource space hung forever instead of
    # raising ConfigurationError.
    retries = 64
    new_resource = None
    while retries > 0:
      new_resource = generate_one_fn()
      if new_resource is not None and new_resource not in all_elems:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Not able generate new resource"
                                      " (last tried: %s)" % new_resource)
    self.Reserve(ec_id, new_resource)
    return new_resource
141

    
142

    
143
def _MatchNameComponentIgnoreCase(short_name, names):
  """Wrapper around L{utils.text.MatchNameComponent}.

  Performs the same match, but always case-insensitively.

  """
  return utils.MatchNameComponent(short_name, names, case_sensitive=False)
148

    
149

    
150
def _CheckInstanceDiskIvNames(disks):
151
  """Checks if instance's disks' C{iv_name} attributes are in order.
152

153
  @type disks: list of L{objects.Disk}
154
  @param disks: List of disks
155
  @rtype: list of tuples; (int, string, string)
156
  @return: List of wrongly named disks, each tuple contains disk index,
157
    expected and actual name
158

159
  """
160
  result = []
161

    
162
  for (idx, disk) in enumerate(disks):
163
    exp_iv_name = "disk/%s" % idx
164
    if disk.iv_name != exp_iv_name:
165
      result.append((idx, exp_iv_name, disk.iv_name))
166

    
167
  return result
168

    
169

    
170
class ConfigWriter:
171
  """The interface to the cluster configuration.
172

173
  @ivar _temporary_lvs: reservation manager for temporary LVs
174
  @ivar _all_rms: a list of all temporary reservation managers
175

176
  """
177
  def __init__(self, cfg_file=None, offline=False, _getents=runtime.GetEnts,
               accept_foreign=False):
    """Initialize the config writer and load the configuration.

    @type cfg_file: string or None
    @param cfg_file: path of the configuration file; defaults to
        L{pathutils.CLUSTER_CONF_FILE} when None
    @type offline: boolean
    @param offline: stored as-is; presumably disables config distribution
        to other nodes -- confirm against the users of C{self._offline}
    @param _getents: entity-resolution callable (defaults to
        L{runtime.GetEnts}); leading underscore suggests it is only meant
        to be overridden by tests
    @type accept_foreign: boolean
    @param accept_foreign: passed through to L{_OpenConfig}

    """
    self.write_count = 0
    self._lock = _config_lock
    self._config_data = None
    self._offline = offline
    if cfg_file is None:
      self._cfg_file = pathutils.CLUSTER_CONF_FILE
    else:
      self._cfg_file = cfg_file
    self._getents = _getents
    # per-resource-type temporary reservation managers
    self._temporary_ids = TemporaryReservationManager()
    self._temporary_drbds = {}
    self._temporary_macs = TemporaryReservationManager()
    self._temporary_secrets = TemporaryReservationManager()
    self._temporary_lvs = TemporaryReservationManager()
    self._temporary_ips = TemporaryReservationManager()
    self._all_rms = [self._temporary_ids, self._temporary_macs,
                     self._temporary_secrets, self._temporary_lvs,
                     self._temporary_ips]
    # Note: in order to prevent errors when resolving our name in
    # _DistributeConfig, we compute it here once and reuse it; it's
    # better to raise an error before starting to modify the config
    # file than after it was modified
    self._my_hostname = netutils.Hostname.GetSysName()
    self._last_cluster_serial = -1
    self._cfg_id = None
    self._context = None
    # actually load (and validate) the configuration file; must come last,
    # after all the attributes above are initialized
    self._OpenConfig(accept_foreign)
206

    
207
  def _GetRpc(self, address_list):
    """Returns RPC runner for configuration.

    @param address_list: addresses the runner should target

    """
    runner = rpc.ConfigRunner(self._context, address_list)
    return runner
212

    
213
  def SetContext(self, context):
    """Sets Ganeti context.

    The stored context is later handed to L{rpc.ConfigRunner} by
    L{_GetRpc}.

    """
    self._context = context
218

    
219
  # this method needs to be static, so that we can call it on the class
  @staticmethod
  def IsCluster():
    """Check if the cluster is configured.

    @rtype: boolean
    @return: whether the cluster configuration file exists

    """
    conf_exists = os.path.exists(pathutils.CLUSTER_CONF_FILE)
    return conf_exists
226

    
227
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNdParams(self, node):
    """Get the node params populated with cluster defaults.

    @type node: L{objects.Node}
    @param node: The node we want to know the params for
    @return: A dict with the filled in node params

    """
    group = self._UnlockedGetNodeGroup(node.group)
    return self._config_data.cluster.FillND(node, group)
238

    
239
  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceDiskParams(self, instance):
    """Get the disk params populated with inherit chain.

    @type instance: L{objects.Instance}
    @param instance: The instance we want to know the params for
    @return: A dict with the filled in disk params

    """
    primary = self._UnlockedGetNodeInfo(instance.primary_node)
    group = self._UnlockedGetNodeGroup(primary.group)
    return self._UnlockedGetGroupDiskParams(group)
251

    
252
  @locking.ssynchronized(_config_lock, shared=1)
  def GetGroupDiskParams(self, group):
    """Get the disk params populated with inherit chain.

    Locked wrapper around L{_UnlockedGetGroupDiskParams}.

    @type group: L{objects.NodeGroup}
    @param group: The group we want to know the params for
    @return: A dict with the filled in disk params

    """
    return self._UnlockedGetGroupDiskParams(group)
262

    
263
  def _UnlockedGetGroupDiskParams(self, group):
    """Get the disk params populated with inherit chain down to node-group.

    @type group: L{objects.NodeGroup}
    @param group: The group we want to know the params for
    @return: A dict with the filled in disk params

    """
    cluster = self._config_data.cluster
    return cluster.SimpleFillDP(group.diskparams)
272

    
273
  def _UnlockedGetNetworkMACPrefix(self, net_uuid):
    """Return the network mac prefix if it exists or the cluster level default.

    """
    if not net_uuid:
      return None
    net_obj = self._UnlockedGetNetwork(net_uuid)
    if net_obj.mac_prefix:
      return net_obj.mac_prefix
    return None
284

    
285
  def _GenerateOneMAC(self, prefix=None):
    """Return a function that randomly generates a MAC suffix
       and appends it to the given prefix. If prefix is not given get
       the cluster level default.

    """
    if not prefix:
      prefix = self._config_data.cluster.mac_prefix

    def GenMac():
      # three random bytes make up the suffix
      octets = tuple(random.randrange(0, 256) for _ in range(3))
      return "%s:%02x:%02x:%02x" % ((prefix,) + octets)

    return GenMac
302

    
303
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateMAC(self, net_uuid, ec_id):
    """Generate a MAC for an instance.

    This should check the current instances for duplicates.

    @param net_uuid: uuid of the network providing the MAC prefix, if
        any; otherwise the cluster-level prefix is used
    @type ec_id: string
    @param ec_id: unique id for the job to reserve the MAC to
    @return: the newly generated MAC address

    """
    existing = self._AllMACs()
    prefix = self._UnlockedGetNetworkMACPrefix(net_uuid)
    gen_mac = self._GenerateOneMAC(prefix)
    # Fix: reserve generated MACs in the MAC reservation manager, the same
    # one used by ReserveMAC; previously they went into _temporary_ids, so
    # the two reservation paths could not see each other's MACs.
    return self._temporary_macs.Generate(existing, gen_mac, ec_id)
314

    
315
  @locking.ssynchronized(_config_lock, shared=1)
  def ReserveMAC(self, mac, ec_id):
    """Reserve a MAC for an instance.

    This only checks instances managed by this cluster, it does not
    check for potential collisions elsewhere.

    @raise errors.ReservationError: if the MAC is already used by an
        instance in the configuration

    """
    if mac in self._AllMACs():
      raise errors.ReservationError("mac already in use")
    self._temporary_macs.Reserve(ec_id, mac)
328

    
329
  def _UnlockedCommitTemporaryIps(self, ec_id):
    """Commit all reserved IP address to their respective pools

    """
    reserved = self._temporary_ips.GetECReserved(ec_id)
    for (action, address, net_uuid) in reserved:
      self._UnlockedCommitIp(action, net_uuid, address)
335

    
336
  def _UnlockedCommitIp(self, action, net_uuid, address):
    """Commit a reserved IP address to an IP pool.

    The IP address is taken from the network's IP pool and marked as reserved.

    """
    net_obj = self._UnlockedGetNetwork(net_uuid)
    addr_pool = network.AddressPool(net_obj)
    if action == constants.RESERVE_ACTION:
      addr_pool.Reserve(address)
    elif action == constants.RELEASE_ACTION:
      addr_pool.Release(address)
348

    
349
  def _UnlockedReleaseIp(self, net_uuid, address, ec_id):
    """Give a specific IP address back to an IP pool.

    The IP address is returned to the IP pool designated by pool_id and marked
    as reserved.

    """
    entry = (constants.RELEASE_ACTION, address, net_uuid)
    self._temporary_ips.Reserve(ec_id, entry)
358

    
359
  @locking.ssynchronized(_config_lock, shared=1)
  def ReleaseIp(self, net_uuid, address, ec_id):
    """Give a specified IP address back to an IP pool.

    This is just a wrapper around _UnlockedReleaseIp.

    """
    if not net_uuid:
      return
    self._UnlockedReleaseIp(net_uuid, address, ec_id)
368

    
369
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateIp(self, net_uuid, ec_id):
    """Find a free IPv4 address for an instance.

    """
    net_obj = self._UnlockedGetNetwork(net_uuid)
    addr_pool = network.AddressPool(net_obj)

    def _GenOne():
      try:
        free_ip = addr_pool.GenerateFree()
      except errors.AddressPoolError:
        raise errors.ReservationError("Cannot generate IP. Network is full")
      return (constants.RESERVE_ACTION, free_ip, net_uuid)

    (_, address, _) = self._temporary_ips.Generate([], _GenOne, ec_id)
    return address
386

    
387
  def _UnlockedReserveIp(self, net_uuid, address, ec_id):
    """Reserve a given IPv4 address for use by an instance.

    @raise errors.ReservationError: if the address is outside the
        network or already in use

    """
    addr_pool = network.AddressPool(self._UnlockedGetNetwork(net_uuid))
    try:
      already_taken = addr_pool.IsReserved(address)
    except errors.AddressPoolError:
      raise errors.ReservationError("IP address not in network")
    if already_taken:
      raise errors.ReservationError("IP address already in use")

    entry = (constants.RESERVE_ACTION, address, net_uuid)
    return self._temporary_ips.Reserve(ec_id, entry)
403

    
404
  @locking.ssynchronized(_config_lock, shared=1)
  def ReserveIp(self, net_uuid, address, ec_id):
    """Reserve a given IPv4 address for use by an instance.

    Locked wrapper around L{_UnlockedReserveIp}; a no-op when no network
    uuid is given.

    """
    if not net_uuid:
      return None
    return self._UnlockedReserveIp(net_uuid, address, ec_id)
411

    
412
  @locking.ssynchronized(_config_lock, shared=1)
  def ReserveLV(self, lv_name, ec_id):
    """Reserve an VG/LV pair for an instance.

    @type lv_name: string
    @param lv_name: the logical volume name to reserve
    @raise errors.ReservationError: if the LV is already used by an
        instance

    """
    if lv_name in self._AllLVs():
      raise errors.ReservationError("LV already in use")
    self._temporary_lvs.Reserve(ec_id, lv_name)
425

    
426
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateDRBDSecret(self, ec_id):
    """Generate a DRBD secret.

    This checks the current disks for duplicates.

    """
    existing = self._AllDRBDSecrets()
    return self._temporary_secrets.Generate(existing, utils.GenerateSecret,
                                            ec_id)
436

    
437
  def _AllLVs(self):
    """Compute the list of all LVs.

    @rtype: set
    @return: the names of every LV used by any instance

    """
    lv_names = set()
    for instance in self._config_data.instances.values():
      for lv_list in instance.MapLVsByNode().values():
        lv_names.update(lv_list)
    return lv_names
447

    
448
  def _AllIDs(self, include_temporary):
    """Compute the list of all UUIDs and names we have.

    @type include_temporary: boolean
    @param include_temporary: whether to include the _temporary_ids set
    @rtype: set
    @return: a set of IDs

    """
    existing = set(self._AllLVs())
    if include_temporary:
      existing.update(self._temporary_ids.GetReserved())
    existing.update(self._config_data.instances.keys())
    existing.update(self._config_data.nodes.keys())
    existing.update(obj.uuid for obj in self._AllUUIDObjects() if obj.uuid)
    return existing
465

    
466
  def _GenerateUniqueID(self, ec_id):
    """Generate an unique UUID.

    This checks the current node, instances and disk names for
    duplicates.

    @rtype: string
    @return: the unique id

    """
    taken = self._AllIDs(include_temporary=False)
    return self._temporary_ids.Generate(taken, utils.NewUUID, ec_id)
478

    
479
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateUniqueID(self, ec_id):
    """Generate an unique ID.

    Locked wrapper over L{_GenerateUniqueID}.

    @type ec_id: string
    @param ec_id: unique id for the job to reserve the id to

    """
    return self._GenerateUniqueID(ec_id)
490

    
491
  def _AllMACs(self):
    """Return all MACs present in the config.

    @rtype: list
    @return: the list of all MACs

    """
    return [nic.mac
            for instance in self._config_data.instances.values()
            for nic in instance.nics]
504

    
505
  def _AllDRBDSecrets(self):
    """Return all DRBD secrets present in the config.

    @rtype: list
    @return: the list of all DRBD secrets

    """
    secrets = []
    for instance in self._config_data.instances.values():
      # iterative pre-order walk over each disk tree (same visiting
      # order as the previous recursive implementation)
      pending = list(instance.disks)
      while pending:
        disk = pending.pop(0)
        if disk.dev_type == constants.DT_DRBD8:
          secrets.append(disk.logical_id[5])
        if disk.children:
          pending = list(disk.children) + pending
    return secrets
526

    
527
  def _CheckDiskIDs(self, disk, l_ids, p_ids):
    """Compute duplicate disk IDs

    @type disk: L{objects.Disk}
    @param disk: the disk at which to start searching
    @type l_ids: list
    @param l_ids: list of current logical ids
    @type p_ids: list
    @param p_ids: list of current physical ids
    @rtype: list
    @return: a list of error messages

    """
    result = []
    # NOTE: l_ids and p_ids are mutated in place; the caller's lists
    # accumulate every id seen across the whole recursive walk, which is
    # what makes cross-disk duplicate detection work
    if disk.logical_id is not None:
      if disk.logical_id in l_ids:
        result.append("duplicate logical id %s" % str(disk.logical_id))
      else:
        l_ids.append(disk.logical_id)
    if disk.physical_id is not None:
      if disk.physical_id in p_ids:
        result.append("duplicate physical id %s" % str(disk.physical_id))
      else:
        p_ids.append(disk.physical_id)

    # recurse into child disks, collecting their error messages too
    if disk.children:
      for child in disk.children:
        result.extend(self._CheckDiskIDs(child, l_ids, p_ids))
    return result
556

    
557
  def _UnlockedVerifyConfig(self):
    """Verify function.

    Runs a series of consistency checks over the in-memory configuration
    (cluster parameters, instances, nodes, node groups, DRBD minors and
    IP addresses) and collects human-readable error messages.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    # pylint: disable=R0914
    result = []
    seen_macs = []
    # maps port number -> list of (owner, usage description) tuples
    ports = {}
    data = self._config_data
    cluster = data.cluster
    seen_lids = []
    seen_pids = []

    # global cluster checks
    if not cluster.enabled_hypervisors:
      result.append("enabled hypervisors list doesn't have any entries")
    invalid_hvs = set(cluster.enabled_hypervisors) - constants.HYPER_TYPES
    if invalid_hvs:
      result.append("enabled hypervisors contains invalid entries: %s" %
                    invalid_hvs)
    missing_hvp = (set(cluster.enabled_hypervisors) -
                   set(cluster.hvparams.keys()))
    if missing_hvp:
      result.append("hypervisor parameters missing for the enabled"
                    " hypervisor(s) %s" % utils.CommaJoin(missing_hvp))

    if cluster.master_node not in data.nodes:
      result.append("cluster has invalid primary node '%s'" %
                    cluster.master_node)

    # The helpers below append error messages into `result` via closure.
    def _helper(owner, attr, value, template):
      try:
        utils.ForceDictType(value, template)
      except errors.GenericError, err:
        result.append("%s has invalid %s: %s" % (owner, attr, err))

    def _helper_nic(owner, params):
      try:
        objects.NIC.CheckParameterSyntax(params)
      except errors.ConfigurationError, err:
        result.append("%s has invalid nicparams: %s" % (owner, err))

    def _helper_ipolicy(owner, ipolicy, iscluster):
      try:
        objects.InstancePolicy.CheckParameterSyntax(ipolicy, iscluster)
      except errors.ConfigurationError, err:
        result.append("%s has invalid instance policy: %s" % (owner, err))
      for key, value in ipolicy.items():
        if key == constants.ISPECS_MINMAX:
          _helper_ispecs(owner, "ipolicy/" + key, value)
        elif key == constants.ISPECS_STD:
          _helper(owner, "ipolicy/" + key, value,
                  constants.ISPECS_PARAMETER_TYPES)
        else:
          # FIXME: assuming list type
          if key in constants.IPOLICY_PARAMETERS:
            exp_type = float
          else:
            exp_type = list
          if not isinstance(value, exp_type):
            result.append("%s has invalid instance policy: for %s,"
                          " expecting %s, got %s" %
                          (owner, key, exp_type.__name__, type(value)))

    def _helper_ispecs(owner, parentkey, params):
      for (key, value) in params.items():
        fullkey = "/".join([parentkey, key])
        _helper(owner, fullkey, value, constants.ISPECS_PARAMETER_TYPES)

    # check cluster parameters
    _helper("cluster", "beparams", cluster.SimpleFillBE({}),
            constants.BES_PARAMETER_TYPES)
    _helper("cluster", "nicparams", cluster.SimpleFillNIC({}),
            constants.NICS_PARAMETER_TYPES)
    _helper_nic("cluster", cluster.SimpleFillNIC({}))
    _helper("cluster", "ndparams", cluster.SimpleFillND({}),
            constants.NDS_PARAMETER_TYPES)
    _helper_ipolicy("cluster", cluster.ipolicy, True)

    # per-instance checks
    for instance_name in data.instances:
      instance = data.instances[instance_name]
      if instance.name != instance_name:
        result.append("instance '%s' is indexed by wrong name '%s'" %
                      (instance.name, instance_name))
      if instance.primary_node not in data.nodes:
        result.append("instance '%s' has invalid primary node '%s'" %
                      (instance_name, instance.primary_node))
      for snode in instance.secondary_nodes:
        if snode not in data.nodes:
          result.append("instance '%s' has invalid secondary node '%s'" %
                        (instance_name, snode))
      for idx, nic in enumerate(instance.nics):
        if nic.mac in seen_macs:
          result.append("instance '%s' has NIC %d mac %s duplicate" %
                        (instance_name, idx, nic.mac))
        else:
          seen_macs.append(nic.mac)
        if nic.nicparams:
          filled = cluster.SimpleFillNIC(nic.nicparams)
          owner = "instance %s nic %d" % (instance.name, idx)
          _helper(owner, "nicparams",
                  filled, constants.NICS_PARAMETER_TYPES)
          _helper_nic(owner, filled)

      # parameter checks
      if instance.beparams:
        _helper("instance %s" % instance.name, "beparams",
                cluster.FillBE(instance), constants.BES_PARAMETER_TYPES)

      # gather the drbd ports for duplicate checks
      for (idx, dsk) in enumerate(instance.disks):
        if dsk.dev_type in constants.LDS_DRBD:
          tcp_port = dsk.logical_id[2]
          if tcp_port not in ports:
            ports[tcp_port] = []
          ports[tcp_port].append((instance.name, "drbd disk %s" % idx))
      # gather network port reservation
      net_port = getattr(instance, "network_port", None)
      if net_port is not None:
        if net_port not in ports:
          ports[net_port] = []
        ports[net_port].append((instance.name, "network port"))

      # instance disk verify
      for idx, disk in enumerate(instance.disks):
        result.extend(["instance '%s' disk %d error: %s" %
                       (instance.name, idx, msg) for msg in disk.Verify()])
        result.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids))

      wrong_names = _CheckInstanceDiskIvNames(instance.disks)
      if wrong_names:
        tmp = "; ".join(("name of disk %s should be '%s', but is '%s'" %
                         (idx, exp_name, actual_name))
                        for (idx, exp_name, actual_name) in wrong_names)

        result.append("Instance '%s' has wrongly named disks: %s" %
                      (instance.name, tmp))

    # cluster-wide pool of free ports
    for free_port in cluster.tcpudp_port_pool:
      if free_port not in ports:
        ports[free_port] = []
      ports[free_port].append(("cluster", "port marked as free"))

    # compute tcp/udp duplicate ports
    keys = ports.keys()
    keys.sort()
    for pnum in keys:
      pdata = ports[pnum]
      if len(pdata) > 1:
        txt = utils.CommaJoin(["%s/%s" % val for val in pdata])
        result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))

    # highest used tcp port check
    if keys:
      if keys[-1] > cluster.highest_used_port:
        result.append("Highest used port mismatch, saved %s, computed %s" %
                      (cluster.highest_used_port, keys[-1]))

    if not data.nodes[cluster.master_node].master_candidate:
      result.append("Master node is not a master candidate")

    # master candidate checks
    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats()
    if mc_now < mc_max:
      result.append("Not enough master candidates: actual %d, target %d" %
                    (mc_now, mc_max))

    # node checks
    for node_name, node in data.nodes.items():
      if node.name != node_name:
        result.append("Node '%s' is indexed by wrong name '%s'" %
                      (node.name, node_name))
      # a node may be at most one of master candidate / drained / offline
      if [node.master_candidate, node.drained, node.offline].count(True) > 1:
        result.append("Node %s state is invalid: master_candidate=%s,"
                      " drain=%s, offline=%s" %
                      (node.name, node.master_candidate, node.drained,
                       node.offline))
      if node.group not in data.nodegroups:
        result.append("Node '%s' has invalid group '%s'" %
                      (node.name, node.group))
      else:
        _helper("node %s" % node.name, "ndparams",
                cluster.FillND(node, data.nodegroups[node.group]),
                constants.NDS_PARAMETER_TYPES)
      used_globals = constants.NDC_GLOBALS.intersection(node.ndparams)
      if used_globals:
        result.append("Node '%s' has some global parameters set: %s" %
                      (node.name, utils.CommaJoin(used_globals)))

    # nodegroups checks
    nodegroups_names = set()
    for nodegroup_uuid in data.nodegroups:
      nodegroup = data.nodegroups[nodegroup_uuid]
      if nodegroup.uuid != nodegroup_uuid:
        result.append("node group '%s' (uuid: '%s') indexed by wrong uuid '%s'"
                      % (nodegroup.name, nodegroup.uuid, nodegroup_uuid))
      if utils.UUID_RE.match(nodegroup.name.lower()):
        result.append("node group '%s' (uuid: '%s') has uuid-like name" %
                      (nodegroup.name, nodegroup.uuid))
      if nodegroup.name in nodegroups_names:
        result.append("duplicate node group name '%s'" % nodegroup.name)
      else:
        nodegroups_names.add(nodegroup.name)
      group_name = "group %s" % nodegroup.name
      _helper_ipolicy(group_name, cluster.SimpleFillIPolicy(nodegroup.ipolicy),
                      False)
      if nodegroup.ndparams:
        _helper(group_name, "ndparams",
                cluster.SimpleFillND(nodegroup.ndparams),
                constants.NDS_PARAMETER_TYPES)

    # drbd minors check
    _, duplicates = self._UnlockedComputeDRBDMap()
    for node, minor, instance_a, instance_b in duplicates:
      result.append("DRBD minor %d on node %s is assigned twice to instances"
                    " %s and %s" % (minor, node, instance_a, instance_b))

    # IP checks
    default_nicparams = cluster.nicparams[constants.PP_DEFAULT]
    # maps ip (or link/ip/network key) -> list of owner descriptions
    ips = {}

    def _AddIpAddress(ip, name):
      ips.setdefault(ip, []).append(name)

    _AddIpAddress(cluster.master_ip, "cluster_ip")

    for node in data.nodes.values():
      _AddIpAddress(node.primary_ip, "node:%s/primary" % node.name)
      if node.secondary_ip != node.primary_ip:
        _AddIpAddress(node.secondary_ip, "node:%s/secondary" % node.name)

    for instance in data.instances.values():
      for idx, nic in enumerate(instance.nics):
        if nic.ip is None:
          continue

        nicparams = objects.FillDict(default_nicparams, nic.nicparams)
        nic_mode = nicparams[constants.NIC_MODE]
        nic_link = nicparams[constants.NIC_LINK]

        if nic_mode == constants.NIC_MODE_BRIDGED:
          link = "bridge:%s" % nic_link
        elif nic_mode == constants.NIC_MODE_ROUTED:
          link = "route:%s" % nic_link
        else:
          raise errors.ProgrammerError("NIC mode '%s' not handled" % nic_mode)

        # NIC ips are only compared within the same link/network, hence
        # the composite key
        _AddIpAddress("%s/%s/%s" % (link, nic.ip, nic.network),
                      "instance:%s/nic:%d" % (instance.name, idx))

    for ip, owners in ips.items():
      if len(owners) > 1:
        result.append("IP address %s is used by multiple owners: %s" %
                      (ip, utils.CommaJoin(owners)))

    return result
819

    
820
  @locking.ssynchronized(_config_lock, shared=1)
  def VerifyConfig(self):
    """Verify function.

    Locked wrapper over L{_UnlockedVerifyConfig}.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    return self._UnlockedVerifyConfig()
832

    
833
  def _UnlockedSetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when the only the top device is passed to the remote
    node.

    This function is for internal use, when the config lock is already held.

    """
    # update children first, so the whole tree is consistent
    if disk.children:
      for child in disk.children:
        self._UnlockedSetDiskID(child, node_name)

    # physical_id already set and there is no logical_id to derive it
    # from: nothing to do
    if disk.logical_id is None and disk.physical_id is not None:
      return
    if disk.dev_type == constants.LD_DRBD8:
      pnode, snode, port, pminor, sminor, secret = disk.logical_id
      if node_name not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        node_name)
      pnode_info = self._UnlockedGetNodeInfo(pnode)
      snode_info = self._UnlockedGetNodeInfo(snode)
      if pnode_info is None or snode_info is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(disk))
      p_data = (pnode_info.secondary_ip, port)
      s_data = (snode_info.secondary_ip, port)
      # physical_id layout: own (ip, port), peer (ip, port), own minor,
      # shared secret -- so it differs depending on which side node_name is
      if pnode == node_name:
        disk.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        disk.physical_id = s_data + p_data + (sminor, secret)
    else:
      # non-DRBD device types: physical id is simply the logical id
      disk.physical_id = disk.logical_id
    return
870

    
871
  @locking.ssynchronized(_config_lock)
  def SetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    Locked wrapper around L{_UnlockedSetDiskID}; the conversion also
    descends into the disk's children.

    """
    return self._UnlockedSetDiskID(disk, node_name)
883

    
884
  @locking.ssynchronized(_config_lock)
  def AddTcpUdpPort(self, port):
    """Adds a new port to the available port pool.

    @type port: int
    @param port: the port number to return to the pool

    @warning: this method does not "flush" the configuration (via
        L{_WriteConfig}); callers should do that themselves once the
        configuration is stable

    """
    if not isinstance(port, int):
      raise errors.ProgrammerError("Invalid type passed for port")
    self._config_data.cluster.tcpudp_port_pool.add(port)
897

    
898
  @locking.ssynchronized(_config_lock, shared=1)
  def GetPortList(self):
    """Returns a copy of the current port list.

    @rtype: set
    @return: a copy of the TCP/UDP port pool

    """
    pool = self._config_data.cluster.tcpudp_port_pool
    return pool.copy()
904

    
905
  @locking.ssynchronized(_config_lock)
  def AllocatePort(self):
    """Allocate a port.

    The port is taken from the pool of returned ports if available,
    otherwise from the default port range (in which case
    highest_used_port is advanced).

    @rtype: int
    @return: the allocated port number

    """
    cluster = self._config_data.cluster
    # If there are TCP/IP ports configured, we use them first.
    if cluster.tcpudp_port_pool:
      port = cluster.tcpudp_port_pool.pop()
    else:
      port = cluster.highest_used_port + 1
      if port >= constants.LAST_DRBD_PORT:
        raise errors.ConfigurationError("The highest used port is greater"
                                        " than %s. Aborting." %
                                        constants.LAST_DRBD_PORT)
      cluster.highest_used_port = port

    self._WriteConfig()
    return port
927

    
928
  def _UnlockedComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    @rtype: (dict, list)
    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty list), and a list of duplicates; if the duplicates
        list is not empty, the configuration is corrupted and its caller
        should raise an exception

    """
    def _RecordMinors(instance_name, disk, used):
      """Collect the DRBD minors of one disk tree into C{used}."""
      dups = []
      if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
        node_a, node_b, _, minor_a, minor_b = disk.logical_id[:5]
        for node, minor in ((node_a, minor_a), (node_b, minor_b)):
          assert node in used, ("Node '%s' of instance '%s' not found"
                                " in node list" % (node, instance_name))
          if minor in used[node]:
            dups.append((node, minor, instance_name, used[node][minor]))
          else:
            used[node][minor] = instance_name
      for child in disk.children or []:
        dups.extend(_RecordMinors(instance_name, child, used))
      return dups

    duplicates = []
    usage = dict((node, {}) for node in self._config_data.nodes)
    for instance in self._config_data.instances.itervalues():
      for disk in instance.disks:
        duplicates.extend(_RecordMinors(instance.name, disk, usage))
    # reserved-but-not-yet-committed minors also count as used
    for (node, minor), inst_name in self._temporary_drbds.iteritems():
      if minor in usage[node] and usage[node][minor] != inst_name:
        duplicates.append((node, minor, inst_name, usage[node][minor]))
      else:
        usage[node][minor] = inst_name
    return usage, duplicates
966

    
967
  @locking.ssynchronized(_config_lock)
  def ComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    This is just a wrapper over L{_UnlockedComputeDRBDMap} that turns
    detected duplicates into an error.

    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty list).

    """
    d_map, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    return d_map
983

    
984
  @locking.ssynchronized(_config_lock)
  def AllocateDRBDMinor(self, nodes, instance):
    """Allocate a drbd minor.

    The free minor will be automatically computed from the existing
    devices. A node can be given multiple times in order to allocate
    multiple minors. The result is the list of minors, in the same
    order as the passed nodes.

    @type instance: string
    @param instance: the instance for which we allocate minors

    """
    assert isinstance(instance, basestring), \
           "Invalid argument '%s' passed to AllocateDRBDMinor" % instance

    d_map, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    result = []
    for node in nodes:
      node_minors = d_map[node]
      if not node_minors:
        # no minors used, we can start at 0
        result.append(0)
        node_minors[0] = instance
        self._temporary_drbds[(node, 0)] = instance
        continue
      keys = node_minors.keys()
      keys.sort()
      ffree = utils.FirstFree(keys)
      if ffree is None:
        # no gap in the allocated range, use the next minor
        # TODO: implement high-limit check
        minor = keys[-1] + 1
      else:
        minor = ffree
      # double-check minor against current instances
      assert minor not in d_map[node], \
             ("Attempt to reuse allocated DRBD minor %d on node %s,"
              " already allocated to instance %s" %
              (minor, node, d_map[node][minor]))
      node_minors[minor] = instance
      # double-check minor against reservation
      r_key = (node, minor)
      assert r_key not in self._temporary_drbds, \
             ("Attempt to reuse reserved DRBD minor %d on node %s,"
              " reserved for instance %s" %
              (minor, node, self._temporary_drbds[r_key]))
      self._temporary_drbds[r_key] = instance
      result.append(minor)
    logging.debug("Request to allocate drbd minors, input: %s, returning %s",
                  nodes, result)
    return result
1039

    
1040
  def _UnlockedReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    assert isinstance(instance, basestring), \
           "Invalid argument passed to ReleaseDRBDMinors"
    # collect first, then delete, to avoid mutating while iterating
    stale_keys = [key for (key, name) in self._temporary_drbds.items()
                  if name == instance]
    for key in stale_keys:
      del self._temporary_drbds[key]
1053

    
1054
  @locking.ssynchronized(_config_lock)
  def ReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    This should be called on the error paths; on the success paths
    it's automatically called by the ConfigWriter add and update
    functions.

    This function is just a wrapper over L{_UnlockedReleaseDRBDMinors}.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    self._UnlockedReleaseDRBDMinors(instance)
1070

    
1071
  @locking.ssynchronized(_config_lock, shared=1)
  def GetConfigVersion(self):
    """Get the configuration version.

    @return: Config version

    """
    return self._config_data.version

  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterName(self):
    """Get cluster name.

    @return: Cluster name

    """
    cluster = self._config_data.cluster
    return cluster.cluster_name

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNode(self):
    """Get the hostname of the master node for this cluster.

    @return: Master hostname

    """
    cluster = self._config_data.cluster
    return cluster.master_node

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterIP(self):
    """Get the IP of the master node for this cluster.

    @return: Master IP

    """
    cluster = self._config_data.cluster
    return cluster.master_ip

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNetdev(self):
    """Get the master network device for this cluster.

    """
    cluster = self._config_data.cluster
    return cluster.master_netdev

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNetmask(self):
    """Get the netmask of the master node for this cluster.

    """
    cluster = self._config_data.cluster
    return cluster.master_netmask

  @locking.ssynchronized(_config_lock, shared=1)
  def GetUseExternalMipScript(self):
    """Get flag representing whether to use the external master IP setup script.

    """
    cluster = self._config_data.cluster
    return cluster.use_external_mip_script

  @locking.ssynchronized(_config_lock, shared=1)
  def GetFileStorageDir(self):
    """Get the file storage dir for this cluster.

    """
    cluster = self._config_data.cluster
    return cluster.file_storage_dir

  @locking.ssynchronized(_config_lock, shared=1)
  def GetSharedFileStorageDir(self):
    """Get the shared file storage dir for this cluster.

    """
    cluster = self._config_data.cluster
    return cluster.shared_file_storage_dir

  @locking.ssynchronized(_config_lock, shared=1)
  def GetHypervisorType(self):
    """Get the hypervisor type for this cluster.

    """
    # the first enabled hypervisor is the cluster default
    return self._config_data.cluster.enabled_hypervisors[0]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetHostKey(self):
    """Return the rsa hostkey from the config.

    @rtype: string
    @return: the rsa hostkey

    """
    cluster = self._config_data.cluster
    return cluster.rsahostkeypub

  @locking.ssynchronized(_config_lock, shared=1)
  def GetDefaultIAllocator(self):
    """Get the default instance allocator for this cluster.

    """
    cluster = self._config_data.cluster
    return cluster.default_iallocator

  @locking.ssynchronized(_config_lock, shared=1)
  def GetPrimaryIPFamily(self):
    """Get cluster primary ip family.

    @return: primary ip family

    """
    cluster = self._config_data.cluster
    return cluster.primary_ip_family

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNetworkParameters(self):
    """Get network parameters of the master node.

    @rtype: L{object.MasterNetworkParameters}
    @return: network parameters of the master node

    """
    cluster = self._config_data.cluster
    return objects.MasterNetworkParameters(
      name=cluster.master_node, ip=cluster.master_ip,
      netmask=cluster.master_netmask, netdev=cluster.master_netdev,
      ip_family=cluster.primary_ip_family)
1190

    
1191
  @locking.ssynchronized(_config_lock)
  def AddNodeGroup(self, group, ec_id, check_uuid=True):
    """Add a node group to the configuration.

    This method calls group.UpgradeConfig() to fill any missing attributes
    according to their default values.

    @type group: L{objects.NodeGroup}
    @param group: the NodeGroup object to add
    @type ec_id: string
    @param ec_id: unique id for the job to use when creating a missing UUID
    @type check_uuid: bool
    @param check_uuid: add an UUID to the group if it doesn't have one or, if
                       it does, ensure that it does not exist in the
                       configuration already

    """
    self._UnlockedAddNodeGroup(group, ec_id, check_uuid)
    self._WriteConfig()
1210

    
1211
  def _UnlockedAddNodeGroup(self, group, ec_id, check_uuid):
    """Add a node group to the configuration, lock already held.

    """
    logging.info("Adding node group %s to configuration", group.name)

    # Some callers pre-populate the UUID (generated via
    # ConfigWriter.GenerateUniqueID()); they may pass check_uuid=False to
    # skip the "does this UUID exist already" check.
    if check_uuid:
      self._EnsureUUID(group, ec_id)

    try:
      existing_uuid = self._UnlockedLookupNodeGroup(group.name)
    except errors.OpPrereqError:
      # name not taken, proceed with the addition
      pass
    else:
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
                                 " node group (UUID: %s)" %
                                 (group.name, existing_uuid),
                                 errors.ECODE_EXISTS)

    group.serial_no = 1
    group.ctime = group.mtime = time.time()
    # fill in any missing attributes with their defaults
    group.UpgradeConfig()

    self._config_data.nodegroups[group.uuid] = group
    self._config_data.cluster.serial_no += 1
1239

    
1240
  @locking.ssynchronized(_config_lock)
  def RemoveNodeGroup(self, group_uuid):
    """Remove a node group from the configuration.

    @type group_uuid: string
    @param group_uuid: the UUID of the node group to remove

    """
    logging.info("Removing node group %s from configuration", group_uuid)

    if group_uuid not in self._config_data.nodegroups:
      raise errors.ConfigurationError("Unknown node group '%s'" % group_uuid)

    # at least one group must always remain
    assert len(self._config_data.nodegroups) != 1, \
            "Group '%s' is the only group, cannot be removed" % group_uuid

    del self._config_data.nodegroups[group_uuid]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()
1259

    
1260
  def _UnlockedLookupNodeGroup(self, target):
    """Lookup a node group's UUID.

    @type target: string or None
    @param target: group name or UUID or None to look for the default
    @rtype: string
    @return: nodegroup UUID
    @raises errors.OpPrereqError: when the target group cannot be found

    """
    if target is None:
      # None only works as "the default group" when exactly one group exists
      if len(self._config_data.nodegroups) != 1:
        # pass an error classification code, consistent with the
        # ECODE_NOENT raise below
        raise errors.OpPrereqError("More than one node group exists. Target"
                                   " group must be specified explicitly.",
                                   errors.ECODE_INVAL)
      else:
        return self._config_data.nodegroups.keys()[0]
    if target in self._config_data.nodegroups:
      # target is already a known UUID
      return target
    # fall back to a lookup by group name
    for nodegroup in self._config_data.nodegroups.values():
      if nodegroup.name == target:
        return nodegroup.uuid
    raise errors.OpPrereqError("Node group '%s' not found" % target,
                               errors.ECODE_NOENT)
1283

    
1284
  @locking.ssynchronized(_config_lock, shared=1)
  def LookupNodeGroup(self, target):
    """Lookup a node group's UUID.

    This function is just a wrapper over L{_UnlockedLookupNodeGroup}.

    @type target: string or None
    @param target: group name or UUID or None to look for the default
    @rtype: string
    @return: nodegroup UUID

    """
    return self._UnlockedLookupNodeGroup(target)
1297

    
1298
  def _UnlockedGetNodeGroup(self, uuid):
    """Lookup a node group, lock already held.

    @type uuid: string
    @param uuid: group UUID
    @rtype: L{objects.NodeGroup} or None
    @return: nodegroup object, or None if not found

    """
    # dict.get gives the same absent -> None behavior as an explicit check
    return self._config_data.nodegroups.get(uuid, None)
1311

    
1312
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeGroup(self, uuid):
    """Lookup a node group.

    Locked wrapper around L{_UnlockedGetNodeGroup}.

    @type uuid: string
    @param uuid: group UUID
    @rtype: L{objects.NodeGroup} or None
    @return: nodegroup object, or None if not found

    """
    return self._UnlockedGetNodeGroup(uuid)
1323

    
1324
  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNodeGroupsInfo(self):
    """Get the configuration of all node groups.

    @rtype: dict
    @return: a copy of the uuid -> L{objects.NodeGroup} mapping

    """
    return dict(self._config_data.nodegroups.items())

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeGroupList(self):
    """Get a list of node groups.

    @rtype: list
    @return: the list of node group UUIDs

    """
    return self._config_data.nodegroups.keys()
1337

    
1338
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeGroupMembersByNodes(self, nodes):
    """Get nodes which are member in the same nodegroups as the given nodes.

    @rtype: frozenset
    @return: names of all nodes sharing a group with any of C{nodes}

    """
    members = set()
    for node_name in nodes:
      group_uuid = self._UnlockedGetNodeInfo(node_name).group
      members.update(self._UnlockedGetNodeGroup(group_uuid).members)
    return frozenset(members)
1348

    
1349
  @locking.ssynchronized(_config_lock, shared=1)
  def GetMultiNodeGroupInfo(self, group_uuids):
    """Get the configuration of multiple node groups.

    @param group_uuids: List of node group UUIDs
    @rtype: list
    @return: List of tuples of (group_uuid, group_info)

    """
    return [(group_uuid, self._UnlockedGetNodeGroup(group_uuid))
            for group_uuid in group_uuids]
1359

    
1360
  @locking.ssynchronized(_config_lock)
  def AddInstance(self, instance, ec_id):
    """Add an instance to the config.

    This should be used after creating a new instance.

    @type instance: L{objects.Instance}
    @param instance: the instance object
    @type ec_id: string
    @param ec_id: execution context id, used for UUID reservation

    """
    if not isinstance(instance, objects.Instance):
      raise errors.ProgrammerError("Invalid type passed to AddInstance")

    if instance.disk_template != constants.DT_DISKLESS:
      all_lvs = instance.MapLVsByNode()
      logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, all_lvs)

    # refuse duplicate MAC addresses
    existing_macs = self._AllMACs()
    for nic in instance.nics:
      if nic.mac in existing_macs:
        raise errors.ConfigurationError("Cannot add instance %s:"
                                        " MAC address '%s' already in use." %
                                        (instance.name, nic.mac))

    self._EnsureUUID(instance, ec_id)

    instance.serial_no = 1
    instance.ctime = instance.mtime = time.time()
    self._config_data.instances[instance.name] = instance
    self._config_data.cluster.serial_no += 1
    # the temporary DRBD/IP reservations are now committed to the config
    self._UnlockedReleaseDRBDMinors(instance.name)
    self._UnlockedCommitTemporaryIps(ec_id)
    self._WriteConfig()
1393

    
1394
  def _EnsureUUID(self, item, ec_id):
    """Ensures a given object has a valid UUID.

    @param item: the instance or node to be checked
    @param ec_id: the execution context id for the uuid reservation

    """
    if item.uuid:
      # an explicit UUID must not collide with any existing/reserved one
      if item.uuid in self._AllIDs(include_temporary=True):
        raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
                                        " in use" % (item.name, item.uuid))
    else:
      item.uuid = self._GenerateUniqueID(ec_id)
1406

    
1407
  def _SetInstanceStatus(self, instance_name, status):
    """Set the instance's status to a given value.

    Writes the configuration only if the status actually changes.

    """
    assert status in constants.ADMINST_ALL, \
           "Invalid status '%s' passed to SetInstanceStatus" % (status,)

    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" %
                                      instance_name)
    instance = self._config_data.instances[instance_name]
    if instance.admin_state == status:
      # nothing to do, avoid a needless config write
      return
    instance.admin_state = status
    instance.serial_no += 1
    instance.mtime = time.time()
    self._WriteConfig()
1423

    
1424
  @locking.ssynchronized(_config_lock)
  def MarkInstanceUp(self, instance_name):
    """Mark the instance status to up in the config.

    """
    self._SetInstanceStatus(instance_name, constants.ADMINST_UP)

  @locking.ssynchronized(_config_lock)
  def MarkInstanceOffline(self, instance_name):
    """Mark the instance status to offline in the config.

    """
    self._SetInstanceStatus(instance_name, constants.ADMINST_OFFLINE)
1437

    
1438
  @locking.ssynchronized(_config_lock)
  def RemoveInstance(self, instance_name):
    """Remove the instance from the configuration.

    @type instance_name: string
    @param instance_name: the name of the instance to remove
    @raise errors.ConfigurationError: if the instance is unknown

    """
    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)

    # single lookup; the original fetched the same object twice (once
    # directly and once via _UnlockedGetInstanceInfo)
    instance = self._config_data.instances[instance_name]

    # If a network port has been allocated to the instance,
    # return it to the pool of free ports.
    network_port = getattr(instance, "network_port", None)
    if network_port is not None:
      self._config_data.cluster.tcpudp_port_pool.add(network_port)

    # Return all IP addresses to the respective address pools
    for nic in instance.nics:
      if nic.network and nic.ip:
        self._UnlockedCommitIp(constants.RELEASE_ACTION, nic.network, nic.ip)

    del self._config_data.instances[instance_name]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()
1463

    
1464
  @locking.ssynchronized(_config_lock)
  def RenameInstance(self, old_name, new_name):
    """Rename an instance.

    This needs to be done in ConfigWriter and not by RemoveInstance
    combined with AddInstance as only we can guarantee an atomic
    rename.

    """
    if old_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % old_name)

    # Operate on a copy to not loose instance object in case of a failure
    inst = self._config_data.instances[old_name].Copy()
    inst.name = new_name

    for (idx, disk) in enumerate(inst.disks):
      if disk.dev_type == constants.LD_FILE:
        # file-based disks embed the instance name in their paths, so
        # rename them in both the logical and the physical id
        file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
        disk.logical_id = (disk.logical_id[0],
                           utils.PathJoin(file_storage_dir, inst.name,
                                          "disk%s" % idx))
        disk.physical_id = disk.logical_id

    # Actually replace instance object
    del self._config_data.instances[old_name]
    self._config_data.instances[inst.name] = inst

    # Force update of ssconf files
    self._config_data.cluster.serial_no += 1

    self._WriteConfig()
1497

    
1498
  @locking.ssynchronized(_config_lock)
  def MarkInstanceDown(self, instance_name):
    """Mark the status of an instance to down in the configuration.

    """
    self._SetInstanceStatus(instance_name, constants.ADMINST_DOWN)
1504

    
1505
  def _UnlockedGetInstanceList(self):
    """Get the list of instance names.

    This function is for internal use, when the config lock is already held.

    """
    instances = self._config_data.instances
    return instances.keys()
1512

    
1513
  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceList(self):
    """Get the list of instances.

    @return: array of instances, ex. ['instance2.example.com',
        'instance1.example.com']

    """
    return self._UnlockedGetInstanceList()
1522

    
1523
  def ExpandInstanceName(self, short_name):
    """Attempt to expand an incomplete instance name.

    Locking is done inside L{ConfigWriter.GetInstanceList}, hence no
    decorator here.

    """
    candidates = self.GetInstanceList()
    return _MatchNameComponentIgnoreCase(short_name, candidates)
1529

    
1530
  def _UnlockedGetInstanceInfo(self, instance_name):
    """Returns information about an instance, or None if unknown.

    This function is for internal use, when the config lock is already held.

    """
    # dict.get gives the same absent -> None behavior as an explicit check
    return self._config_data.instances.get(instance_name, None)
1540

    
1541
  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceInfo(self, instance_name):
    """Returns information about an instance.

    It takes the information from the configuration file. Other information of
    an instance are taken from the live systems.

    @param instance_name: name of the instance, e.g.
        I{instance1.example.com}

    @rtype: L{objects.Instance}
    @return: the instance object

    """
    return self._UnlockedGetInstanceInfo(instance_name)
1556

    
1557
  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceNodeGroups(self, instance_name, primary_only=False):
    """Returns set of node group UUIDs for instance's nodes.

    @type primary_only: bool
    @param primary_only: only consider the primary node

    @rtype: frozenset

    """
    instance = self._UnlockedGetInstanceInfo(instance_name)
    if not instance:
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)

    nodes = [instance.primary_node] if primary_only else instance.all_nodes

    return frozenset(self._UnlockedGetNodeInfo(node_name).group
                     for node_name in nodes)
1575

    
1576
  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceNetworks(self, instance_name):
    """Returns set of network UUIDs for instance's nics.

    @rtype: frozenset

    """
    instance = self._UnlockedGetInstanceInfo(instance_name)
    if not instance:
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)

    # NICs without a network are skipped
    return frozenset(nic.network for nic in instance.nics if nic.network)
1593

    
1594
  @locking.ssynchronized(_config_lock, shared=1)
  def GetMultiInstanceInfo(self, instances):
    """Get the configuration of multiple instances.

    @param instances: list of instance names
    @rtype: list
    @return: list of tuples (instance, instance_info), where
        instance_info is what would GetInstanceInfo return for the
        node, while keeping the original order

    """
    return [(iname, self._UnlockedGetInstanceInfo(iname))
            for iname in instances]
1606

    
1607
  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllInstancesInfo(self):
    """Get the configuration of all instances.

    @rtype: dict
    @return: dict of (instance, instance_info), where instance_info is what
              would GetInstanceInfo return for the node

    """
    return dict((name, self._UnlockedGetInstanceInfo(name))
                for name in self._UnlockedGetInstanceList())
1619

    
1620
  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstancesInfoByFilter(self, filter_fn):
    """Get instance configuration with a filter.

    @type filter_fn: callable
    @param filter_fn: Filter function receiving instance object as parameter,
      returning boolean. Important: this function is called while the
      configuration locks is held. It must not do any complex work or call
      functions potentially leading to a deadlock. Ideally it doesn't call any
      other functions and just compares instance attributes.

    """
    selected = {}
    for name, inst in self._config_data.instances.items():
      if filter_fn(inst):
        selected[name] = inst
    return selected
1635

    
1636
  @locking.ssynchronized(_config_lock)
  def AddNode(self, node, ec_id):
    """Add a node to the configuration.

    @type node: L{objects.Node}
    @param node: a Node instance
    @type ec_id: string
    @param ec_id: execution context id, used for UUID reservation

    """
    logging.info("Adding node %s to configuration", node.name)

    self._EnsureUUID(node, ec_id)

    node.serial_no = 1
    node.ctime = node.mtime = time.time()
    # register the node in its group before storing it
    self._UnlockedAddNodeToGroup(node.name, node.group)
    self._config_data.nodes[node.name] = node
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()
1654

    
1655
  @locking.ssynchronized(_config_lock)
  def RemoveNode(self, node_name):
    """Remove a node from the configuration.

    @type node_name: string
    @param node_name: the name of the node to remove

    """
    logging.info("Removing node %s from configuration", node_name)

    if node_name not in self._config_data.nodes:
      raise errors.ConfigurationError("Unknown node '%s'" % node_name)

    # detach from its group first, then drop the node itself
    self._UnlockedRemoveNodeFromGroup(self._config_data.nodes[node_name])
    del self._config_data.nodes[node_name]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()
1669

    
1670
  def ExpandNodeName(self, short_name):
    """Attempt to expand an incomplete node name into a full one.

    """
    # Locking is done in L{ConfigWriter.GetNodeList}
    full_list = self.GetNodeList()
    return _MatchNameComponentIgnoreCase(short_name, full_list)
1676

    
1677
  def _UnlockedGetNodeInfo(self, node_name):
1678
    """Get the configuration of a node, as stored in the config.
1679

1680
    This function is for internal use, when the config lock is already
1681
    held.
1682

1683
    @param node_name: the node name, e.g. I{node1.example.com}
1684

1685
    @rtype: L{objects.Node}
1686
    @return: the node object
1687

1688
    """
1689
    if node_name not in self._config_data.nodes:
1690
      return None
1691

    
1692
    return self._config_data.nodes[node_name]
1693

    
1694
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    Locked wrapper around L{_UnlockedGetNodeInfo}.

    @param node_name: the node name, e.g. I{node1.example.com}

    @rtype: L{objects.Node}
    @return: the node object

    """
    node = self._UnlockedGetNodeInfo(node_name)
    return node
1707

    
1708
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeInstances(self, node_name):
    """Get the instances of a node, as stored in the config.

    @param node_name: the node name, e.g. I{node1.example.com}

    @rtype: (list, list)
    @return: a tuple with two lists: the primary and the secondary instances

    """
    primary = []
    secondary = []
    for instance in self._config_data.instances.values():
      if instance.primary_node == node_name:
        primary.append(instance.name)
      if node_name in instance.secondary_nodes:
        secondary.append(instance.name)
    return (primary, secondary)
1726

    
1727
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeGroupInstances(self, uuid, primary_only=False):
    """Get the instances of a node group.

    @param uuid: Node group UUID
    @param primary_only: Whether to only consider primary nodes
    @rtype: frozenset
    @return: Names of the instances having a node in the group

    """
    if primary_only:
      nodes_fn = lambda inst: [inst.primary_node]
    else:
      nodes_fn = lambda inst: inst.all_nodes

    found = set()
    for inst in self._config_data.instances.values():
      for node_name in nodes_fn(inst):
        if self._UnlockedGetNodeInfo(node_name).group == uuid:
          found.add(inst.name)
    return frozenset(found)
1746

    
1747
  def _UnlockedGetNodeList(self):
1748
    """Return the list of nodes which are in the configuration.
1749

1750
    This function is for internal use, when the config lock is already
1751
    held.
1752

1753
    @rtype: list
1754

1755
    """
1756
    return self._config_data.nodes.keys()
1757

    
1758
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeList(self):
    """Return the list of nodes which are in the configuration.

    Locked wrapper around L{_UnlockedGetNodeList}.

    """
    node_list = self._UnlockedGetNodeList()
    return node_list
1764

    
1765
  def _UnlockedGetOnlineNodeList(self):
1766
    """Return the list of nodes which are online.
1767

1768
    """
1769
    all_nodes = [self._UnlockedGetNodeInfo(node)
1770
                 for node in self._UnlockedGetNodeList()]
1771
    return [node.name for node in all_nodes if not node.offline]
1772

    
1773
  @locking.ssynchronized(_config_lock, shared=1)
  def GetOnlineNodeList(self):
    """Return the list of nodes which are online.

    Locked wrapper around L{_UnlockedGetOnlineNodeList}.

    """
    online = self._UnlockedGetOnlineNodeList()
    return online
1779

    
1780
  @locking.ssynchronized(_config_lock, shared=1)
  def GetVmCapableNodeList(self):
    """Return the list of nodes which are vm capable.

    @rtype: list
    @return: names of all nodes with C{vm_capable} set

    """
    # NOTE: the original docstring said "not vm capable" — a copy/paste from
    # the sibling GetNonVmCapableNodeList; the code keeps vm-capable nodes.
    all_nodes = [self._UnlockedGetNodeInfo(node)
                 for node in self._UnlockedGetNodeList()]
    return [node.name for node in all_nodes if node.vm_capable]
1788

    
1789
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNonVmCapableNodeList(self):
    """Return the list of nodes which are not vm capable.

    """
    result = []
    for name in self._UnlockedGetNodeList():
      ninfo = self._UnlockedGetNodeInfo(name)
      if not ninfo.vm_capable:
        result.append(ninfo.name)
    return result
1797

    
1798
  @locking.ssynchronized(_config_lock, shared=1)
  def GetMultiNodeInfo(self, nodes):
    """Get the configuration of multiple nodes.

    @param nodes: list of node names
    @rtype: list
    @return: list of (node, node_info) tuples, where node_info is what
        GetNodeInfo would return for the node, preserving input order

    """
    result = []
    for name in nodes:
      result.append((name, self._UnlockedGetNodeInfo(name)))
    return result
1810

    
1811
  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNodesInfo(self):
    """Get the configuration of all nodes.

    Locked wrapper around L{_UnlockedGetAllNodesInfo}.

    @rtype: dict
    @return: mapping of node name to node object

    """
    info = self._UnlockedGetAllNodesInfo()
    return info
1821

    
1822
  def _UnlockedGetAllNodesInfo(self):
1823
    """Gets configuration of all nodes.
1824

1825
    @note: See L{GetAllNodesInfo}
1826

1827
    """
1828
    return dict([(node, self._UnlockedGetNodeInfo(node))
1829
                 for node in self._UnlockedGetNodeList()])
1830

    
1831
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeGroupsFromNodes(self, nodes):
    """Returns groups for a list of nodes.

    @type nodes: list of string
    @param nodes: List of node names
    @rtype: frozenset
    @return: UUIDs of the groups the given nodes belong to

    """
    groups = set()
    for name in nodes:
      groups.add(self._UnlockedGetNodeInfo(name).group)
    return frozenset(groups)
1841

    
1842
  def _UnlockedGetMasterCandidateStats(self, exceptions=None):
1843
    """Get the number of current and maximum desired and possible candidates.
1844

1845
    @type exceptions: list
1846
    @param exceptions: if passed, list of nodes that should be ignored
1847
    @rtype: tuple
1848
    @return: tuple of (current, desired and possible, possible)
1849

1850
    """
1851
    mc_now = mc_should = mc_max = 0
1852
    for node in self._config_data.nodes.values():
1853
      if exceptions and node.name in exceptions:
1854
        continue
1855
      if not (node.offline or node.drained) and node.master_capable:
1856
        mc_max += 1
1857
      if node.master_candidate:
1858
        mc_now += 1
1859
    mc_should = min(mc_max, self._config_data.cluster.candidate_pool_size)
1860
    return (mc_now, mc_should, mc_max)
1861

    
1862
  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterCandidateStats(self, exceptions=None):
    """Get the number of current and maximum possible candidates.

    Locked wrapper around L{_UnlockedGetMasterCandidateStats}.

    @type exceptions: list
    @param exceptions: if passed, list of nodes that should be ignored
    @rtype: tuple
    @return: tuple of (current, desired and possible, possible)

    """
    stats = self._UnlockedGetMasterCandidateStats(exceptions)
    return stats
1875

    
1876
  @locking.ssynchronized(_config_lock)
  def MaintainCandidatePool(self, exceptions):
    """Try to grow the candidate pool to the desired size.

    @type exceptions: list
    @param exceptions: if passed, list of nodes that should be ignored
    @rtype: list
    @return: list with the adjusted nodes (L{objects.Node} instances)

    """
    # The stats tuple is (current, desired, possible); the desired size is
    # deliberately unpacked as "mc_max" because we only grow the pool up to
    # the desired size, never to the theoretical maximum.
    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats(exceptions)
    mod_list = []
    if mc_now < mc_max:
      # Shuffle so promotion is spread randomly over eligible nodes
      node_list = self._config_data.nodes.keys()
      random.shuffle(node_list)
      for name in node_list:
        if mc_now >= mc_max:
          break
        node = self._config_data.nodes[name]
        # Skip nodes that already are candidates, cannot become one, or
        # were explicitly excluded by the caller
        if (node.master_candidate or node.offline or node.drained or
            node.name in exceptions or not node.master_capable):
          continue
        mod_list.append(node)
        node.master_candidate = True
        node.serial_no += 1
        mc_now += 1
      if mc_now != mc_max:
        # this should not happen
        logging.warning("Warning: MaintainCandidatePool didn't manage to"
                        " fill the candidate pool (%d/%d)", mc_now, mc_max)
      if mod_list:
        # Promotions change cluster-wide state: bump the cluster serial and
        # persist the configuration
        self._config_data.cluster.serial_no += 1
        self._WriteConfig()

    return mod_list
1911

    
1912
  def _UnlockedAddNodeToGroup(self, node_name, nodegroup_uuid):
1913
    """Add a given node to the specified group.
1914

1915
    """
1916
    if nodegroup_uuid not in self._config_data.nodegroups:
1917
      # This can happen if a node group gets deleted between its lookup and
1918
      # when we're adding the first node to it, since we don't keep a lock in
1919
      # the meantime. It's ok though, as we'll fail cleanly if the node group
1920
      # is not found anymore.
1921
      raise errors.OpExecError("Unknown node group: %s" % nodegroup_uuid)
1922
    if node_name not in self._config_data.nodegroups[nodegroup_uuid].members:
1923
      self._config_data.nodegroups[nodegroup_uuid].members.append(node_name)
1924

    
1925
  def _UnlockedRemoveNodeFromGroup(self, node):
1926
    """Remove a given node from its group.
1927

1928
    """
1929
    nodegroup = node.group
1930
    if nodegroup not in self._config_data.nodegroups:
1931
      logging.warning("Warning: node '%s' has unknown node group '%s'"
1932
                      " (while being removed from it)", node.name, nodegroup)
1933
    nodegroup_obj = self._config_data.nodegroups[nodegroup]
1934
    if node.name not in nodegroup_obj.members:
1935
      logging.warning("Warning: node '%s' not a member of its node group '%s'"
1936
                      " (while being removed from it)", node.name, nodegroup)
1937
    else:
1938
      nodegroup_obj.members.remove(node.name)
1939

    
1940
  @locking.ssynchronized(_config_lock)
  def AssignGroupNodes(self, mods):
    """Changes the group of a number of nodes.

    Runs in two phases: first every requested move is validated (node exists,
    both groups exist, membership lists are consistent); only if all of them
    pass are the moves applied, so a bad entry leaves the config untouched.

    @type mods: list of tuples; (node name, new group UUID)
    @param mods: Node membership modifications

    """
    groups = self._config_data.nodegroups
    nodes = self._config_data.nodes

    # (node, old_group, new_group) triples for validated moves
    resmod = []

    # Try to resolve names/UUIDs first
    for (node_name, new_group_uuid) in mods:
      try:
        node = nodes[node_name]
      except KeyError:
        raise errors.ConfigurationError("Unable to find node '%s'" % node_name)

      if node.group == new_group_uuid:
        # Node is being assigned to its current group
        logging.debug("Node '%s' was assigned to its current group (%s)",
                      node_name, node.group)
        continue

      # Try to find current group of node
      try:
        old_group = groups[node.group]
      except KeyError:
        raise errors.ConfigurationError("Unable to find old group '%s'" %
                                        node.group)

      # Try to find new group for node
      try:
        new_group = groups[new_group_uuid]
      except KeyError:
        raise errors.ConfigurationError("Unable to find new group '%s'" %
                                        new_group_uuid)

      assert node.name in old_group.members, \
        ("Inconsistent configuration: node '%s' not listed in members for its"
         " old group '%s'" % (node.name, old_group.uuid))
      assert node.name not in new_group.members, \
        ("Inconsistent configuration: node '%s' already listed in members for"
         " its new group '%s'" % (node.name, new_group.uuid))

      resmod.append((node, old_group, new_group))

    # Apply changes
    for (node, old_group, new_group) in resmod:
      # NOTE(review): node.uuid is a node UUID, so comparing it to a group
      # UUID looks vacuous — the message suggests node.group was meant;
      # confirm against upstream before changing
      assert node.uuid != new_group.uuid and old_group.uuid != new_group.uuid, \
        "Assigning to current group is not possible"

      node.group = new_group.uuid

      # Update members of involved groups
      if node.name in old_group.members:
        old_group.members.remove(node.name)
      if node.name not in new_group.members:
        new_group.members.append(node.name)

    # Update timestamps and serials (only once per node/group object)
    now = time.time()
    for obj in frozenset(itertools.chain(*resmod)): # pylint: disable=W0142
      obj.serial_no += 1
      obj.mtime = now

    # Force ssconf update
    self._config_data.cluster.serial_no += 1

    self._WriteConfig()
2012

    
2013
  def _BumpSerialNo(self):
2014
    """Bump up the serial number of the config.
2015

2016
    """
2017
    self._config_data.serial_no += 1
2018
    self._config_data.mtime = time.time()
2019

    
2020
  def _AllUUIDObjects(self):
2021
    """Returns all objects with uuid attributes.
2022

2023
    """
2024
    return (self._config_data.instances.values() +
2025
            self._config_data.nodes.values() +
2026
            self._config_data.nodegroups.values() +
2027
            self._config_data.networks.values() +
2028
            [self._config_data.cluster])
2029

    
2030
  def _OpenConfig(self, accept_foreign):
    """Read the config data from disk.

    @type accept_foreign: bool
    @param accept_foreign: whether a configuration naming another node as
        master may be loaded
    @raise errors.ConfigurationError: if the data cannot be parsed, is
        incomplete, or names a foreign master while C{accept_foreign} is
        not set

    """
    raw_data = utils.ReadFile(self._cfg_file)

    try:
      data = objects.ConfigData.FromDict(serializer.Load(raw_data))
    except Exception, err:
      raise errors.ConfigurationError(err)

    # Make sure the configuration has the right version
    _ValidateConfig(data)

    if (not hasattr(data, "cluster") or
        not hasattr(data.cluster, "rsahostkeypub")):
      raise errors.ConfigurationError("Incomplete configuration"
                                      " (missing cluster.rsahostkeypub)")

    if data.cluster.master_node != self._my_hostname and not accept_foreign:
      msg = ("The configuration denotes node %s as master, while my"
             " hostname is %s; opening a foreign configuration is only"
             " possible in accept_foreign mode" %
             (data.cluster.master_node, self._my_hostname))
      raise errors.ConfigurationError(msg)

    self._config_data = data
    # reset the last serial as -1 so that the next write will cause
    # ssconf update
    self._last_cluster_serial = -1

    # Upgrade configuration if needed
    self._UpgradeConfig()

    # Remember the on-disk file identity so concurrent modifications can be
    # detected at the next write
    self._cfg_id = utils.GetFileID(path=self._cfg_file)
2065

    
2066
  def _UpgradeConfig(self):
    """Run any upgrade steps.

    This method performs both in-object upgrades and also update some data
    elements that need uniqueness across the whole configuration or interact
    with other objects.

    @warning: this function will call L{_WriteConfig()}, but also
        L{DropECReservations} so it needs to be called only from a
        "safe" place (the constructor). If one wanted to call it with
        the lock held, a DropECReservationUnlocked would need to be
        created first, to avoid causing deadlock.

    """
    # Keep a copy of the persistent part of _config_data to check for changes
    # Serialization doesn't guarantee order in dictionaries
    oldconf = copy.deepcopy(self._config_data.ToDict())

    # In-object upgrades
    self._config_data.UpgradeConfig()

    # Assign UUIDs to any object still missing one
    for item in self._AllUUIDObjects():
      if item.uuid is None:
        item.uuid = self._GenerateUniqueID(_UPGRADE_CONFIG_JID)
    # Pre-nodegroup configurations get a default group holding all nodes
    if not self._config_data.nodegroups:
      default_nodegroup_name = constants.INITIAL_NODE_GROUP_NAME
      default_nodegroup = objects.NodeGroup(name=default_nodegroup_name,
                                            members=[])
      self._UnlockedAddNodeGroup(default_nodegroup, _UPGRADE_CONFIG_JID, True)
    for node in self._config_data.nodes.values():
      if not node.group:
        node.group = self.LookupNodeGroup(None)
      # This is technically *not* an upgrade, but needs to be done both when
      # nodegroups are being added, and upon normally loading the config,
      # because the members list of a node group is discarded upon
      # serializing/deserializing the object.
      self._UnlockedAddNodeToGroup(node.name, node.group)

    # Only persist (and drop the upgrade-job reservations) if something
    # actually changed
    modified = (oldconf != self._config_data.ToDict())
    if modified:
      self._WriteConfig()
      # This is ok even if it acquires the internal lock, as _UpgradeConfig is
      # only called at config init time, without the lock held
      self.DropECReservations(_UPGRADE_CONFIG_JID)
    else:
      # Nothing changed: _WriteConfig was not called, so run the consistency
      # check it would have performed
      config_errors = self._UnlockedVerifyConfig()
      if config_errors:
        errmsg = ("Loaded configuration data is not consistent: %s" %
                  (utils.CommaJoin(config_errors)))
        logging.critical(errmsg)
2116

    
2117
  def _DistributeConfig(self, feedback_fn):
    """Distribute the configuration to the other nodes.

    Currently, this only copies the configuration file. In the future,
    it could be used to encapsulate the 2/3-phase update mechanism.

    @param feedback_fn: optional callable invoked with error messages
    @rtype: bool
    @return: whether the upload succeeded on every master candidate

    """
    if self._offline:
      return True

    success = True

    dest_nodes = []
    dest_addrs = []
    # We can skip checking whether _UnlockedGetNodeInfo returns None, since
    # the node list comes from _UnlockedGetNodeList and we are called with
    # the lock held, so no modifications can happen in between
    for node_name in self._UnlockedGetNodeList():
      if node_name == self._my_hostname:
        continue
      node_info = self._UnlockedGetNodeInfo(node_name)
      if not node_info.master_candidate:
        continue
      dest_nodes.append(node_info.name)
      dest_addrs.append(node_info.primary_ip)

    # TODO: Use dedicated resolver talking to config writer for name resolution
    result = \
      self._GetRpc(dest_addrs).call_upload_file(dest_nodes, self._cfg_file)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if not msg:
        continue
      msg = ("Copy of file %s to node %s failed: %s" %
             (self._cfg_file, to_node, msg))
      logging.error(msg)

      if feedback_fn:
        feedback_fn(msg)

      success = False

    return success
2161

    
2162
  def _WriteConfig(self, destination=None, feedback_fn=None):
    """Write the configuration data to persistent storage.

    @param destination: target path; defaults to the standard config file
    @param feedback_fn: optional callable invoked with warning/error messages
    @raise errors.ConfigurationError: if the on-disk file changed since it
        was last read (concurrent modification)

    """
    assert feedback_fn is None or callable(feedback_fn)

    # Warn on config errors, but don't abort the save - the
    # configuration has already been modified, and we can't revert;
    # the best we can do is to warn the user and save as is, leaving
    # recovery to the user
    config_errors = self._UnlockedVerifyConfig()
    if config_errors:
      errmsg = ("Configuration data is not consistent: %s" %
                (utils.CommaJoin(config_errors)))
      logging.critical(errmsg)
      if feedback_fn:
        feedback_fn(errmsg)

    if destination is None:
      destination = self._cfg_file
    self._BumpSerialNo()
    txt = serializer.Dump(self._config_data.ToDict())

    getents = self._getents()
    try:
      # SafeWriteFile refuses to write when the file ID changed, i.e. when
      # someone else modified the file since our last read
      fd = utils.SafeWriteFile(destination, self._cfg_id, data=txt,
                               close=False, gid=getents.confd_gid, mode=0640)
    except errors.LockError:
      raise errors.ConfigurationError("The configuration file has been"
                                      " modified since the last write, cannot"
                                      " update")
    try:
      # Record the new file identity for the next write's conflict check
      self._cfg_id = utils.GetFileID(fd=fd)
    finally:
      os.close(fd)

    self.write_count += 1

    # and redistribute the config file to master candidates
    self._DistributeConfig(feedback_fn)

    # Write ssconf files on all nodes (including locally)
    if self._last_cluster_serial < self._config_data.cluster.serial_no:
      if not self._offline:
        result = self._GetRpc(None).call_write_ssconf_files(
          self._UnlockedGetOnlineNodeList(),
          self._UnlockedGetSsconfValues())

        for nname, nresu in result.items():
          msg = nresu.fail_msg
          if msg:
            errmsg = ("Error while uploading ssconf files to"
                      " node %s: %s" % (nname, msg))
            logging.warning(errmsg)

            if feedback_fn:
              feedback_fn(errmsg)

      self._last_cluster_serial = self._config_data.cluster.serial_no
2221

    
2222
  def _UnlockedGetSsconfValues(self):
    """Return the values needed by ssconf.

    @rtype: dict
    @return: a dictionary with keys the ssconf names and values their
        associated value

    """
    fn = "\n".join
    instance_names = utils.NiceSort(self._UnlockedGetInstanceList())
    node_names = utils.NiceSort(self._UnlockedGetNodeList())
    node_info = [self._UnlockedGetNodeInfo(name) for name in node_names]
    # "name ip" pairs, one node per line
    node_pri_ips = ["%s %s" % (ninfo.name, ninfo.primary_ip)
                    for ninfo in node_info]
    node_snd_ips = ["%s %s" % (ninfo.name, ninfo.secondary_ip)
                    for ninfo in node_info]

    instance_data = fn(instance_names)
    off_data = fn(node.name for node in node_info if node.offline)
    on_data = fn(node.name for node in node_info if not node.offline)
    mc_data = fn(node.name for node in node_info if node.master_candidate)
    mc_ips_data = fn(node.primary_ip for node in node_info
                     if node.master_candidate)
    node_data = fn(node_names)
    node_pri_ips_data = fn(node_pri_ips)
    node_snd_ips_data = fn(node_snd_ips)

    cluster = self._config_data.cluster
    cluster_tags = fn(cluster.GetTags())

    hypervisor_list = fn(cluster.enabled_hypervisors)

    uid_pool = uidpool.FormatUidPool(cluster.uid_pool, separator="\n")

    # "uuid name" pairs, one group/network per line
    nodegroups = ["%s %s" % (nodegroup.uuid, nodegroup.name) for nodegroup in
                  self._config_data.nodegroups.values()]
    nodegroups_data = fn(utils.NiceSort(nodegroups))
    networks = ["%s %s" % (net.uuid, net.name) for net in
                self._config_data.networks.values()]
    networks_data = fn(utils.NiceSort(networks))

    ssconf_values = {
      constants.SS_CLUSTER_NAME: cluster.cluster_name,
      constants.SS_CLUSTER_TAGS: cluster_tags,
      constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir,
      constants.SS_SHARED_FILE_STORAGE_DIR: cluster.shared_file_storage_dir,
      constants.SS_MASTER_CANDIDATES: mc_data,
      constants.SS_MASTER_CANDIDATES_IPS: mc_ips_data,
      constants.SS_MASTER_IP: cluster.master_ip,
      constants.SS_MASTER_NETDEV: cluster.master_netdev,
      constants.SS_MASTER_NETMASK: str(cluster.master_netmask),
      constants.SS_MASTER_NODE: cluster.master_node,
      constants.SS_NODE_LIST: node_data,
      constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data,
      constants.SS_NODE_SECONDARY_IPS: node_snd_ips_data,
      constants.SS_OFFLINE_NODES: off_data,
      constants.SS_ONLINE_NODES: on_data,
      constants.SS_PRIMARY_IP_FAMILY: str(cluster.primary_ip_family),
      constants.SS_INSTANCE_LIST: instance_data,
      constants.SS_RELEASE_VERSION: constants.RELEASE_VERSION,
      constants.SS_HYPERVISOR_LIST: hypervisor_list,
      constants.SS_MAINTAIN_NODE_HEALTH: str(cluster.maintain_node_health),
      constants.SS_UID_POOL: uid_pool,
      constants.SS_NODEGROUPS: nodegroups_data,
      constants.SS_NETWORKS: networks_data,
      }
    # ssconf files may only carry strings; catch wrong types early instead
    # of writing garbage to disk
    bad_values = [(k, v) for k, v in ssconf_values.items()
                  if not isinstance(v, (str, basestring))]
    if bad_values:
      err = utils.CommaJoin("%s=%s" % (k, v) for k, v in bad_values)
      raise errors.ConfigurationError("Some ssconf key(s) have non-string"
                                      " values: %s" % err)
    return ssconf_values
2295

    
2296
  @locking.ssynchronized(_config_lock, shared=1)
  def GetSsconfValues(self):
    """Locked wrapper around L{_UnlockedGetSsconfValues}.

    """
    values = self._UnlockedGetSsconfValues()
    return values
2302

    
2303
  @locking.ssynchronized(_config_lock, shared=1)
  def GetVGName(self):
    """Return the cluster's volume group name.

    """
    cluster = self._config_data.cluster
    return cluster.volume_group_name
2309

    
2310
  @locking.ssynchronized(_config_lock)
  def SetVGName(self, vg_name):
    """Set the volume group name and persist the change.

    """
    cluster = self._config_data.cluster
    cluster.volume_group_name = vg_name
    cluster.serial_no += 1
    self._WriteConfig()
2318

    
2319
  @locking.ssynchronized(_config_lock, shared=1)
  def GetDRBDHelper(self):
    """Return the configured DRBD usermode helper.

    """
    cluster = self._config_data.cluster
    return cluster.drbd_usermode_helper
2325

    
2326
  @locking.ssynchronized(_config_lock)
  def SetDRBDHelper(self, drbd_helper):
    """Set the DRBD usermode helper and persist the change.

    """
    cluster = self._config_data.cluster
    cluster.drbd_usermode_helper = drbd_helper
    cluster.serial_no += 1
    self._WriteConfig()
2334

    
2335
  @locking.ssynchronized(_config_lock, shared=1)
  def GetMACPrefix(self):
    """Return the cluster's MAC address prefix.

    """
    cluster = self._config_data.cluster
    return cluster.mac_prefix
2341

    
2342
  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterInfo(self):
    """Returns information about the cluster

    @rtype: L{objects.Cluster}
    @return: the cluster object

    """
    cluster = self._config_data.cluster
    return cluster
2351

    
2352
  @locking.ssynchronized(_config_lock, shared=1)
  def HasAnyDiskOfType(self, dev_type):
    """Check whether any disk of the given type exists in the configuration.

    """
    cfg = self._config_data
    return cfg.HasAnyDiskOfType(dev_type)
2358

    
2359
  @locking.ssynchronized(_config_lock)
  def Update(self, target, feedback_fn, ec_id=None):
    """Notify function to be called after updates.

    This function must be called when an object (as returned by
    GetInstanceInfo, GetNodeInfo, GetCluster) has been updated and the
    caller wants the modifications saved to the backing store. Note
    that all modified objects will be saved, but the target argument
    is the one the caller wants to ensure that it's saved.

    @param target: an instance of either L{objects.Cluster},
        L{objects.Node} or L{objects.Instance} which is existing in
        the cluster
    @param feedback_fn: Callable feedback function
    @param ec_id: execution context id whose reserved IPs are committed
    @raise errors.ProgrammerError: if the configuration was never read or
        C{target} is of an unsupported type
    @raise errors.ConfigurationError: if C{target} is not the same object
        as the one stored in the configuration

    """
    if self._config_data is None:
      raise errors.ProgrammerError("Configuration file not read,"
                                   " cannot save.")
    update_serial = False
    # Identity (not equality) dispatch: the object must be the very one
    # held in the configuration, otherwise it was read before a reload
    if isinstance(target, objects.Cluster):
      test = target == self._config_data.cluster
    elif isinstance(target, objects.Node):
      test = target in self._config_data.nodes.values()
      update_serial = True
    elif isinstance(target, objects.Instance):
      test = target in self._config_data.instances.values()
    elif isinstance(target, objects.NodeGroup):
      test = target in self._config_data.nodegroups.values()
    elif isinstance(target, objects.Network):
      test = target in self._config_data.networks.values()
    else:
      raise errors.ProgrammerError("Invalid object type (%s) passed to"
                                   " ConfigWriter.Update" % type(target))
    if not test:
      raise errors.ConfigurationError("Configuration updated since object"
                                      " has been read or unknown object")
    target.serial_no += 1
    target.mtime = now = time.time()

    if update_serial:
      # for node updates, we need to increase the cluster serial too
      self._config_data.cluster.serial_no += 1
      self._config_data.cluster.mtime = now

    if isinstance(target, objects.Instance):
      self._UnlockedReleaseDRBDMinors(target.name)

    if ec_id is not None:
      # Commit all ips reserved by OpInstanceSetParams and OpGroupSetParams
      self._UnlockedCommitTemporaryIps(ec_id)

    self._WriteConfig(feedback_fn=feedback_fn)
2412

    
2413
  @locking.ssynchronized(_config_lock)
  def DropECReservations(self, ec_id):
    """Drop per-execution-context reservations

    """
    for res_manager in self._all_rms:
      res_manager.DropECReservations(ec_id)
2420

    
2421
  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNetworksInfo(self):
    """Get configuration info of all the networks.

    """
    networks = self._config_data.networks
    return dict(networks)
2427

    
2428
  def _UnlockedGetNetworkList(self):
2429
    """Get the list of networks.
2430

2431
    This function is for internal use, when the config lock is already held.
2432

2433
    """
2434
    return self._config_data.networks.keys()
2435

    
2436
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNetworkList(self):
    """Get the list of networks.

    @return: array of networks, ex. ["main", "vlan100", "200"]

    """
    network_list = self._UnlockedGetNetworkList()
    return network_list
2444

    
2445
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNetworkNames(self):
    """Get a list of network names

    """
    return [net.name for net in self._config_data.networks.values()]
2453

    
2454
  def _UnlockedGetNetwork(self, uuid):
2455
    """Returns information about a network.
2456

2457
    This function is for internal use, when the config lock is already held.
2458

2459
    """
2460
    if uuid not in self._config_data.networks:
2461
      return None
2462

    
2463
    return self._config_data.networks[uuid]
2464

    
2465
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNetwork(self, uuid):
    """Retrieve a network object from the configuration.

    Locking wrapper around L{_UnlockedGetNetwork}.

    @param uuid: UUID of the network to look up

    @rtype: L{objects.Network}
    @return: the network object, or C{None} if the UUID is not known

    """
    return self._UnlockedGetNetwork(uuid)
2478

    
2479
  @locking.ssynchronized(_config_lock)
  def AddNetwork(self, net, ec_id, check_uuid=True):
    """Add a network to the configuration and persist it.

    @type net: L{objects.Network}
    @param net: the Network object to add
    @type ec_id: string
    @param ec_id: unique id for the job to use when creating a missing UUID
    @type check_uuid: bool
    @param check_uuid: whether to make sure C{net} carries a UUID before
        it is stored

    """
    self._UnlockedAddNetwork(net, ec_id, check_uuid)
    self._WriteConfig()
2491

    
2492
  def _UnlockedAddNetwork(self, net, ec_id, check_uuid):
2493
    """Add a network to the configuration.
2494

2495
    """
2496
    logging.info("Adding network %s to configuration", net.name)
2497

    
2498
    if check_uuid:
2499
      self._EnsureUUID(net, ec_id)
2500

    
2501
    net.serial_no = 1
2502
    self._config_data.networks[net.uuid] = net
2503
    self._config_data.cluster.serial_no += 1
2504

    
2505
  def _UnlockedLookupNetwork(self, target):
2506
    """Lookup a network's UUID.
2507

2508
    @type target: string
2509
    @param target: network name or UUID
2510
    @rtype: string
2511
    @return: network UUID
2512
    @raises errors.OpPrereqError: when the target network cannot be found
2513

2514
    """
2515
    if target is None:
2516
      return None
2517
    if target in self._config_data.networks:
2518
      return target
2519
    for net in self._config_data.networks.values():
2520
      if net.name == target:
2521
        return net.uuid
2522
    raise errors.OpPrereqError("Network '%s' not found" % target,
2523
                               errors.ECODE_NOENT)
2524

    
2525
  @locking.ssynchronized(_config_lock, shared=1)
  def LookupNetwork(self, target):
    """Lookup a network's UUID.

    Locking wrapper around L{_UnlockedLookupNetwork}.

    @type target: string
    @param target: network name or UUID
    @rtype: string
    @return: network UUID

    """
    return self._UnlockedLookupNetwork(target)
2538

    
2539
  @locking.ssynchronized(_config_lock)
  def RemoveNetwork(self, network_uuid):
    """Remove a network from the configuration and persist the change.

    @type network_uuid: string
    @param network_uuid: the UUID of the network to remove
    @raises errors.ConfigurationError: if the UUID is not a known network

    """
    logging.info("Removing network %s from configuration", network_uuid)

    cfg = self._config_data
    if network_uuid not in cfg.networks:
      raise errors.ConfigurationError("Unknown network '%s'" % network_uuid)

    del cfg.networks[network_uuid]
    # removing a network is a cluster-level configuration change
    cfg.cluster.serial_no += 1
    self._WriteConfig()
2555

    
2556
  def _UnlockedGetGroupNetParams(self, net_uuid, node):
2557
    """Get the netparams (mode, link) of a network.
2558

2559
    Get a network's netparams for a given node.
2560

2561
    @type net_uuid: string
2562
    @param net_uuid: network uuid
2563
    @type node: string
2564
    @param node: node name
2565
    @rtype: dict or None
2566
    @return: netparams
2567

2568
    """
2569
    node_info = self._UnlockedGetNodeInfo(node)
2570
    nodegroup_info = self._UnlockedGetNodeGroup(node_info.group)
2571
    netparams = nodegroup_info.networks.get(net_uuid, None)
2572

    
2573
    return netparams
2574

    
2575
  @locking.ssynchronized(_config_lock, shared=1)
  def GetGroupNetParams(self, net_uuid, node):
    """Locking wrapper of _UnlockedGetGroupNetParams()

    @param net_uuid: network uuid
    @param node: node name

    """
    return self._UnlockedGetGroupNetParams(net_uuid, node)
2581

    
2582
  @locking.ssynchronized(_config_lock, shared=1)
  def CheckIPInNodeGroup(self, ip, node):
    """Check IP uniqueness in nodegroup.

    Check networks that are connected in the node's node group
    if ip is contained in any of them. Used when creating/adding
    a NIC to ensure uniqueness among nodegroups.

    @type ip: string
    @param ip: ip address
    @type node: string
    @param node: node name
    @rtype: (string, dict) or (None, None)
    @return: (network name, netparams)

    """
    if ip is None:
      return (None, None)
    node_info = self._UnlockedGetNodeInfo(node)
    nodegroup_info = self._UnlockedGetNodeGroup(node_info.group)
    # walk every network connected to this node's group and test membership
    for net_uuid, netparams in nodegroup_info.networks.items():
      net_info = self._UnlockedGetNetwork(net_uuid)
      if network.AddressPool(net_info).Contains(ip):
        return (net_info.name, netparams)
    return (None, None)