#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Configuration management for Ganeti

This module provides the interface to the Ganeti cluster configuration.

The configuration data is stored on every node but is updated on the master
only. After each update, the master distributes the data to the other nodes.

Currently, the data storage format is JSON. YAML was slow and consumed too
much memory.

"""

# pylint: disable=R0904
# R0904: Too many public methods

import os
import random
import logging
import time
import itertools
from functools import wraps

from ganeti import errors
from ganeti import locking
from ganeti import utils
from ganeti import constants
from ganeti import rpc
from ganeti import objects
from ganeti import serializer
from ganeti import uidpool
from ganeti import netutils
from ganeti import runtime
from ganeti import pathutils
from ganeti import network


_config_lock = locking.SharedLock("ConfigWriter")

# job id used for resource management at config upgrade time
_UPGRADE_CONFIG_JID = "jid-cfg-upgrade"


def _ValidateConfig(data):
  """Verifies that a configuration object looks valid.

  This only verifies the version of the configuration.

  @raise errors.ConfigurationError: if the version differs from what
      we expect

  """
  if data.version != constants.CONFIG_VERSION:
    raise errors.ConfigVersionMismatch(constants.CONFIG_VERSION, data.version)


class TemporaryReservationManager:
  """A temporary resource reservation manager.

  This is used to reserve resources in a job, before using them, making sure
  other jobs cannot get them in the meantime.

  """
  def __init__(self):
    self._ec_reserved = {}

  def Reserved(self, resource):
    for holder_reserved in self._ec_reserved.values():
      if resource in holder_reserved:
        return True
    return False

  def Reserve(self, ec_id, resource):
    if self.Reserved(resource):
      raise errors.ReservationError("Duplicate reservation for resource '%s'"
                                    % str(resource))
    if ec_id not in self._ec_reserved:
      self._ec_reserved[ec_id] = set([resource])
    else:
      self._ec_reserved[ec_id].add(resource)

  def DropECReservations(self, ec_id):
    if ec_id in self._ec_reserved:
      del self._ec_reserved[ec_id]

  def GetReserved(self):
    all_reserved = set()
    for holder_reserved in self._ec_reserved.values():
      all_reserved.update(holder_reserved)
    return all_reserved

  def GetECReserved(self, ec_id):
    """Retrieve all reservations for a specific execution context,
    e.g. when committing reserved IPs for a specific network.

    """
    ec_reserved = set()
    if ec_id in self._ec_reserved:
      ec_reserved.update(self._ec_reserved[ec_id])
    return ec_reserved

  def Generate(self, existing, generate_one_fn, ec_id):
    """Generate a new resource of this type

    """
    assert callable(generate_one_fn)

    all_elems = self.GetReserved()
    all_elems.update(existing)
    retries = 64
    while retries > 0:
      new_resource = generate_one_fn()
      if new_resource is not None and new_resource not in all_elems:
        break
      # count down so that, once the attempts are exhausted, the while/else
      # below raises instead of looping forever
      retries -= 1
    else:
      raise errors.ConfigurationError("Unable to generate new resource"
                                      " (last tried: %s)" % new_resource)
    self.Reserve(ec_id, new_resource)
    return new_resource
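
# Example (illustrative, with a made-up execution context id "job-1"):
#
#   trm = TemporaryReservationManager()
#   trm.Reserve("job-1", "aa:00:00:4f:12:42")
#   trm.Reserved("aa:00:00:4f:12:42")    # True, regardless of ec_id
#   trm.DropECReservations("job-1")      # give everything back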


def _MatchNameComponentIgnoreCase(short_name, names):
  """Wrapper around L{utils.text.MatchNameComponent}.

  """
  return utils.MatchNameComponent(short_name, names, case_sensitive=False)


def _CheckInstanceDiskIvNames(disks):
  """Checks if instance's disks' C{iv_name} attributes are in order.

  @type disks: list of L{objects.Disk}
  @param disks: List of disks
  @rtype: list of tuples; (int, string, string)
  @return: List of wrongly named disks, each tuple contains disk index,
    expected and actual name

  """
  result = []

  for (idx, disk) in enumerate(disks):
    exp_iv_name = "disk/%s" % idx
    if disk.iv_name != exp_iv_name:
      result.append((idx, exp_iv_name, disk.iv_name))

  return result


class ConfigWriter:
  """The interface to the cluster configuration.

  @ivar _temporary_lvs: reservation manager for temporary LVs
  @ivar _all_rms: a list of all temporary reservation managers

  """
  def __init__(self, cfg_file=None, offline=False, _getents=runtime.GetEnts,
               accept_foreign=False):
    self.write_count = 0
    self._lock = _config_lock
    self._config_data = None
    self._offline = offline
    if cfg_file is None:
      self._cfg_file = pathutils.CLUSTER_CONF_FILE
    else:
      self._cfg_file = cfg_file
    self._getents = _getents
    self._temporary_ids = TemporaryReservationManager()
    self._temporary_drbds = {}
    self._temporary_macs = TemporaryReservationManager()
    self._temporary_secrets = TemporaryReservationManager()
    self._temporary_lvs = TemporaryReservationManager()
    self._temporary_ips = TemporaryReservationManager()
    self._all_rms = [self._temporary_ids, self._temporary_macs,
                     self._temporary_secrets, self._temporary_lvs,
                     self._temporary_ips]
    # Note: in order to prevent errors when resolving our name in
    # _DistributeConfig, we compute it here once and reuse it; it's
    # better to raise an error before starting to modify the config
    # file than after it was modified
    self._my_hostname = netutils.Hostname.GetSysName()
    self._last_cluster_serial = -1
    self._cfg_id = None
    self._context = None
    self._OpenConfig(accept_foreign)

  def _GetRpc(self, address_list):
    """Returns RPC runner for configuration.

    """
    return rpc.ConfigRunner(self._context, address_list)

  def SetContext(self, context):
    """Sets Ganeti context.

    """
    self._context = context

  # this method needs to be static, so that we can call it on the class
  @staticmethod
  def IsCluster():
    """Check if the cluster is configured.

    """
    return os.path.exists(pathutils.CLUSTER_CONF_FILE)

  def _GenerateMACPrefix(self, net=None):
    def _get_mac_prefix(view_func):
      def _decorator(*args, **kwargs):
        prefix = self._config_data.cluster.mac_prefix
        if net:
          net_uuid = self._UnlockedLookupNetwork(net)
          if net_uuid:
            nobj = self._UnlockedGetNetwork(net_uuid)
            if nobj.mac_prefix:
              prefix = nobj.mac_prefix
        suffix = view_func(*args, **kwargs)
        return prefix + ":" + suffix
      return wraps(view_func)(_decorator)
    return _get_mac_prefix

  def _GenerateMACSuffix(self):
    """Generate one mac address

    """
    byte1 = random.randrange(0, 256)
    byte2 = random.randrange(0, 256)
    byte3 = random.randrange(0, 256)
    suffix = "%02x:%02x:%02x" % (byte1, byte2, byte3)
    return suffix

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNdParams(self, node):
    """Get the node params populated with cluster defaults.

    @type node: L{objects.Node}
    @param node: The node we want to know the params for
    @return: A dict with the filled in node params

    """
    nodegroup = self._UnlockedGetNodeGroup(node.group)
    return self._config_data.cluster.FillND(node, nodegroup)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceDiskParams(self, instance):
    """Get the disk params populated with inherit chain.

    @type instance: L{objects.Instance}
    @param instance: The instance we want to know the params for
    @return: A dict with the filled in disk params

    """
    node = self._UnlockedGetNodeInfo(instance.primary_node)
    nodegroup = self._UnlockedGetNodeGroup(node.group)
    return self._UnlockedGetGroupDiskParams(nodegroup)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetGroupDiskParams(self, group):
    """Get the disk params populated with inherit chain.

    @type group: L{objects.NodeGroup}
    @param group: The group we want to know the params for
    @return: A dict with the filled in disk params

    """
    return self._UnlockedGetGroupDiskParams(group)

  def _UnlockedGetGroupDiskParams(self, group):
    """Get the disk params populated with inherit chain down to node-group.

    @type group: L{objects.NodeGroup}
    @param group: The group we want to know the params for
    @return: A dict with the filled in disk params

    """
    return self._config_data.cluster.SimpleFillDP(group.diskparams)

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateMAC(self, net, ec_id):
    """Generate a MAC for an instance.

    This should check the current instances for duplicates.

    """
    existing = self._AllMACs()
    gen_mac = self._GenerateMACPrefix(net)(self._GenerateMACSuffix)
    return self._temporary_ids.Generate(existing, gen_mac, ec_id)
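
  # Note: _GenerateMACPrefix(net)(self._GenerateMACSuffix) above builds a
  # callable returning "<prefix>:xx:xx:xx", taking the prefix from the
  # network's mac_prefix when set and from the cluster default otherwise.
  # Generated MACs are reserved in _temporary_ids, while ReserveMAC() below
  # records explicit reservations in _temporary_macs; both managers are
  # listed in _all_rms.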

  @locking.ssynchronized(_config_lock, shared=1)
  def ReserveMAC(self, mac, ec_id):
    """Reserve a MAC for an instance.

    This only checks instances managed by this cluster, it does not
    check for potential collisions elsewhere.

    """
    all_macs = self._AllMACs()
    if mac in all_macs:
      raise errors.ReservationError("mac already in use")
    else:
      self._temporary_macs.Reserve(ec_id, mac)

  def _UnlockedCommitTemporaryIps(self, ec_id):
    """Commit all reserved IP addresses to their respective pools.

    """
    for action, address, net_uuid in self._temporary_ips.GetECReserved(ec_id):
      self._UnlockedCommitIp(action, net_uuid, address)

  def _UnlockedCommitIp(self, action, net_uuid, address):
    """Commit a reserved or released IP address to an IP pool.

    Depending on the action, the IP address is either marked as reserved in
    the network's IP pool or released back to it.

    """
    nobj = self._UnlockedGetNetwork(net_uuid)
    pool = network.AddressPool(nobj)
    if action == constants.RESERVE_ACTION:
      pool.Reserve(address)
    elif action == constants.RELEASE_ACTION:
      pool.Release(address)

  def _UnlockedReleaseIp(self, net_uuid, address, ec_id):
    """Give a specific IP address back to an IP pool.

    The release is recorded as a temporary reservation and applied to the
    pool designated by net_uuid when the reservations are committed.

    """
    self._temporary_ips.Reserve(ec_id,
                                (constants.RELEASE_ACTION, address, net_uuid))
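
  # Note: temporary IP reservations are (action, address, net_uuid) tuples,
  # with action being constants.RESERVE_ACTION or constants.RELEASE_ACTION;
  # they only touch the address pools once _UnlockedCommitTemporaryIps()
  # replays them through _UnlockedCommitIp().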

  @locking.ssynchronized(_config_lock, shared=1)
  def ReleaseIp(self, net, address, ec_id):
    """Give a specified IP address back to an IP pool.

    This is just a wrapper around _UnlockedReleaseIp.

    """
    net_uuid = self._UnlockedLookupNetwork(net)
    if net_uuid:
      self._UnlockedReleaseIp(net_uuid, address, ec_id)

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateIp(self, net, ec_id):
    """Find a free IPv4 address for an instance.

    """
    net_uuid = self._UnlockedLookupNetwork(net)
    nobj = self._UnlockedGetNetwork(net_uuid)
    pool = network.AddressPool(nobj)

    def gen_one():
      try:
        ip = pool.GenerateFree()
      except errors.AddressPoolError:
        raise errors.ReservationError("Cannot generate IP. Network is full")
      return (constants.RESERVE_ACTION, ip, net_uuid)

    _, address, _ = self._temporary_ips.Generate([], gen_one, ec_id)
    return address

  def _UnlockedReserveIp(self, net_uuid, address, ec_id):
    """Reserve a given IPv4 address for use by an instance.

    """
    nobj = self._UnlockedGetNetwork(net_uuid)
    pool = network.AddressPool(nobj)
    try:
      isreserved = pool.IsReserved(address)
    except errors.AddressPoolError:
      raise errors.ReservationError("IP address not in network")
    if isreserved:
      raise errors.ReservationError("IP address already in use")

    return self._temporary_ips.Reserve(ec_id,
                                       (constants.RESERVE_ACTION,
                                        address, net_uuid))

  @locking.ssynchronized(_config_lock, shared=1)
  def ReserveIp(self, net, address, ec_id):
    """Reserve a given IPv4 address for use by an instance.

    """
    net_uuid = self._UnlockedLookupNetwork(net)
    if net_uuid:
      return self._UnlockedReserveIp(net_uuid, address, ec_id)

  @locking.ssynchronized(_config_lock, shared=1)
  def ReserveLV(self, lv_name, ec_id):
    """Reserve a VG/LV pair for an instance.

    @type lv_name: string
    @param lv_name: the logical volume name to reserve

    """
    all_lvs = self._AllLVs()
    if lv_name in all_lvs:
      raise errors.ReservationError("LV already in use")
    else:
      self._temporary_lvs.Reserve(ec_id, lv_name)

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateDRBDSecret(self, ec_id):
    """Generate a DRBD secret.

    This checks the current disks for duplicates.

    """
    return self._temporary_secrets.Generate(self._AllDRBDSecrets(),
                                            utils.GenerateSecret,
                                            ec_id)

  def _AllLVs(self):
    """Compute the list of all LVs.

    """
    lvnames = set()
    for instance in self._config_data.instances.values():
      node_data = instance.MapLVsByNode()
      for lv_list in node_data.values():
        lvnames.update(lv_list)
    return lvnames

  def _AllIDs(self, include_temporary):
    """Compute the list of all UUIDs and names we have.

    @type include_temporary: boolean
    @param include_temporary: whether to include the _temporary_ids set
    @rtype: set
    @return: a set of IDs

    """
    existing = set()
    if include_temporary:
      existing.update(self._temporary_ids.GetReserved())
    existing.update(self._AllLVs())
    existing.update(self._config_data.instances.keys())
    existing.update(self._config_data.nodes.keys())
    existing.update([i.uuid for i in self._AllUUIDObjects() if i.uuid])
    return existing

  def _GenerateUniqueID(self, ec_id):
    """Generate a unique UUID.

    This checks the current nodes, instances and disk names for
    duplicates.

    @rtype: string
    @return: the unique id

    """
    existing = self._AllIDs(include_temporary=False)
    return self._temporary_ids.Generate(existing, utils.NewUUID, ec_id)

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateUniqueID(self, ec_id):
    """Generate a unique ID.

    This is just a wrapper over the unlocked version.

    @type ec_id: string
    @param ec_id: unique id for the job to reserve the id to

    """
    return self._GenerateUniqueID(ec_id)

  def _AllMACs(self):
    """Return all MACs present in the config.

    @rtype: list
    @return: the list of all MACs

    """
    result = []
    for instance in self._config_data.instances.values():
      for nic in instance.nics:
        result.append(nic.mac)

    return result

  def _AllDRBDSecrets(self):
    """Return all DRBD secrets present in the config.

    @rtype: list
    @return: the list of all DRBD secrets

    """
    def helper(disk, result):
      """Recursively gather secrets from this disk."""
      if disk.dev_type == constants.DT_DRBD8:
        result.append(disk.logical_id[5])
      if disk.children:
        for child in disk.children:
          helper(child, result)

    result = []
    for instance in self._config_data.instances.values():
      for disk in instance.disks:
        helper(disk, result)

    return result
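
  # Note: for DRBD8 disks, logical_id is the 6-tuple
  # (node_a, node_b, port, minor_a, minor_b, secret), so index 5 above is
  # the shared secret; the same layout gives the port at index 2 and the
  # minors at indices 3 and 4 used elsewhere in this module.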

  def _CheckDiskIDs(self, disk, l_ids, p_ids):
    """Compute duplicate disk IDs

    @type disk: L{objects.Disk}
    @param disk: the disk at which to start searching
    @type l_ids: list
    @param l_ids: list of current logical ids
    @type p_ids: list
    @param p_ids: list of current physical ids
    @rtype: list
    @return: a list of error messages

    """
    result = []
    if disk.logical_id is not None:
      if disk.logical_id in l_ids:
        result.append("duplicate logical id %s" % str(disk.logical_id))
      else:
        l_ids.append(disk.logical_id)
    if disk.physical_id is not None:
      if disk.physical_id in p_ids:
        result.append("duplicate physical id %s" % str(disk.physical_id))
      else:
        p_ids.append(disk.physical_id)

    if disk.children:
      for child in disk.children:
        result.extend(self._CheckDiskIDs(child, l_ids, p_ids))
    return result

  def _UnlockedVerifyConfig(self):
    """Verify function.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    # pylint: disable=R0914
    result = []
    seen_macs = []
    ports = {}
    data = self._config_data
    cluster = data.cluster
    seen_lids = []
    seen_pids = []

    # global cluster checks
    if not cluster.enabled_hypervisors:
      result.append("enabled hypervisors list doesn't have any entries")
    invalid_hvs = set(cluster.enabled_hypervisors) - constants.HYPER_TYPES
    if invalid_hvs:
      result.append("enabled hypervisors contains invalid entries: %s" %
                    invalid_hvs)
    missing_hvp = (set(cluster.enabled_hypervisors) -
                   set(cluster.hvparams.keys()))
    if missing_hvp:
      result.append("hypervisor parameters missing for the enabled"
                    " hypervisor(s) %s" % utils.CommaJoin(missing_hvp))

    if cluster.master_node not in data.nodes:
      result.append("cluster has invalid primary node '%s'" %
                    cluster.master_node)

    def _helper(owner, attr, value, template):
      try:
        utils.ForceDictType(value, template)
      except errors.GenericError, err:
        result.append("%s has invalid %s: %s" % (owner, attr, err))

    def _helper_nic(owner, params):
      try:
        objects.NIC.CheckParameterSyntax(params)
      except errors.ConfigurationError, err:
        result.append("%s has invalid nicparams: %s" % (owner, err))

    def _helper_ipolicy(owner, params, check_std):
      try:
        objects.InstancePolicy.CheckParameterSyntax(params, check_std)
      except errors.ConfigurationError, err:
        result.append("%s has invalid instance policy: %s" % (owner, err))

    def _helper_ispecs(owner, params):
      for key, value in params.items():
        if key in constants.IPOLICY_ISPECS:
          fullkey = "ipolicy/" + key
          _helper(owner, fullkey, value, constants.ISPECS_PARAMETER_TYPES)
        else:
          # FIXME: assuming list type
          if key in constants.IPOLICY_PARAMETERS:
            exp_type = float
          else:
            exp_type = list
          if not isinstance(value, exp_type):
            result.append("%s has invalid instance policy: for %s,"
                          " expecting %s, got %s" %
                          (owner, key, exp_type.__name__, type(value)))

    # check cluster parameters
    _helper("cluster", "beparams", cluster.SimpleFillBE({}),
            constants.BES_PARAMETER_TYPES)
    _helper("cluster", "nicparams", cluster.SimpleFillNIC({}),
            constants.NICS_PARAMETER_TYPES)
    _helper_nic("cluster", cluster.SimpleFillNIC({}))
    _helper("cluster", "ndparams", cluster.SimpleFillND({}),
            constants.NDS_PARAMETER_TYPES)
    _helper_ipolicy("cluster", cluster.SimpleFillIPolicy({}), True)
    _helper_ispecs("cluster", cluster.SimpleFillIPolicy({}))

    # per-instance checks
    for instance_name in data.instances:
      instance = data.instances[instance_name]
      if instance.name != instance_name:
        result.append("instance '%s' is indexed by wrong name '%s'" %
                      (instance.name, instance_name))
      if instance.primary_node not in data.nodes:
        result.append("instance '%s' has invalid primary node '%s'" %
                      (instance_name, instance.primary_node))
      for snode in instance.secondary_nodes:
        if snode not in data.nodes:
          result.append("instance '%s' has invalid secondary node '%s'" %
                        (instance_name, snode))
      for idx, nic in enumerate(instance.nics):
        if nic.mac in seen_macs:
          result.append("instance '%s' has NIC %d mac %s duplicate" %
                        (instance_name, idx, nic.mac))
        else:
          seen_macs.append(nic.mac)
        if nic.nicparams:
          filled = cluster.SimpleFillNIC(nic.nicparams)
          owner = "instance %s nic %d" % (instance.name, idx)
          _helper(owner, "nicparams",
                  filled, constants.NICS_PARAMETER_TYPES)
          _helper_nic(owner, filled)

      # parameter checks
      if instance.beparams:
        _helper("instance %s" % instance.name, "beparams",
                cluster.FillBE(instance), constants.BES_PARAMETER_TYPES)

      # gather the drbd ports for duplicate checks
      for (idx, dsk) in enumerate(instance.disks):
        if dsk.dev_type in constants.LDS_DRBD:
          tcp_port = dsk.logical_id[2]
          if tcp_port not in ports:
            ports[tcp_port] = []
          ports[tcp_port].append((instance.name, "drbd disk %s" % idx))
      # gather network port reservation
      net_port = getattr(instance, "network_port", None)
      if net_port is not None:
        if net_port not in ports:
          ports[net_port] = []
        ports[net_port].append((instance.name, "network port"))

      # instance disk verify
      for idx, disk in enumerate(instance.disks):
        result.extend(["instance '%s' disk %d error: %s" %
                       (instance.name, idx, msg) for msg in disk.Verify()])
        result.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids))

      wrong_names = _CheckInstanceDiskIvNames(instance.disks)
      if wrong_names:
        tmp = "; ".join(("name of disk %s should be '%s', but is '%s'" %
                         (idx, exp_name, actual_name))
                        for (idx, exp_name, actual_name) in wrong_names)

        result.append("Instance '%s' has wrongly named disks: %s" %
                      (instance.name, tmp))

    # cluster-wide pool of free ports
    for free_port in cluster.tcpudp_port_pool:
      if free_port not in ports:
        ports[free_port] = []
      ports[free_port].append(("cluster", "port marked as free"))

    # compute tcp/udp duplicate ports
    keys = ports.keys()
    keys.sort()
    for pnum in keys:
      pdata = ports[pnum]
      if len(pdata) > 1:
        txt = utils.CommaJoin(["%s/%s" % val for val in pdata])
        result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))

    # highest used tcp port check
    if keys:
      if keys[-1] > cluster.highest_used_port:
        result.append("Highest used port mismatch, saved %s, computed %s" %
                      (cluster.highest_used_port, keys[-1]))

    if not data.nodes[cluster.master_node].master_candidate:
      result.append("Master node is not a master candidate")

    # master candidate checks
    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats()
    if mc_now < mc_max:
      result.append("Not enough master candidates: actual %d, target %d" %
                    (mc_now, mc_max))

    # node checks
    for node_name, node in data.nodes.items():
      if node.name != node_name:
        result.append("Node '%s' is indexed by wrong name '%s'" %
                      (node.name, node_name))
      if [node.master_candidate, node.drained, node.offline].count(True) > 1:
        result.append("Node %s state is invalid: master_candidate=%s,"
                      " drain=%s, offline=%s" %
                      (node.name, node.master_candidate, node.drained,
                       node.offline))
      if node.group not in data.nodegroups:
        result.append("Node '%s' has invalid group '%s'" %
                      (node.name, node.group))
      else:
        _helper("node %s" % node.name, "ndparams",
                cluster.FillND(node, data.nodegroups[node.group]),
                constants.NDS_PARAMETER_TYPES)

    # nodegroups checks
    nodegroups_names = set()
    for nodegroup_uuid in data.nodegroups:
      nodegroup = data.nodegroups[nodegroup_uuid]
      if nodegroup.uuid != nodegroup_uuid:
        result.append("node group '%s' (uuid: '%s') indexed by wrong uuid '%s'"
                      % (nodegroup.name, nodegroup.uuid, nodegroup_uuid))
      if utils.UUID_RE.match(nodegroup.name.lower()):
        result.append("node group '%s' (uuid: '%s') has uuid-like name" %
                      (nodegroup.name, nodegroup.uuid))
      if nodegroup.name in nodegroups_names:
        result.append("duplicate node group name '%s'" % nodegroup.name)
      else:
        nodegroups_names.add(nodegroup.name)
      group_name = "group %s" % nodegroup.name
      _helper_ipolicy(group_name, cluster.SimpleFillIPolicy(nodegroup.ipolicy),
                      False)
      _helper_ispecs(group_name, cluster.SimpleFillIPolicy(nodegroup.ipolicy))
      if nodegroup.ndparams:
        _helper(group_name, "ndparams",
                cluster.SimpleFillND(nodegroup.ndparams),
                constants.NDS_PARAMETER_TYPES)

    # drbd minors check
    _, duplicates = self._UnlockedComputeDRBDMap()
    for node, minor, instance_a, instance_b in duplicates:
      result.append("DRBD minor %d on node %s is assigned twice to instances"
                    " %s and %s" % (minor, node, instance_a, instance_b))

    # IP checks
    default_nicparams = cluster.nicparams[constants.PP_DEFAULT]
    ips = {}

    def _AddIpAddress(ip, name):
      ips.setdefault(ip, []).append(name)

    _AddIpAddress(cluster.master_ip, "cluster_ip")

    for node in data.nodes.values():
      _AddIpAddress(node.primary_ip, "node:%s/primary" % node.name)
      if node.secondary_ip != node.primary_ip:
        _AddIpAddress(node.secondary_ip, "node:%s/secondary" % node.name)

    for instance in data.instances.values():
      for idx, nic in enumerate(instance.nics):
        if nic.ip is None:
          continue

        nicparams = objects.FillDict(default_nicparams, nic.nicparams)
        nic_mode = nicparams[constants.NIC_MODE]
        nic_link = nicparams[constants.NIC_LINK]

        if nic_mode == constants.NIC_MODE_BRIDGED:
          link = "bridge:%s" % nic_link
        elif nic_mode == constants.NIC_MODE_ROUTED:
          link = "route:%s" % nic_link
        else:
          raise errors.ProgrammerError("NIC mode '%s' not handled" % nic_mode)

        _AddIpAddress("%s/%s/%s" % (link, nic.ip, nic.network),
                      "instance:%s/nic:%d" % (instance.name, idx))

    for ip, owners in ips.items():
      if len(owners) > 1:
        result.append("IP address %s is used by multiple owners: %s" %
                      (ip, utils.CommaJoin(owners)))

    return result

  @locking.ssynchronized(_config_lock, shared=1)
  def VerifyConfig(self):
    """Verify function.

    This is just a wrapper over L{_UnlockedVerifyConfig}.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    return self._UnlockedVerifyConfig()
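
  # Example (illustrative): the verification result is a plain list of
  # strings such as ["duplicate node group name 'default'"]; an empty list
  # means the configuration passed all checks.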

  def _UnlockedSetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    This function is for internal use, when the config lock is already held.

    """
    if disk.children:
      for child in disk.children:
        self._UnlockedSetDiskID(child, node_name)

    if disk.logical_id is None and disk.physical_id is not None:
      return
    if disk.dev_type == constants.LD_DRBD8:
      pnode, snode, port, pminor, sminor, secret = disk.logical_id
      if node_name not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        node_name)
      pnode_info = self._UnlockedGetNodeInfo(pnode)
      snode_info = self._UnlockedGetNodeInfo(snode)
      if pnode_info is None or snode_info is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(disk))
      p_data = (pnode_info.secondary_ip, port)
      s_data = (snode_info.secondary_ip, port)
      if pnode == node_name:
        disk.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        disk.physical_id = s_data + p_data + (sminor, secret)
    else:
      disk.physical_id = disk.logical_id
    return
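
  # Note: the DRBD physical_id built above is ordered from the point of view
  # of node_name:
  # (own_secondary_ip, port, peer_secondary_ip, port, own_minor, secret).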

  @locking.ssynchronized(_config_lock)
  def SetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    """
    return self._UnlockedSetDiskID(disk, node_name)

  @locking.ssynchronized(_config_lock)
  def AddTcpUdpPort(self, port):
    """Adds a new port to the available port pool.

    @warning: this method does not "flush" the configuration (via
        L{_WriteConfig}); callers should do that themselves once the
        configuration is stable

    """
    if not isinstance(port, int):
      raise errors.ProgrammerError("Invalid type passed for port")

    self._config_data.cluster.tcpudp_port_pool.add(port)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetPortList(self):
    """Returns a copy of the current port list.

    """
    return self._config_data.cluster.tcpudp_port_pool.copy()

  @locking.ssynchronized(_config_lock)
  def AllocatePort(self):
    """Allocate a port.

    The port will be taken from the available port pool or from the
    default port range (and in this case we increase
    highest_used_port).

    """
    # If there are TCP/IP ports configured, we use them first.
    if self._config_data.cluster.tcpudp_port_pool:
      port = self._config_data.cluster.tcpudp_port_pool.pop()
    else:
      port = self._config_data.cluster.highest_used_port + 1
      if port >= constants.LAST_DRBD_PORT:
        raise errors.ConfigurationError("The highest used port is greater"
                                        " than %s. Aborting." %
                                        constants.LAST_DRBD_PORT)
      self._config_data.cluster.highest_used_port = port

    self._WriteConfig()
    return port
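
  # Note: ports returned via AddTcpUdpPort() are handed out again before
  # highest_used_port is advanced, so the pool acts as a free list in front
  # of the sequential allocator.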

  def _UnlockedComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    @rtype: (dict, list)
    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty mapping), and a list of duplicates; if the duplicates
        list is not empty, the configuration is corrupted and its caller
        should raise an exception

    """
    def _AppendUsedPorts(instance_name, disk, used):
      duplicates = []
      if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
        node_a, node_b, _, minor_a, minor_b = disk.logical_id[:5]
        for node, port in ((node_a, minor_a), (node_b, minor_b)):
          assert node in used, ("Node '%s' of instance '%s' not found"
                                " in node list" % (node, instance_name))
          if port in used[node]:
            duplicates.append((node, port, instance_name, used[node][port]))
          else:
            used[node][port] = instance_name
      if disk.children:
        for child in disk.children:
          duplicates.extend(_AppendUsedPorts(instance_name, child, used))
      return duplicates

    duplicates = []
    my_dict = dict((node, {}) for node in self._config_data.nodes)
    for instance in self._config_data.instances.itervalues():
      for disk in instance.disks:
        duplicates.extend(_AppendUsedPorts(instance.name, disk, my_dict))
    for (node, minor), instance in self._temporary_drbds.iteritems():
      if minor in my_dict[node] and my_dict[node][minor] != instance:
        duplicates.append((node, minor, instance, my_dict[node][minor]))
      else:
        my_dict[node][minor] = instance
    return my_dict, duplicates
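
  # Example (illustrative) of the return value, for two nodes and a single
  # DRBD instance using minor 0 on both:
  #   ({"node1": {0: "inst1"}, "node2": {0: "inst1"}}, [])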

  @locking.ssynchronized(_config_lock)
  def ComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    This is just a wrapper over L{_UnlockedComputeDRBDMap}.

    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty mapping).

    """
    d_map, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    return d_map

  @locking.ssynchronized(_config_lock)
  def AllocateDRBDMinor(self, nodes, instance):
    """Allocate a drbd minor.

    The free minor will be automatically computed from the existing
    devices. A node can be given multiple times in order to allocate
    multiple minors. The result is the list of minors, in the same
    order as the passed nodes.

    @type instance: string
    @param instance: the instance for which we allocate minors

    """
    assert isinstance(instance, basestring), \
           "Invalid argument '%s' passed to AllocateDRBDMinor" % instance

    d_map, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    result = []
    for nname in nodes:
      ndata = d_map[nname]
      if not ndata:
        # no minors used, we can start at 0
        result.append(0)
        ndata[0] = instance
        self._temporary_drbds[(nname, 0)] = instance
        continue
      keys = ndata.keys()
      keys.sort()
      ffree = utils.FirstFree(keys)
      if ffree is None:
        # return the next minor
        # TODO: implement high-limit check
        minor = keys[-1] + 1
      else:
        minor = ffree
      # double-check minor against current instances
      assert minor not in d_map[nname], \
             ("Attempt to reuse allocated DRBD minor %d on node %s,"
              " already allocated to instance %s" %
              (minor, nname, d_map[nname][minor]))
      ndata[minor] = instance
      # double-check minor against reservation
      r_key = (nname, minor)
      assert r_key not in self._temporary_drbds, \
             ("Attempt to reuse reserved DRBD minor %d on node %s,"
              " reserved for instance %s" %
              (minor, nname, self._temporary_drbds[r_key]))
      self._temporary_drbds[r_key] = instance
      result.append(minor)
    logging.debug("Request to allocate drbd minors, input: %s, returning %s",
                  nodes, result)
    return result
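
  # Example (illustrative): starting from an empty DRBD map,
  # AllocateDRBDMinor(["node1", "node1", "node2"], "inst1") returns
  # [0, 1, 0]: minors are tracked per node, and repeating a node yields its
  # next free minor.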

  def _UnlockedReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    assert isinstance(instance, basestring), \
           "Invalid argument passed to ReleaseDRBDMinors"
    for key, name in self._temporary_drbds.items():
      if name == instance:
        del self._temporary_drbds[key]

  @locking.ssynchronized(_config_lock)
  def ReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    This should be called on the error paths, on the success paths
    it's automatically called by the ConfigWriter add and update
    functions.

    This function is just a wrapper over L{_UnlockedReleaseDRBDMinors}.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    self._UnlockedReleaseDRBDMinors(instance)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetConfigVersion(self):
    """Get the configuration version.

    @return: Config version

    """
    return self._config_data.version

  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterName(self):
    """Get cluster name.

    @return: Cluster name

    """
    return self._config_data.cluster.cluster_name

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNode(self):
    """Get the hostname of the master node for this cluster.

    @return: Master hostname

    """
    return self._config_data.cluster.master_node

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterIP(self):
    """Get the IP of the master node for this cluster.

    @return: Master IP

    """
    return self._config_data.cluster.master_ip

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNetdev(self):
    """Get the master network device for this cluster.

    """
    return self._config_data.cluster.master_netdev

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNetmask(self):
    """Get the netmask of the master node for this cluster.

    """
    return self._config_data.cluster.master_netmask

  @locking.ssynchronized(_config_lock, shared=1)
  def GetUseExternalMipScript(self):
    """Get flag representing whether to use the external master IP setup script.

    """
    return self._config_data.cluster.use_external_mip_script

  @locking.ssynchronized(_config_lock, shared=1)
  def GetFileStorageDir(self):
    """Get the file storage dir for this cluster.

    """
    return self._config_data.cluster.file_storage_dir

  @locking.ssynchronized(_config_lock, shared=1)
  def GetSharedFileStorageDir(self):
    """Get the shared file storage dir for this cluster.

    """
    return self._config_data.cluster.shared_file_storage_dir

  @locking.ssynchronized(_config_lock, shared=1)
  def GetHypervisorType(self):
    """Get the hypervisor type for this cluster.

    """
    return self._config_data.cluster.enabled_hypervisors[0]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetHostKey(self):
    """Return the rsa hostkey from the config.

    @rtype: string
    @return: the rsa hostkey

    """
    return self._config_data.cluster.rsahostkeypub

  @locking.ssynchronized(_config_lock, shared=1)
  def GetDefaultIAllocator(self):
    """Get the default instance allocator for this cluster.

    """
    return self._config_data.cluster.default_iallocator

  @locking.ssynchronized(_config_lock, shared=1)
  def GetPrimaryIPFamily(self):
    """Get cluster primary ip family.

    @return: primary ip family

    """
    return self._config_data.cluster.primary_ip_family

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNetworkParameters(self):
    """Get network parameters of the master node.

    @rtype: L{objects.MasterNetworkParameters}
    @return: network parameters of the master node

    """
    cluster = self._config_data.cluster
    result = objects.MasterNetworkParameters(
      name=cluster.master_node, ip=cluster.master_ip,
      netmask=cluster.master_netmask, netdev=cluster.master_netdev,
      ip_family=cluster.primary_ip_family)

    return result

  @locking.ssynchronized(_config_lock)
  def AddNodeGroup(self, group, ec_id, check_uuid=True):
    """Add a node group to the configuration.

    This method calls group.UpgradeConfig() to fill any missing attributes
    according to their default values.

    @type group: L{objects.NodeGroup}
    @param group: the NodeGroup object to add
    @type ec_id: string
    @param ec_id: unique id for the job to use when creating a missing UUID
    @type check_uuid: bool
    @param check_uuid: add a UUID to the group if it doesn't have one or, if
                       it does, ensure that it does not exist in the
                       configuration already

    """
    self._UnlockedAddNodeGroup(group, ec_id, check_uuid)
    self._WriteConfig()

  def _UnlockedAddNodeGroup(self, group, ec_id, check_uuid):
    """Add a node group to the configuration.

    """
    logging.info("Adding node group %s to configuration", group.name)

    # Some code might need to add a node group with a pre-populated UUID
    # generated with ConfigWriter.GenerateUniqueID(). We allow them to bypass
    # the "does this UUID exist already" check.
    if check_uuid:
      self._EnsureUUID(group, ec_id)

    try:
      existing_uuid = self._UnlockedLookupNodeGroup(group.name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
                                 " node group (UUID: %s)" %
                                 (group.name, existing_uuid),
                                 errors.ECODE_EXISTS)

    group.serial_no = 1
    group.ctime = group.mtime = time.time()
    group.UpgradeConfig()

    self._config_data.nodegroups[group.uuid] = group
    self._config_data.cluster.serial_no += 1

  @locking.ssynchronized(_config_lock)
  def RemoveNodeGroup(self, group_uuid):
    """Remove a node group from the configuration.

    @type group_uuid: string
    @param group_uuid: the UUID of the node group to remove

    """
    logging.info("Removing node group %s from configuration", group_uuid)

    if group_uuid not in self._config_data.nodegroups:
      raise errors.ConfigurationError("Unknown node group '%s'" % group_uuid)

    assert len(self._config_data.nodegroups) != 1, \
            "Group '%s' is the only group, cannot be removed" % group_uuid

    del self._config_data.nodegroups[group_uuid]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  def _UnlockedLookupNodeGroup(self, target):
    """Lookup a node group's UUID.

    @type target: string or None
    @param target: group name or UUID or None to look for the default
    @rtype: string
    @return: nodegroup UUID
    @raises errors.OpPrereqError: when the target group cannot be found

    """
    if target is None:
      if len(self._config_data.nodegroups) != 1:
        raise errors.OpPrereqError("More than one node group exists. Target"
                                   " group must be specified explicitly.")
      else:
        return self._config_data.nodegroups.keys()[0]
    if target in self._config_data.nodegroups:
      return target
    for nodegroup in self._config_data.nodegroups.values():
      if nodegroup.name == target:
        return nodegroup.uuid
    raise errors.OpPrereqError("Node group '%s' not found" % target,
                               errors.ECODE_NOENT)

  @locking.ssynchronized(_config_lock, shared=1)
  def LookupNodeGroup(self, target):
    """Lookup a node group's UUID.

    This function is just a wrapper over L{_UnlockedLookupNodeGroup}.

    @type target: string or None
    @param target: group name or UUID or None to look for the default
    @rtype: string
    @return: nodegroup UUID

    """
    return self._UnlockedLookupNodeGroup(target)

    
1288
  def _UnlockedGetNodeGroup(self, uuid):
1289
    """Lookup a node group.
1290

1291
    @type uuid: string
1292
    @param uuid: group UUID
1293
    @rtype: L{objects.NodeGroup} or None
1294
    @return: nodegroup object, or None if not found
1295

1296
    """
1297
    if uuid not in self._config_data.nodegroups:
1298
      return None
1299

    
1300
    return self._config_data.nodegroups[uuid]
1301

    
1302
  @locking.ssynchronized(_config_lock, shared=1)
1303
  def GetNodeGroup(self, uuid):
1304
    """Lookup a node group.
1305

1306
    @type uuid: string
1307
    @param uuid: group UUID
1308
    @rtype: L{objects.NodeGroup} or None
1309
    @return: nodegroup object, or None if not found
1310

1311
    """
1312
    return self._UnlockedGetNodeGroup(uuid)
1313

    
1314
  @locking.ssynchronized(_config_lock, shared=1)
1315
  def GetAllNodeGroupsInfo(self):
1316
    """Get the configuration of all node groups.
1317

1318
    """
1319
    return dict(self._config_data.nodegroups)
1320

    
1321
  @locking.ssynchronized(_config_lock, shared=1)
1322
  def GetNodeGroupList(self):
1323
    """Get a list of node groups.
1324

1325
    """
1326
    return self._config_data.nodegroups.keys()
1327

    
1328
  @locking.ssynchronized(_config_lock, shared=1)
1329
  def GetNodeGroupMembersByNodes(self, nodes):
1330
    """Get nodes which are member in the same nodegroups as the given nodes.
1331

1332
    """
1333
    ngfn = lambda node_name: self._UnlockedGetNodeInfo(node_name).group
1334
    return frozenset(member_name
1335
                     for node_name in nodes
1336
                     for member_name in
1337
                       self._UnlockedGetNodeGroup(ngfn(node_name)).members)
1338

    
1339
  @locking.ssynchronized(_config_lock, shared=1)
1340
  def GetMultiNodeGroupInfo(self, group_uuids):
1341
    """Get the configuration of multiple node groups.
1342

1343
    @param group_uuids: List of node group UUIDs
1344
    @rtype: list
1345
    @return: List of tuples of (group_uuid, group_info)
1346

1347
    """
1348
    return [(uuid, self._UnlockedGetNodeGroup(uuid)) for uuid in group_uuids]
1349

    
1350
  @locking.ssynchronized(_config_lock)
1351
  def AddInstance(self, instance, ec_id):
1352
    """Add an instance to the config.
1353

1354
    This should be used after creating a new instance.
1355

1356
    @type instance: L{objects.Instance}
1357
    @param instance: the instance object
1358

1359
    """
1360
    if not isinstance(instance, objects.Instance):
1361
      raise errors.ProgrammerError("Invalid type passed to AddInstance")
1362

    
1363
    if instance.disk_template != constants.DT_DISKLESS:
1364
      all_lvs = instance.MapLVsByNode()
1365
      logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, all_lvs)
1366

    
1367
    all_macs = self._AllMACs()
1368
    for nic in instance.nics:
1369
      if nic.mac in all_macs:
1370
        raise errors.ConfigurationError("Cannot add instance %s:"
1371
                                        " MAC address '%s' already in use." %
1372
                                        (instance.name, nic.mac))
1373

    
1374
    self._EnsureUUID(instance, ec_id)
1375

    
1376
    instance.serial_no = 1
1377
    instance.ctime = instance.mtime = time.time()
1378
    self._config_data.instances[instance.name] = instance
1379
    self._config_data.cluster.serial_no += 1
1380
    self._UnlockedReleaseDRBDMinors(instance.name)
1381
    self._UnlockedCommitTemporaryIps(ec_id)
1382
    self._WriteConfig()
1383

    
1384
  def _EnsureUUID(self, item, ec_id):
1385
    """Ensures a given object has a valid UUID.
1386

1387
    @param item: the instance or node to be checked
1388
    @param ec_id: the execution context id for the uuid reservation
1389

1390
    """
1391
    if not item.uuid:
1392
      item.uuid = self._GenerateUniqueID(ec_id)
1393
    elif item.uuid in self._AllIDs(include_temporary=True):
1394
      raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
1395
                                      " in use" % (item.name, item.uuid))
1396

    
1397
  def _SetInstanceStatus(self, instance_name, status):
1398
    """Set the instance's status to a given value.
1399

1400
    """
1401
    assert status in constants.ADMINST_ALL, \
1402
           "Invalid status '%s' passed to SetInstanceStatus" % (status,)
1403

    
1404
    if instance_name not in self._config_data.instances:
1405
      raise errors.ConfigurationError("Unknown instance '%s'" %
1406
                                      instance_name)
1407
    instance = self._config_data.instances[instance_name]
1408
    if instance.admin_state != status:
1409
      instance.admin_state = status
1410
      instance.serial_no += 1
1411
      instance.mtime = time.time()
1412
      self._WriteConfig()
1413

    
1414
  @locking.ssynchronized(_config_lock)
1415
  def MarkInstanceUp(self, instance_name):
1416
    """Mark the instance status to up in the config.
1417

1418
    """
1419
    self._SetInstanceStatus(instance_name, constants.ADMINST_UP)
1420

    
1421
  @locking.ssynchronized(_config_lock)
1422
  def MarkInstanceOffline(self, instance_name):
1423
    """Mark the instance status to down in the config.
1424

1425
    """
1426
    self._SetInstanceStatus(instance_name, constants.ADMINST_OFFLINE)
1427

    
1428
  @locking.ssynchronized(_config_lock)
1429
  def RemoveInstance(self, instance_name):
1430
    """Remove the instance from the configuration.
1431

1432
    """
1433
    if instance_name not in self._config_data.instances:
1434
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
1435

    
1436
    # If a network port has been allocated to the instance,
1437
    # return it to the pool of free ports.
1438
    inst = self._config_data.instances[instance_name]
1439
    network_port = getattr(inst, "network_port", None)
1440
    if network_port is not None:
1441
      self._config_data.cluster.tcpudp_port_pool.add(network_port)
1442

    
1443
    instance = self._UnlockedGetInstanceInfo(instance_name)
1444

    
1445
    for nic in instance.nics:
1446
      if nic.network is not None and nic.ip is not None:
1447
        net_uuid = self._UnlockedLookupNetwork(nic.network)
1448
        if net_uuid:
1449
          # Return all IP addresses to the respective address pools
1450
          self._UnlockedCommitIp(constants.RELEASE_ACTION, net_uuid, nic.ip)
1451

    
1452

    
1453
    del self._config_data.instances[instance_name]
1454
    self._config_data.cluster.serial_no += 1
1455
    self._WriteConfig()
1456

    
1457
  @locking.ssynchronized(_config_lock)
  def RenameInstance(self, old_name, new_name):
    """Rename an instance.

    This needs to be done in ConfigWriter and not by RemoveInstance
    combined with AddInstance as only we can guarantee an atomic
    rename.

    """
    if old_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % old_name)

    # Operate on a copy to not lose the instance object in case of a failure
    inst = self._config_data.instances[old_name].Copy()
    inst.name = new_name

    for (idx, disk) in enumerate(inst.disks):
      if disk.dev_type == constants.LD_FILE:
        # rename the file paths in logical and physical id
        file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
        disk.logical_id = (disk.logical_id[0],
                           utils.PathJoin(file_storage_dir, inst.name,
                                          "disk%s" % idx))
        disk.physical_id = disk.logical_id

    # Actually replace instance object
    del self._config_data.instances[old_name]
    self._config_data.instances[inst.name] = inst

    # Force update of ssconf files
    self._config_data.cluster.serial_no += 1

    self._WriteConfig()

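  # Editor's illustrative sketch (not part of the original module, and the
  # storage path below is a hypothetical example): for a file-based disk with
  # logical_id ("loop", "/srv/ganeti/file-storage/old/disk0"), renaming the
  # instance to "new" computes
  #
  #   file_storage_dir = os.path.dirname(os.path.dirname(
  #       "/srv/ganeti/file-storage/old/disk0"))   # "/srv/ganeti/file-storage"
  #   utils.PathJoin(file_storage_dir, "new", "disk0")
  #
  # i.e. only the instance-name directory component of the path changes.
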
  @locking.ssynchronized(_config_lock)
  def MarkInstanceDown(self, instance_name):
    """Mark the status of an instance to down in the configuration.

    """
    self._SetInstanceStatus(instance_name, constants.ADMINST_DOWN)

  def _UnlockedGetInstanceList(self):
    """Get the list of instances.

    This function is for internal use, when the config lock is already held.

    """
    return self._config_data.instances.keys()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceList(self):
    """Get the list of instances.

    @return: array of instances, ex. ['instance2.example.com',
        'instance1.example.com']

    """
    return self._UnlockedGetInstanceList()

  def ExpandInstanceName(self, short_name):
    """Attempt to expand an incomplete instance name.

    """
    # Locking is done in L{ConfigWriter.GetInstanceList}
    return _MatchNameComponentIgnoreCase(short_name, self.GetInstanceList())

  def _UnlockedGetInstanceInfo(self, instance_name):
    """Returns information about an instance.

    This function is for internal use, when the config lock is already held.

    """
    if instance_name not in self._config_data.instances:
      return None

    return self._config_data.instances[instance_name]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceInfo(self, instance_name):
    """Returns information about an instance.

    It takes the information from the configuration file. Other information
    about an instance is taken from the live systems.

    @param instance_name: name of the instance, e.g.
        I{instance1.example.com}

    @rtype: L{objects.Instance}
    @return: the instance object

    """
    return self._UnlockedGetInstanceInfo(instance_name)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceNodeGroups(self, instance_name, primary_only=False):
    """Returns set of node group UUIDs for instance's nodes.

    @rtype: frozenset

    """
    instance = self._UnlockedGetInstanceInfo(instance_name)
    if not instance:
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)

    if primary_only:
      nodes = [instance.primary_node]
    else:
      nodes = instance.all_nodes

    return frozenset(self._UnlockedGetNodeInfo(node_name).group
                     for node_name in nodes)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMultiInstanceInfo(self, instances):
    """Get the configuration of multiple instances.

    @param instances: list of instance names
    @rtype: list
    @return: list of tuples (instance, instance_info), where instance_info
        is what GetInstanceInfo would return for the instance, while
        keeping the original order

    """
    return [(name, self._UnlockedGetInstanceInfo(name)) for name in instances]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllInstancesInfo(self):
    """Get the configuration of all instances.

    @rtype: dict
    @return: dict of (instance, instance_info), where instance_info is what
        GetInstanceInfo would return for the instance

    """
    my_dict = dict([(instance, self._UnlockedGetInstanceInfo(instance))
                    for instance in self._UnlockedGetInstanceList()])
    return my_dict

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstancesInfoByFilter(self, filter_fn):
    """Get instance configuration with a filter.

    @type filter_fn: callable
    @param filter_fn: Filter function receiving instance object as parameter,
      returning boolean. Important: this function is called while the
      configuration lock is held. It must not do any complex work or call
      functions potentially leading to a deadlock. Ideally it doesn't call any
      other functions and just compares instance attributes.

    """
    return dict((name, inst)
                for (name, inst) in self._config_data.instances.items()
                if filter_fn(inst))

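  # Editor's illustrative sketch (not part of the original module): the
  # filter callback should only compare instance attributes, e.g. selecting
  # all instances whose admin state is "up":
  #
  #   up_instances = cfg.GetInstancesInfoByFilter(
  #     lambda inst: inst.admin_state == constants.ADMINST_UP)
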
  @locking.ssynchronized(_config_lock)
  def AddNode(self, node, ec_id):
    """Add a node to the configuration.

    @type node: L{objects.Node}
    @param node: a Node instance

    """
    logging.info("Adding node %s to configuration", node.name)

    self._EnsureUUID(node, ec_id)

    node.serial_no = 1
    node.ctime = node.mtime = time.time()
    self._UnlockedAddNodeToGroup(node.name, node.group)
    self._config_data.nodes[node.name] = node
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def RemoveNode(self, node_name):
    """Remove a node from the configuration.

    """
    logging.info("Removing node %s from configuration", node_name)

    if node_name not in self._config_data.nodes:
      raise errors.ConfigurationError("Unknown node '%s'" % node_name)

    self._UnlockedRemoveNodeFromGroup(self._config_data.nodes[node_name])
    del self._config_data.nodes[node_name]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  def ExpandNodeName(self, short_name):
    """Attempt to expand an incomplete node name.

    """
    # Locking is done in L{ConfigWriter.GetNodeList}
    return _MatchNameComponentIgnoreCase(short_name, self.GetNodeList())

  def _UnlockedGetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    This function is for internal use, when the config lock is already
    held.

    @param node_name: the node name, e.g. I{node1.example.com}

    @rtype: L{objects.Node}
    @return: the node object

    """
    if node_name not in self._config_data.nodes:
      return None

    return self._config_data.nodes[node_name]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    This is just a locked wrapper over L{_UnlockedGetNodeInfo}.

    @param node_name: the node name, e.g. I{node1.example.com}

    @rtype: L{objects.Node}
    @return: the node object

    """
    return self._UnlockedGetNodeInfo(node_name)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeInstances(self, node_name):
    """Get the instances of a node, as stored in the config.

    @param node_name: the node name, e.g. I{node1.example.com}

    @rtype: (list, list)
    @return: a tuple with two lists: the primary and the secondary instances

    """
    pri = []
    sec = []
    for inst in self._config_data.instances.values():
      if inst.primary_node == node_name:
        pri.append(inst.name)
      if node_name in inst.secondary_nodes:
        sec.append(inst.name)
    return (pri, sec)

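  # Editor's illustrative sketch (not part of the original module): for a
  # node that is primary for "inst1" and secondary (e.g. via DRBD) for
  # "inst2",
  #
  #   cfg.GetNodeInstances("node1.example.com")  # -> (["inst1"], ["inst2"])
  #
  # An instance can appear in both lists only in inconsistent configurations.
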
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeGroupInstances(self, uuid, primary_only=False):
    """Get the instances of a node group.

    @param uuid: Node group UUID
    @param primary_only: Whether to only consider primary nodes
    @rtype: frozenset
    @return: List of instance names in node group

    """
    if primary_only:
      nodes_fn = lambda inst: [inst.primary_node]
    else:
      nodes_fn = lambda inst: inst.all_nodes

    return frozenset(inst.name
                     for inst in self._config_data.instances.values()
                     for node_name in nodes_fn(inst)
                     if self._UnlockedGetNodeInfo(node_name).group == uuid)

  def _UnlockedGetNodeList(self):
    """Return the list of nodes which are in the configuration.

    This function is for internal use, when the config lock is already
    held.

    @rtype: list

    """
    return self._config_data.nodes.keys()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeList(self):
    """Return the list of nodes which are in the configuration.

    """
    return self._UnlockedGetNodeList()

  def _UnlockedGetOnlineNodeList(self):
    """Return the list of nodes which are online.

    """
    all_nodes = [self._UnlockedGetNodeInfo(node)
                 for node in self._UnlockedGetNodeList()]
    return [node.name for node in all_nodes if not node.offline]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetOnlineNodeList(self):
    """Return the list of nodes which are online.

    """
    return self._UnlockedGetOnlineNodeList()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetVmCapableNodeList(self):
    """Return the list of nodes which are vm capable.

    """
    all_nodes = [self._UnlockedGetNodeInfo(node)
                 for node in self._UnlockedGetNodeList()]
    return [node.name for node in all_nodes if node.vm_capable]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNonVmCapableNodeList(self):
    """Return the list of nodes which are not vm capable.

    """
    all_nodes = [self._UnlockedGetNodeInfo(node)
                 for node in self._UnlockedGetNodeList()]
    return [node.name for node in all_nodes if not node.vm_capable]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMultiNodeInfo(self, nodes):
    """Get the configuration of multiple nodes.

    @param nodes: list of node names
    @rtype: list
    @return: list of tuples of (node, node_info), where node_info is
        what GetNodeInfo would return for the node, in the original
        order

    """
    return [(name, self._UnlockedGetNodeInfo(name)) for name in nodes]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNodesInfo(self):
    """Get the configuration of all nodes.

    @rtype: dict
    @return: dict of (node, node_info), where node_info is what
        GetNodeInfo would return for the node

    """
    return self._UnlockedGetAllNodesInfo()

  def _UnlockedGetAllNodesInfo(self):
    """Gets configuration of all nodes.

    @note: See L{GetAllNodesInfo}

    """
    return dict([(node, self._UnlockedGetNodeInfo(node))
                 for node in self._UnlockedGetNodeList()])

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeGroupsFromNodes(self, nodes):
    """Returns groups for a list of nodes.

    @type nodes: list of string
    @param nodes: List of node names
    @rtype: frozenset

    """
    return frozenset(self._UnlockedGetNodeInfo(name).group for name in nodes)

  def _UnlockedGetMasterCandidateStats(self, exceptions=None):
    """Get the number of current, desired and possible master candidates.

    @type exceptions: list
    @param exceptions: if passed, list of nodes that should be ignored
    @rtype: tuple
    @return: tuple of (current, desired, possible)

    """
    mc_now = mc_should = mc_max = 0
    for node in self._config_data.nodes.values():
      if exceptions and node.name in exceptions:
        continue
      if not (node.offline or node.drained) and node.master_capable:
        mc_max += 1
      if node.master_candidate:
        mc_now += 1
    mc_should = min(mc_max, self._config_data.cluster.candidate_pool_size)
    return (mc_now, mc_should, mc_max)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterCandidateStats(self, exceptions=None):
    """Get the number of current, desired and possible master candidates.

    This is just a wrapper over L{_UnlockedGetMasterCandidateStats}.

    @type exceptions: list
    @param exceptions: if passed, list of nodes that should be ignored
    @rtype: tuple
    @return: tuple of (current, desired, possible)

    """
    return self._UnlockedGetMasterCandidateStats(exceptions)

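  # Editor's illustrative sketch (not part of the original module): on a
  # hypothetical five-node cluster with candidate_pool_size = 3, one offline
  # node and two current master candidates,
  #
  #   cfg.GetMasterCandidateStats()  # -> (2, 3, 4)
  #
  # i.e. 2 candidates now, 3 desired (pool size capped by what is possible),
  # and 4 online, non-drained, master-capable nodes that could be candidates.
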
  @locking.ssynchronized(_config_lock)
  def MaintainCandidatePool(self, exceptions):
    """Try to grow the candidate pool to the desired size.

    @type exceptions: list
    @param exceptions: if passed, list of nodes that should be ignored
    @rtype: list
    @return: list with the adjusted nodes (L{objects.Node} instances)

    """
    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats(exceptions)
    mod_list = []
    if mc_now < mc_max:
      node_list = self._config_data.nodes.keys()
      random.shuffle(node_list)
      for name in node_list:
        if mc_now >= mc_max:
          break
        node = self._config_data.nodes[name]
        if (node.master_candidate or node.offline or node.drained or
            node.name in exceptions or not node.master_capable):
          continue
        mod_list.append(node)
        node.master_candidate = True
        node.serial_no += 1
        mc_now += 1
      if mc_now != mc_max:
        # this should not happen
        logging.warning("Warning: MaintainCandidatePool didn't manage to"
                        " fill the candidate pool (%d/%d)", mc_now, mc_max)
      if mod_list:
        self._config_data.cluster.serial_no += 1
        self._WriteConfig()

    return mod_list

  def _UnlockedAddNodeToGroup(self, node_name, nodegroup_uuid):
    """Add a given node to the specified group.

    """
    if nodegroup_uuid not in self._config_data.nodegroups:
      # This can happen if a node group gets deleted between its lookup and
      # when we're adding the first node to it, since we don't keep a lock in
      # the meantime. It's ok though, as we'll fail cleanly if the node group
      # is not found anymore.
      raise errors.OpExecError("Unknown node group: %s" % nodegroup_uuid)
    if node_name not in self._config_data.nodegroups[nodegroup_uuid].members:
      self._config_data.nodegroups[nodegroup_uuid].members.append(node_name)

  def _UnlockedRemoveNodeFromGroup(self, node):
    """Remove a given node from its group.

    """
    nodegroup = node.group
    if nodegroup not in self._config_data.nodegroups:
      logging.warning("Warning: node '%s' has unknown node group '%s'"
                      " (while being removed from it)", node.name, nodegroup)
      # Nothing to update if the group is unknown; indexing it below would
      # only raise a KeyError on top of the inconsistency just logged
      return
    nodegroup_obj = self._config_data.nodegroups[nodegroup]
    if node.name not in nodegroup_obj.members:
      logging.warning("Warning: node '%s' not a member of its node group '%s'"
                      " (while being removed from it)", node.name, nodegroup)
    else:
      nodegroup_obj.members.remove(node.name)

  @locking.ssynchronized(_config_lock)
  def AssignGroupNodes(self, mods):
    """Changes the group of a number of nodes.

    @type mods: list of tuples; (node name, new group UUID)
    @param mods: Node membership modifications

    """
    groups = self._config_data.nodegroups
    nodes = self._config_data.nodes

    resmod = []

    # Try to resolve names/UUIDs first
    for (node_name, new_group_uuid) in mods:
      try:
        node = nodes[node_name]
      except KeyError:
        raise errors.ConfigurationError("Unable to find node '%s'" % node_name)

      if node.group == new_group_uuid:
        # Node is being assigned to its current group
        logging.debug("Node '%s' was assigned to its current group (%s)",
                      node_name, node.group)
        continue

      # Try to find current group of node
      try:
        old_group = groups[node.group]
      except KeyError:
        raise errors.ConfigurationError("Unable to find old group '%s'" %
                                        node.group)

      # Try to find new group for node
      try:
        new_group = groups[new_group_uuid]
      except KeyError:
        raise errors.ConfigurationError("Unable to find new group '%s'" %
                                        new_group_uuid)

      assert node.name in old_group.members, \
        ("Inconsistent configuration: node '%s' not listed in members for its"
         " old group '%s'" % (node.name, old_group.uuid))
      assert node.name not in new_group.members, \
        ("Inconsistent configuration: node '%s' already listed in members for"
         " its new group '%s'" % (node.name, new_group.uuid))

      resmod.append((node, old_group, new_group))

    # Apply changes
    for (node, old_group, new_group) in resmod:
      assert node.uuid != new_group.uuid and old_group.uuid != new_group.uuid, \
        "Assigning to current group is not possible"

      node.group = new_group.uuid

      # Update members of involved groups
      if node.name in old_group.members:
        old_group.members.remove(node.name)
      if node.name not in new_group.members:
        new_group.members.append(node.name)

    # Update timestamps and serials (only once per node/group object)
    now = time.time()
    for obj in frozenset(itertools.chain(*resmod)): # pylint: disable=W0142
      obj.serial_no += 1
      obj.mtime = now

    # Force ssconf update
    self._config_data.cluster.serial_no += 1

    self._WriteConfig()

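  # Editor's illustrative sketch (not part of the original module): ``mods``
  # is a list of (node name, target group UUID) pairs, with hypothetical
  # values shown here:
  #
  #   cfg.AssignGroupNodes([
  #     ("node1.example.com", "e1f2a3b4-..."),
  #     ("node2.example.com", "e1f2a3b4-..."),
  #     ])
  #
  # No-op assignments (a node already in its target group) are skipped
  # during resolution, before any change is applied.
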
  def _BumpSerialNo(self):
    """Bump up the serial number of the config.

    """
    self._config_data.serial_no += 1
    self._config_data.mtime = time.time()

  def _AllUUIDObjects(self):
    """Returns all objects with uuid attributes.

    """
    return (self._config_data.instances.values() +
            self._config_data.nodes.values() +
            self._config_data.nodegroups.values() +
            [self._config_data.cluster])

  def _OpenConfig(self, accept_foreign):
    """Read the config data from disk.

    """
    raw_data = utils.ReadFile(self._cfg_file)

    try:
      data = objects.ConfigData.FromDict(serializer.Load(raw_data))
    except Exception, err:
      raise errors.ConfigurationError(err)

    # Make sure the configuration has the right version
    _ValidateConfig(data)

    if (not hasattr(data, "cluster") or
        not hasattr(data.cluster, "rsahostkeypub")):
      raise errors.ConfigurationError("Incomplete configuration"
                                      " (missing cluster.rsahostkeypub)")

    if data.cluster.master_node != self._my_hostname and not accept_foreign:
      msg = ("The configuration denotes node %s as master, while my"
             " hostname is %s; opening a foreign configuration is only"
             " possible in accept_foreign mode" %
             (data.cluster.master_node, self._my_hostname))
      raise errors.ConfigurationError(msg)

    # Upgrade configuration if needed
    data.UpgradeConfig()

    self._config_data = data
    # reset the last serial as -1 so that the next write will cause
    # ssconf update
    self._last_cluster_serial = -1

    # And finally run our (custom) config upgrade sequence
    self._UpgradeConfig()

    self._cfg_id = utils.GetFileID(path=self._cfg_file)

  def _UpgradeConfig(self):
    """Run upgrade steps that cannot be done purely in the objects.

    This is because some data elements need uniqueness across the
    whole configuration, etc.

    @warning: this function will call L{_WriteConfig()}, but also
        L{DropECReservations} so it needs to be called only from a
        "safe" place (the constructor). If one wanted to call it with
        the lock held, a DropECReservationUnlocked would need to be
        created first, to avoid causing deadlock.

    """
    modified = False
    for item in self._AllUUIDObjects():
      if item.uuid is None:
        item.uuid = self._GenerateUniqueID(_UPGRADE_CONFIG_JID)
        modified = True
    if not self._config_data.nodegroups:
      default_nodegroup_name = constants.INITIAL_NODE_GROUP_NAME
      default_nodegroup = objects.NodeGroup(name=default_nodegroup_name,
                                            members=[])
      self._UnlockedAddNodeGroup(default_nodegroup, _UPGRADE_CONFIG_JID, True)
      modified = True
    for node in self._config_data.nodes.values():
      if not node.group:
        node.group = self.LookupNodeGroup(None)
        modified = True
      # This is technically *not* an upgrade, but needs to be done both when
      # nodegroups are being added, and upon normally loading the config,
      # because the members list of a node group is discarded upon
      # serializing/deserializing the object.
      self._UnlockedAddNodeToGroup(node.name, node.group)
    if modified:
      self._WriteConfig()
      # This is ok even if it acquires the internal lock, as _UpgradeConfig is
      # only called at config init time, without the lock held
      self.DropECReservations(_UPGRADE_CONFIG_JID)

  def _DistributeConfig(self, feedback_fn):
    """Distribute the configuration to the other nodes.

    Currently, this only copies the configuration file. In the future,
    it could be used to encapsulate the 2/3-phase update mechanism.

    """
    if self._offline:
      return True

    bad = False

    node_list = []
    addr_list = []
    myhostname = self._my_hostname
    # we can skip checking whether _UnlockedGetNodeInfo returns None
    # since the node list comes from _UnlockedGetNodeList, and we are
    # called with the lock held, so no modifications should take place
    # in between
    for node_name in self._UnlockedGetNodeList():
      if node_name == myhostname:
        continue
      node_info = self._UnlockedGetNodeInfo(node_name)
      if not node_info.master_candidate:
        continue
      node_list.append(node_info.name)
      addr_list.append(node_info.primary_ip)

    # TODO: Use dedicated resolver talking to config writer for name resolution
    result = \
      self._GetRpc(addr_list).call_upload_file(node_list, self._cfg_file)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (self._cfg_file, to_node, msg))
        logging.error(msg)

        if feedback_fn:
          feedback_fn(msg)

        bad = True

    return not bad

  def _WriteConfig(self, destination=None, feedback_fn=None):
    """Write the configuration data to persistent storage.

    """
    assert feedback_fn is None or callable(feedback_fn)

    # Warn on config errors, but don't abort the save - the
    # configuration has already been modified, and we can't revert;
    # the best we can do is to warn the user and save as is, leaving
    # recovery to the user
    config_errors = self._UnlockedVerifyConfig()
    if config_errors:
      errmsg = ("Configuration data is not consistent: %s" %
                (utils.CommaJoin(config_errors)))
      logging.critical(errmsg)
      if feedback_fn:
        feedback_fn(errmsg)

    if destination is None:
      destination = self._cfg_file
    self._BumpSerialNo()
    txt = serializer.Dump(self._config_data.ToDict())

    getents = self._getents()
    try:
      fd = utils.SafeWriteFile(destination, self._cfg_id, data=txt,
                               close=False, gid=getents.confd_gid, mode=0640)
    except errors.LockError:
      raise errors.ConfigurationError("The configuration file has been"
                                      " modified since the last write, cannot"
                                      " update")
    try:
      self._cfg_id = utils.GetFileID(fd=fd)
    finally:
      os.close(fd)

    self.write_count += 1

    # and redistribute the config file to master candidates
    self._DistributeConfig(feedback_fn)

    # Write ssconf files on all nodes (including locally)
    if self._last_cluster_serial < self._config_data.cluster.serial_no:
      if not self._offline:
        result = self._GetRpc(None).call_write_ssconf_files(
          self._UnlockedGetOnlineNodeList(),
          self._UnlockedGetSsconfValues())

        for nname, nresu in result.items():
          msg = nresu.fail_msg
          if msg:
            errmsg = ("Error while uploading ssconf files to"
                      " node %s: %s" % (nname, msg))
            logging.warning(errmsg)

            if feedback_fn:
              feedback_fn(errmsg)

      self._last_cluster_serial = self._config_data.cluster.serial_no

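  # Editor's note (not part of the original module): the write above relies
  # on utils.SafeWriteFile comparing the previously recorded file ID
  # (self._cfg_id) against the file currently on disk; if another writer
  # changed the file in the meantime, the resulting LockError is surfaced
  # as a ConfigurationError rather than silently overwriting newer data,
  # giving a simple compare-and-swap on the configuration file.
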
  def _UnlockedGetSsconfValues(self):
    """Return the values needed by ssconf.

    @rtype: dict
    @return: a dictionary with keys the ssconf names and values their
        associated value

    """
    fn = "\n".join
    instance_names = utils.NiceSort(self._UnlockedGetInstanceList())
    node_names = utils.NiceSort(self._UnlockedGetNodeList())
    node_info = [self._UnlockedGetNodeInfo(name) for name in node_names]
    node_pri_ips = ["%s %s" % (ninfo.name, ninfo.primary_ip)
                    for ninfo in node_info]
    node_snd_ips = ["%s %s" % (ninfo.name, ninfo.secondary_ip)
                    for ninfo in node_info]

    instance_data = fn(instance_names)
    off_data = fn(node.name for node in node_info if node.offline)
    on_data = fn(node.name for node in node_info if not node.offline)
    mc_data = fn(node.name for node in node_info if node.master_candidate)
    mc_ips_data = fn(node.primary_ip for node in node_info
                     if node.master_candidate)
    node_data = fn(node_names)
    node_pri_ips_data = fn(node_pri_ips)
    node_snd_ips_data = fn(node_snd_ips)

    cluster = self._config_data.cluster
    cluster_tags = fn(cluster.GetTags())

    hypervisor_list = fn(cluster.enabled_hypervisors)

    uid_pool = uidpool.FormatUidPool(cluster.uid_pool, separator="\n")

    nodegroups = ["%s %s" % (nodegroup.uuid, nodegroup.name) for nodegroup in
                  self._config_data.nodegroups.values()]
    nodegroups_data = fn(utils.NiceSort(nodegroups))
    networks = ["%s %s" % (net.uuid, net.name) for net in
                self._config_data.networks.values()]
    networks_data = fn(utils.NiceSort(networks))

    ssconf_values = {
      constants.SS_CLUSTER_NAME: cluster.cluster_name,
      constants.SS_CLUSTER_TAGS: cluster_tags,
      constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir,
      constants.SS_SHARED_FILE_STORAGE_DIR: cluster.shared_file_storage_dir,
      constants.SS_MASTER_CANDIDATES: mc_data,
      constants.SS_MASTER_CANDIDATES_IPS: mc_ips_data,
      constants.SS_MASTER_IP: cluster.master_ip,
      constants.SS_MASTER_NETDEV: cluster.master_netdev,
      constants.SS_MASTER_NETMASK: str(cluster.master_netmask),
      constants.SS_MASTER_NODE: cluster.master_node,
      constants.SS_NODE_LIST: node_data,
      constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data,
      constants.SS_NODE_SECONDARY_IPS: node_snd_ips_data,
      constants.SS_OFFLINE_NODES: off_data,
      constants.SS_ONLINE_NODES: on_data,
      constants.SS_PRIMARY_IP_FAMILY: str(cluster.primary_ip_family),
      constants.SS_INSTANCE_LIST: instance_data,
      constants.SS_RELEASE_VERSION: constants.RELEASE_VERSION,
      constants.SS_HYPERVISOR_LIST: hypervisor_list,
      constants.SS_MAINTAIN_NODE_HEALTH: str(cluster.maintain_node_health),
      constants.SS_UID_POOL: uid_pool,
      constants.SS_NODEGROUPS: nodegroups_data,
      constants.SS_NETWORKS: networks_data,
      }
    bad_values = [(k, v) for k, v in ssconf_values.items()
                  if not isinstance(v, (str, basestring))]
    if bad_values:
      err = utils.CommaJoin("%s=%s" % (k, v) for k, v in bad_values)
      raise errors.ConfigurationError("Some ssconf key(s) have non-string"
                                      " values: %s" % err)
    return ssconf_values

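  # Editor's illustrative sketch (not part of the original module, values
  # hypothetical): the returned dict maps ssconf keys to newline-joined
  # strings, e.g.
  #
  #   {
  #     constants.SS_CLUSTER_NAME: "cluster.example.com",
  #     constants.SS_NODE_LIST: "node1.example.com\nnode2.example.com",
  #     ...
  #   }
  #
  # which is why every value is checked to be a string before returning.
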
  @locking.ssynchronized(_config_lock, shared=1)
  def GetSsconfValues(self):
    """Wrapper using lock around L{_UnlockedGetSsconfValues}().

    """
    return self._UnlockedGetSsconfValues()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetVGName(self):
    """Return the volume group name.

    """
    return self._config_data.cluster.volume_group_name

  @locking.ssynchronized(_config_lock)
  def SetVGName(self, vg_name):
    """Set the volume group name.

    """
    self._config_data.cluster.volume_group_name = vg_name
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetDRBDHelper(self):
    """Return DRBD usermode helper.

    """
    return self._config_data.cluster.drbd_usermode_helper

  @locking.ssynchronized(_config_lock)
  def SetDRBDHelper(self, drbd_helper):
    """Set DRBD usermode helper.

    """
    self._config_data.cluster.drbd_usermode_helper = drbd_helper
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMACPrefix(self):
    """Return the MAC prefix.

    """
    return self._config_data.cluster.mac_prefix

  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterInfo(self):
    """Returns information about the cluster.

    @rtype: L{objects.Cluster}
    @return: the cluster object

    """
    return self._config_data.cluster

  @locking.ssynchronized(_config_lock, shared=1)
  def HasAnyDiskOfType(self, dev_type):
    """Check if there is at least one disk of the given type in the
    configuration.

    """
    return self._config_data.HasAnyDiskOfType(dev_type)

  @locking.ssynchronized(_config_lock)
  def Update(self, target, feedback_fn, ec_id=None):
    """Notify function to be called after updates.

    This function must be called when an object (as returned by
    GetInstanceInfo, GetNodeInfo, GetCluster) has been updated and the
    caller wants the modifications saved to the backing store. Note
    that all modified objects will be saved, but the target argument
    is the one the caller wants to ensure that it's saved.

    @param target: an instance of either L{objects.Cluster},
        L{objects.Node} or L{objects.Instance} which is existing in
        the cluster
    @param feedback_fn: Callable feedback function

    """
    if self._config_data is None:
      raise errors.ProgrammerError("Configuration file not read,"
                                   " cannot save.")
    update_serial = False
    if isinstance(target, objects.Cluster):
      test = target == self._config_data.cluster
    elif isinstance(target, objects.Node):
      test = target in self._config_data.nodes.values()
      update_serial = True
    elif isinstance(target, objects.Instance):
      test = target in self._config_data.instances.values()
    elif isinstance(target, objects.NodeGroup):
      test = target in self._config_data.nodegroups.values()
    elif isinstance(target, objects.Network):
      test = target in self._config_data.networks.values()
    else:
      raise errors.ProgrammerError("Invalid object type (%s) passed to"
                                   " ConfigWriter.Update" % type(target))
    if not test:
      raise errors.ConfigurationError("Configuration updated since object"
                                      " has been read or unknown object")
    target.serial_no += 1
    target.mtime = now = time.time()

    if update_serial:
      # for node updates, we need to increase the cluster serial too
      self._config_data.cluster.serial_no += 1
      self._config_data.cluster.mtime = now

    if isinstance(target, objects.Instance):
      self._UnlockedReleaseDRBDMinors(target.name)

    if ec_id is not None:
      # Commit all ips reserved by OpInstanceSetParams and OpGroupSetParams
      self._UnlockedCommitTemporaryIps(ec_id)

    self._WriteConfig(feedback_fn=feedback_fn)

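  # Editor's illustrative sketch (not part of the original module): the
  # read-modify-write cycle for a node object would look like:
  #
  #   node = cfg.GetNodeInfo("node1.example.com")
  #   node.offline = True
  #   cfg.Update(node, None)
  #
  # Update() verifies that the object is one currently held in the
  # configuration; a stale or foreign copy is rejected with a
  # ConfigurationError instead of being saved.
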
  @locking.ssynchronized(_config_lock)
  def DropECReservations(self, ec_id):
    """Drop per-execution-context reservations.

    """
    for rm in self._all_rms:
      rm.DropECReservations(ec_id)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNetworksInfo(self):
    """Get configuration info of all the networks.

    """
    return dict(self._config_data.networks)

  def _UnlockedGetNetworkList(self):
    """Get the list of networks.

    This function is for internal use, when the config lock is already held.

    """
    return self._config_data.networks.keys()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNetworkList(self):
    """Get the list of networks.

    @return: array of networks, ex. ["main", "vlan100", "200"]

    """
    return self._UnlockedGetNetworkList()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNetworkNames(self):
    """Get a list of network names.

    """
    names = [net.name
             for net in self._config_data.networks.values()]
    return names

  def _UnlockedGetNetwork(self, uuid):
    """Returns information about a network.

    This function is for internal use, when the config lock is already held.

    """
    if uuid not in self._config_data.networks:
      return None

    return self._config_data.networks[uuid]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNetwork(self, uuid):
    """Returns information about a network.

    It takes the information from the configuration file.

    @param uuid: UUID of the network

    @rtype: L{objects.Network}
    @return: the network object

    """
    return self._UnlockedGetNetwork(uuid)

  @locking.ssynchronized(_config_lock)
  def AddNetwork(self, net, ec_id, check_uuid=True):
    """Add a network to the configuration.

    @type net: L{objects.Network}
    @param net: the Network object to add
    @type ec_id: string
    @param ec_id: unique id for the job to use when creating a missing UUID

    """
    self._UnlockedAddNetwork(net, ec_id, check_uuid)
    self._WriteConfig()

  def _UnlockedAddNetwork(self, net, ec_id, check_uuid):
    """Add a network to the configuration.

    """
    logging.info("Adding network %s to configuration", net.name)

    if check_uuid:
      self._EnsureUUID(net, ec_id)

    existing_uuid = self._UnlockedLookupNetwork(net.name)
    if existing_uuid:
      raise errors.OpPrereqError("Desired network name '%s' already"
                                 " exists as a network (UUID: %s)" %
                                 (net.name, existing_uuid),
                                 errors.ECODE_EXISTS)
    net.serial_no = 1
    self._config_data.networks[net.uuid] = net
    self._config_data.cluster.serial_no += 1

  def _UnlockedLookupNetwork(self, target):
    """Lookup a network's UUID.

    @type target: string
    @param target: network name or UUID
    @rtype: string
    @return: network UUID, or None if the target network cannot be found

    """
    if target in self._config_data.networks:
      return target
    for net in self._config_data.networks.values():
      if net.name == target:
        return net.uuid
    return None

  @locking.ssynchronized(_config_lock, shared=1)
  def LookupNetwork(self, target):
    """Lookup a network's UUID.

    This function is just a wrapper over L{_UnlockedLookupNetwork}.

    @type target: string
    @param target: network name or UUID
    @rtype: string
    @return: network UUID, or None if not found

    """
    return self._UnlockedLookupNetwork(target)

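  # Editor's illustrative sketch (not part of the original module; the name
  # and UUID are hypothetical): lookup accepts either form,
  #
  #   cfg.LookupNetwork("vlan100")      # -> "9e2f..." (the network's UUID)
  #   cfg.LookupNetwork("9e2f...")      # -> "9e2f..." (UUIDs pass through)
  #   cfg.LookupNetwork("no-such-net")  # -> None
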
  @locking.ssynchronized(_config_lock)
  def RemoveNetwork(self, network_uuid):
    """Remove a network from the configuration.

    @type network_uuid: string
    @param network_uuid: the UUID of the network to remove

    """
    logging.info("Removing network %s from configuration", network_uuid)

    if network_uuid not in self._config_data.networks:
      raise errors.ConfigurationError("Unknown network '%s'" % network_uuid)

    del self._config_data.networks[network_uuid]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  def _UnlockedGetGroupNetParams(self, net, node):
    """Get the netparams (mode, link) of a network.

    Get a network's netparams for a given node.

    @type net: string
    @param net: network name
    @type node: string
    @param node: node name
    @rtype: dict or None
    @return: netparams

    """
    net_uuid = self._UnlockedLookupNetwork(net)
    if net_uuid is None:
      return None

    node_info = self._UnlockedGetNodeInfo(node)
    nodegroup_info = self._UnlockedGetNodeGroup(node_info.group)
    netparams = nodegroup_info.networks.get(net_uuid, None)

    return netparams

  @locking.ssynchronized(_config_lock, shared=1)
  def GetGroupNetParams(self, net, node):
    """Locking wrapper of L{_UnlockedGetGroupNetParams}().

    """
    return self._UnlockedGetGroupNetParams(net, node)

  @locking.ssynchronized(_config_lock, shared=1)
  def CheckIPInNodeGroup(self, ip, node):
    """Check IP uniqueness in nodegroup.

    Check networks that are connected in the node's node group
    if ip is contained in any of them. Used when creating/adding
    a NIC to ensure uniqueness among nodegroups.

    @type ip: string
    @param ip: ip address
    @type node: string
    @param node: node name
    @rtype: (string, dict) or (None, None)
    @return: (network name, netparams)

    """
    if ip is None:
      return (None, None)
    node_info = self._UnlockedGetNodeInfo(node)
    nodegroup_info = self._UnlockedGetNodeGroup(node_info.group)
    for net_uuid in nodegroup_info.networks.keys():
      net_info = self._UnlockedGetNetwork(net_uuid)
      pool = network.AddressPool(net_info)
      if pool.Contains(ip):
        return (net_info.name, nodegroup_info.networks[net_uuid])

    return (None, None)
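
  # Editor's illustrative sketch (not part of the original module; the IP
  # and node name are hypothetical): when adding a NIC with a static IP, a
  # caller would first check the node's group networks:
  #
  #   (net_name, netparams) = cfg.CheckIPInNodeGroup("192.0.2.10",
  #                                                  "node1.example.com")
  #   if net_name is not None:
  #     # the IP falls inside an already-connected network; reuse its
  #     # mode/link netparams instead of the NIC defaults
  #     ...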