Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib / instance_utils.py @ d676dbea

History | View | Annotate | Download (18.8 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Utility function mainly, but not only used by instance LU's."""
23

    
24
import logging
25
import os
26

    
27
from ganeti import constants
28
from ganeti import errors
29
from ganeti import locking
30
from ganeti import network
31
from ganeti import objects
32
from ganeti import pathutils
33
from ganeti import utils
34
from ganeti.cmdlib.common import AnnotateDiskParams, \
35
  ComputeIPolicyInstanceViolation, CheckDiskTemplateEnabled
36

    
37

    
38
def BuildInstanceHookEnv(name, primary_node_name, secondary_node_names, os_type,
                         status, minmem, maxmem, vcpus, nics, disk_template,
                         disks, bep, hvp, hypervisor_name, tags):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node_name: string
  @param primary_node_name: the name of the instance's primary node
  @type secondary_node_names: list
  @param secondary_node_names: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: string
  @param status: the desired status of the instance
  @type minmem: string
  @param minmem: the minimum memory size of the instance
  @type maxmem: string
  @param maxmem: the maximum memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (name, uuid, ip, mac, mode, link, vlan, net,
      netinfo) representing the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: list of tuples (name, uuid, size, mode)
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @type tags: list
  @param tags: list of instance tags as strings
  @rtype: dict
  @return: the hook environment for this instance

  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node_name,
    "INSTANCE_SECONDARIES": " ".join(secondary_node_names),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MINMEM": minmem,
    "INSTANCE_MAXMEM": maxmem,
    # TODO(2.9) remove deprecated "memory" value
    "INSTANCE_MEMORY": maxmem,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
    }
  if nics:
    nic_count = len(nics)
    # Loop variables renamed (nic_name/nic_uuid) so they no longer shadow
    # the "name" parameter used above
    for idx, (nic_name, nic_uuid, ip, mac, mode, link, vlan, net, netinfo) \
        in enumerate(nics):
      if ip is None:
        ip = ""
      if nic_name:
        env["INSTANCE_NIC%d_NAME" % idx] = nic_name
      env["INSTANCE_NIC%d_UUID" % idx] = nic_uuid
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      env["INSTANCE_NIC%d_VLAN" % idx] = vlan
      if netinfo:
        nobj = objects.Network.FromDict(netinfo)
        env.update(nobj.HooksDict("INSTANCE_NIC%d_" % idx))
      elif net:
        # BUGFIX: this used to test "network" (the imported module, always
        # truthy) instead of "net", so NICs without any network still got
        # INSTANCE_NICn_NETWORK_NAME set to None
        # FIXME: broken network reference: the instance NIC specifies a
        # network, but the relevant network entry was not in the config. This
        # should be made impossible.
        env["INSTANCE_NIC%d_NETWORK_NAME" % idx] = net
      if mode == constants.NIC_MODE_BRIDGED or \
         mode == constants.NIC_MODE_OVS:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (disk_name, disk_uuid, size, mode) in enumerate(disks):
      if disk_name:
        env["INSTANCE_DISK%d_NAME" % idx] = disk_name
      env["INSTANCE_DISK%d_UUID" % idx] = disk_uuid
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  if not tags:
    tags = []

  env["INSTANCE_TAGS"] = " ".join(tags)

  # Flatten backend and hypervisor parameters into INSTANCE_BE_*/INSTANCE_HV_*
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env
148

    
149

    
150
def BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  be_full = cluster.FillBE(instance)
  hv_full = cluster.FillHV(instance)

  # Disks are passed as plain (name, uuid, size, mode) tuples
  disk_info = [(disk.name, disk.uuid, disk.size, disk.mode)
               for disk in instance.disks]

  args = {
    "name": instance.name,
    "primary_node_name": lu.cfg.GetNodeName(instance.primary_node),
    "secondary_node_names": lu.cfg.GetNodeNames(instance.secondary_nodes),
    "os_type": instance.os,
    "status": instance.admin_state,
    "minmem": be_full[constants.BE_MINMEM],
    "maxmem": be_full[constants.BE_MAXMEM],
    "vcpus": be_full[constants.BE_VCPUS],
    "nics": NICListToTuple(lu, instance.nics),
    "disk_template": instance.disk_template,
    "disks": disk_info,
    "bep": be_full,
    "hvp": hv_full,
    "hypervisor_name": instance.hypervisor,
    "tags": instance.tags,
  }
  if override:
    args.update(override)
  return BuildInstanceHookEnv(**args) # pylint: disable=W0142
189

    
190

    
191
def GetClusterDomainSecret():
  """Reads the cluster domain secret.

  @return: the contents of the cluster domain secret file

  """
  secret_file = pathutils.CLUSTER_DOMAIN_SECRET_FILE
  return utils.ReadOneLineFile(secret_file, strict=True)
197

    
198

    
199
def CheckNodeNotDrained(lu, node_uuid):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  node_info = lu.cfg.GetNodeInfo(node_uuid)
  if node_info.drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node_uuid,
                               errors.ECODE_STATE)
210

    
211

    
212
def CheckNodeVmCapable(lu, node_uuid):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  node_info = lu.cfg.GetNodeInfo(node_uuid)
  if node_info.vm_capable:
    return
  raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node_uuid,
                             errors.ECODE_STATE)
223

    
224

    
225
def RemoveInstance(lu, feedback_fn, instance, ignore_failures):
  """Utility function to remove an instance.

  Removes the instance's disks, drops it from the cluster configuration
  and schedules the release of its instance-level lock.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @param feedback_fn: callable used to report warnings to the caller
  @type instance: L{objects.Instance}
  @param instance: the instance to remove
  @type ignore_failures: boolean
  @param ignore_failures: if True, disk removal failures only produce a
      warning instead of aborting the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  disks_removed = RemoveDisks(lu, instance, ignore_failures=ignore_failures)
  if not disks_removed:
    if not ignore_failures:
      raise errors.OpExecError("Can't remove instance's disks")
    feedback_fn("Warning: can't remove instance's disks")

  logging.info("Removing instance %s out of cluster config", instance.name)

  lu.cfg.RemoveInstance(instance.uuid)

  assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
    "Instance lock removal conflict"

  # Schedule the removal of the instance's lock
  lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
245

    
246

    
247
def RemoveDisks(lu, instance, target_node_uuid=None, ignore_failures=False):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node_uuid: string
  @param target_node_uuid: used to override the node on which to remove the
          disks
  @type ignore_failures: boolean
  @param ignore_failures: if True, DRBD ports are returned to the pool even
          when some disk removals failed
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  ports_to_release = set()
  anno_disks = AnnotateDiskParams(instance, instance.disks, lu.cfg)
  for (idx, device) in enumerate(anno_disks):
    # With an explicit target node, remove the disk only there; otherwise
    # remove it on every node in the device's node tree (starting at the
    # instance's primary node)
    if target_node_uuid:
      edata = [(target_node_uuid, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node_uuid, disk in edata:
      lu.cfg.SetDiskID(disk, node_uuid)
      result = lu.rpc.call_blockdev_remove(node_uuid, disk)
      if result.fail_msg:
        lu.LogWarning("Could not remove disk %s on node %s,"
                      " continuing anyway: %s", idx,
                      lu.cfg.GetNodeName(node_uuid), result.fail_msg)
        # Failures on offline nodes are tolerated, unless the offline node
        # is the instance's primary node
        if not (result.offline and node_uuid != instance.primary_node):
          all_result = False

    # if this is a DRBD disk, return its port to the pool
    if device.dev_type in constants.LDS_DRBD:
      ports_to_release.add(device.logical_id[2])

  # Only give the collected TCP/UDP ports back to the pool when removal
  # succeeded (or failures are explicitly ignored)
  # NOTE(review): presumably this avoids recycling ports of disks that may
  # still exist after a failed removal — confirm
  if all_result or ignore_failures:
    for port in ports_to_release:
      lu.cfg.AddTcpUdpPort(port)

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)

  # File-based templates additionally keep a per-instance storage directory
  # that has to be removed on the relevant node
  if instance.disk_template in constants.DTS_FILEBASED:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if target_node_uuid:
      tgt = target_node_uuid
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, lu.cfg.GetNodeName(tgt), result.fail_msg)
      all_result = False

  return all_result
308

    
309

    
310
def NICToTuple(lu, nic):
  """Build a tuple of nic information.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nic: L{objects.NIC}
  @param nic: nic to convert to hooks tuple

  """
  filled_params = lu.cfg.GetClusterInfo().SimpleFillNIC(nic.nicparams)
  mode = filled_params[constants.NIC_MODE]
  link = filled_params[constants.NIC_LINK]
  vlan = filled_params[constants.NIC_VLAN]
  if nic.network:
    # Resolve the network reference into a serializable dict
    netinfo = objects.Network.ToDict(lu.cfg.GetNetwork(nic.network))
  else:
    netinfo = None
  return (nic.name, nic.uuid, nic.ip, nic.mac, mode, link, vlan,
          nic.network, netinfo)
330

    
331

    
332
def NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  return [NICToTuple(lu, nic) for nic in nics]
348

    
349

    
350
def CopyLockList(names):
  """Makes a copy of a list of lock names.

  Handles L{locking.ALL_SET} correctly.

  """
  if names == locking.ALL_SET:
    # ALL_SET is a sentinel, not a real list; pass it through unchanged
    return locking.ALL_SET
  return names[:]
360

    
361

    
362
def ReleaseLocks(lu, level, names=None, keep=None):
  """Releases locks owned by an LU.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit whose locks are released
  @param level: Lock level
  @type names: list or None
  @param names: Names of locks to release
  @type keep: list or None
  @param keep: Names of locks to retain

  """
  assert not (keep is not None and names is not None), \
         "Only one of the 'names' and the 'keep' parameters can be given"

  # should_release(name) -> True when the lock called "name" must be
  # released; None means "release everything owned at this level"
  if names is not None:
    # Bound method works as the predicate directly
    should_release = names.__contains__
  elif keep:
    should_release = lambda name: name not in keep
  else:
    should_release = None

  owned = lu.owned_locks(level)
  if not owned:
    # Not owning any lock at this level, do nothing
    pass

  elif should_release:
    retain = []
    release = []

    # Determine which locks to release
    for name in owned:
      if should_release(name):
        release.append(name)
      else:
        retain.append(name)

    # Sanity check: the partition must cover exactly the owned locks
    assert len(lu.owned_locks(level)) == (len(retain) + len(release))

    # Release just some locks
    lu.glm.release(level, names=release)

    assert frozenset(lu.owned_locks(level)) == frozenset(retain)
  else:
    # Release everything
    lu.glm.release(level)

    assert not lu.glm.is_owned(level), "No locks should be owned"
410

    
411

    
412
def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
                                 target_group, cfg,
                                 _compute_fn=ComputeIPolicyInstanceViolation):
  """Compute if instance meets the specs of the new target group.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param current_group: The current group of the instance
  @param target_group: The new group of the instance
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  # Staying in the same group can never introduce new policy violations
  if current_group == target_group:
    return []
  return _compute_fn(ipolicy, instance, cfg)
431

    
432

    
433
def CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
                           _compute_fn=_ComputeIPolicyNodeViolation):
  """Checks that the target node is correct in terms of instance policy.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param node: The new node to relocate
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param ignore: Ignore violations of the ipolicy
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
  violations = _compute_fn(ipolicy, instance, primary_node.group, node.group,
                           cfg)

  if not violations:
    return

  msg = ("Instance does not meet target node group's (%s) instance"
         " policy: %s") % (node.group, utils.CommaJoin(violations))
  if ignore:
    # Caller asked to proceed despite the violation; warn only
    lu.LogWarning(msg)
  else:
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
457

    
458

    
459
def GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  @type instance: L{objects.Instance}
  @param instance: the instance whose name is embedded in the metadata
  @rtype: string
  @return: the metadata text for the instance's disks

  """
  info_text = "originstname+%s" % instance.name
  return info_text
464

    
465

    
466
def CheckNodeFreeMemory(lu, node_uuid, reason, requested, hvname, hvparams):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuid: C{str}
  @param node_uuid: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hvname: string
  @param hvname: the hypervisor's name
  @type hvparams: dict of strings
  @param hvparams: the hypervisor's parameters
  @rtype: integer
  @return: node current free memory
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  node_name = lu.cfg.GetNodeName(node_uuid)
  # Query exactly one node for exactly one hypervisor
  nodeinfo = lu.rpc.call_node_info([node_uuid], None, [(hvname, hvparams)])
  nodeinfo[node_uuid].Raise("Can't get data from node %s" % node_name,
                            prereq=True, ecode=errors.ECODE_ENVIRON)
  # The third payload element is a 1-tuple holding the single hypervisor's
  # info dict, matching the single-entry hypervisor list requested above
  (_, _, (hv_info, )) = nodeinfo[node_uuid].payload

  free_mem = hv_info.get("memory_free", None)
  # A missing or non-numeric value means the node could not report its
  # free memory
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node_name, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node_name, reason, requested, free_mem),
                               errors.ECODE_NORES)
  return free_mem
509

    
510

    
511
def CheckInstanceBridgesExist(lu, instance, node_uuid=None):
  """Check that the bridges needed by an instance exist.

  Defaults to the instance's primary node when no node is given.

  """
  target_uuid = instance.primary_node if node_uuid is None else node_uuid
  CheckNicsBridgesExist(lu, instance.nics, target_uuid)
518

    
519

    
520
def CheckNicsBridgesExist(lu, nics, node_uuid):
  """Check that the bridges needed by a list of nics exist.

  """
  cluster = lu.cfg.GetClusterInfo()
  # Collect the links of all bridged NICs, with filled-in parameters
  brlist = []
  for nic in nics:
    params = cluster.SimpleFillNIC(nic.nicparams)
    if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
      brlist.append(params[constants.NIC_LINK])
  if not brlist:
    return
  result = lu.rpc.call_bridges_exist(node_uuid, brlist)
  result.Raise("Error checking bridges on destination node '%s'" %
               lu.cfg.GetNodeName(node_uuid), prereq=True,
               ecode=errors.ECODE_ENVIRON)
533

    
534

    
535
def CheckNodeHasOS(lu, node_uuid, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node is not supporting the OS

  """
  query_result = lu.rpc.call_os_get(node_uuid, os_name)
  query_result.Raise("OS '%s' not in supported OS list for node %s" %
                     (os_name, lu.cfg.GetNodeName(node_uuid)),
                     prereq=True, ecode=errors.ECODE_INVAL)
  if force_variant:
    return
  _CheckOSVariant(query_result.payload, os_name)
551

    
552

    
553
def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  supported = os_obj.supported_variants

  if not supported:
    # Variant-less OS: the name must not carry a variant either
    if not variant:
      return
    raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                               " passed)" % (os_obj.name, variant),
                               errors.ECODE_INVAL)

  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in supported:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)