#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Common functions used by multiple logical units."""

import copy
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti.serializer import Private
import ganeti.rpc.node as rpc
from ganeti import ssconf
from ganeti import utils


# States of instance
INSTANCE_DOWN = [constants.ADMINST_DOWN]
INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]

#: Instance status in which an instance can be marked as offline/online
CAN_CHANGE_INSTANCE_OFFLINE = (frozenset(INSTANCE_DOWN) | frozenset([
  constants.ADMINST_OFFLINE,
  ]))


def _ExpandItemName(expand_fn, name, kind):
  """Expand an item name.

  @param expand_fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the result of the expand_fn, if successful
  @raise errors.OpPrereqError: if the item is not found

  """
  (uuid, full_name) = expand_fn(name)
  if uuid is None or full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return (uuid, full_name)


def ExpandInstanceUuidAndName(cfg, expected_uuid, name):
  """Wrapper over L{_ExpandItemName} for instance."""
  (uuid, full_name) = _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
  if expected_uuid is not None and uuid != expected_uuid:
    raise errors.OpPrereqError(
      "The instance's UUID '%s' does not match the expected UUID '%s' for"
      " instance '%s'. Maybe the instance changed since you submitted this"
      " job." % (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
  return (uuid, full_name)


def ExpandNodeUuidAndName(cfg, expected_uuid, name):
  """Expand a short node name into the node UUID and full name.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type expected_uuid: string
  @param expected_uuid: expected UUID for the node (or None if there is no
        expectation). If it does not match, a L{errors.OpPrereqError} is
        raised.
  @type name: string
  @param name: the short node name

  """
  (uuid, full_name) = _ExpandItemName(cfg.ExpandNodeName, name, "Node")
  if expected_uuid is not None and uuid != expected_uuid:
    raise errors.OpPrereqError(
      "The node's UUID '%s' does not match the expected UUID '%s' for node"
      " '%s'. Maybe the node changed since you submitted this job." %
      (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
  return (uuid, full_name)


def ShareAll():
  """Returns a dict declaring all lock levels shared.

  """
  return dict.fromkeys(locking.LEVELS, 1)


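# Example (illustrative sketch): a logical unit that wants all of its lock
# levels shared can assign the result directly, e.g.
#   self.share_locks = ShareAll()
# Here 'self' is assumed to be a LogicalUnit exposing a 'share_locks' dict.

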
def CheckNodeGroupInstances(cfg, group_uuid, owned_instance_names):
  """Checks if the instances in a node group are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type group_uuid: string
  @param group_uuid: Node group UUID
  @type owned_instance_names: set or frozenset
  @param owned_instance_names: List of currently owned instances

  """
  wanted_instances = frozenset(cfg.GetInstanceNames(
                                 cfg.GetNodeGroupInstances(group_uuid)))
  if owned_instance_names != wanted_instances:
    raise errors.OpPrereqError("Instances in node group '%s' changed since"
                               " locks were acquired, wanted '%s', have '%s';"
                               " retry the operation" %
                               (group_uuid,
                                utils.CommaJoin(wanted_instances),
                                utils.CommaJoin(owned_instance_names)),
                               errors.ECODE_STATE)

  return wanted_instances


def GetWantedNodes(lu, short_node_names):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type short_node_names: list
  @param short_node_names: list of node names or None for all nodes
  @rtype: tuple of lists
  @return: tuple with (list of node UUIDs, list of node names)
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if short_node_names:
    node_uuids = [ExpandNodeUuidAndName(lu.cfg, None, name)[0]
                  for name in short_node_names]
  else:
    node_uuids = lu.cfg.GetNodeList()

  return (node_uuids, [lu.cfg.GetNodeName(uuid) for uuid in node_uuids])


def GetWantedInstances(lu, short_inst_names):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type short_inst_names: list
  @param short_inst_names: list of instance names or None for all instances
  @rtype: tuple of lists
  @return: tuple of (instance UUIDs, instance names)
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if short_inst_names:
    inst_uuids = [ExpandInstanceUuidAndName(lu.cfg, None, name)[0]
                  for name in short_inst_names]
  else:
    inst_uuids = lu.cfg.GetInstanceList()
  return (inst_uuids, [lu.cfg.GetInstanceName(uuid) for uuid in inst_uuids])


def RunPostHook(lu, node_name):
  """Runs the post-hook for an opcode on a single node.

  """
  hm = lu.proc.BuildHooksManager(lu)
  try:
    hm.RunPhase(constants.HOOKS_PHASE_POST, node_names=[node_name])
  except Exception, err: # pylint: disable=W0703
    lu.LogWarning("Errors occurred running hooks on %s: %s",
                  node_name, err)


def RedistributeAncillaryFiles(lu):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  """
  # Gather target nodes
  cluster = lu.cfg.GetClusterInfo()
  master_info = lu.cfg.GetMasterNodeInfo()

  online_node_uuids = lu.cfg.GetOnlineNodeList()
  online_node_uuid_set = frozenset(online_node_uuids)
  vm_node_uuids = list(online_node_uuid_set.intersection(
                         lu.cfg.GetVmCapableNodeList()))

  # Never distribute to master node
  for node_uuids in [online_node_uuids, vm_node_uuids]:
    if master_info.uuid in node_uuids:
      node_uuids.remove(master_info.uuid)

  # Gather file lists
  (files_all, _, files_mc, files_vm) = \
    ComputeAncillaryFiles(cluster, True)

  # Never re-distribute configuration file from here
  assert not (pathutils.CLUSTER_CONF_FILE in files_all or
              pathutils.CLUSTER_CONF_FILE in files_vm)
  assert not files_mc, "Master candidates not handled in this function"

  filemap = [
    (online_node_uuids, files_all),
    (vm_node_uuids, files_vm),
    ]

  # Upload the files
  for (node_uuids, files) in filemap:
    for fname in files:
      UploadHelper(lu, node_uuids, fname)


def ComputeAncillaryFiles(cluster, redist):
  """Compute files external to Ganeti which need to be consistent.

  @type redist: boolean
  @param redist: Whether to include files which need to be redistributed

  """
  # Compute files for all nodes
  files_all = set([
    pathutils.SSH_KNOWN_HOSTS_FILE,
    pathutils.CONFD_HMAC_KEY,
    pathutils.CLUSTER_DOMAIN_SECRET_FILE,
    pathutils.SPICE_CERT_FILE,
    pathutils.SPICE_CACERT_FILE,
    pathutils.RAPI_USERS_FILE,
    ])

  if redist:
    # we need to ship at least the RAPI certificate
    files_all.add(pathutils.RAPI_CERT_FILE)
  else:
    files_all.update(pathutils.ALL_CERT_FILES)
    files_all.update(ssconf.SimpleStore().GetFileList())

  if cluster.modify_etc_hosts:
    files_all.add(pathutils.ETC_HOSTS)

  if cluster.use_external_mip_script:
    files_all.add(pathutils.EXTERNAL_MASTER_SETUP_SCRIPT)

  # Files which are optional; these must:
  # - be present in one other category as well
  # - either exist or not exist on all nodes of that category (mc, vm all)
  files_opt = set([
    pathutils.RAPI_USERS_FILE,
    ])

  # Files which should only be on master candidates
  files_mc = set()

  if not redist:
    files_mc.add(pathutils.CLUSTER_CONF_FILE)

  # File storage
  if (not redist and (cluster.IsFileStorageEnabled() or
                        cluster.IsSharedFileStorageEnabled())):
    files_all.add(pathutils.FILE_STORAGE_PATHS_FILE)
    files_opt.add(pathutils.FILE_STORAGE_PATHS_FILE)

  # Files which should only be on VM-capable nodes
  files_vm = set(
    filename
    for hv_name in cluster.enabled_hypervisors
    for filename in
    hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[0])

  files_opt |= set(
    filename
    for hv_name in cluster.enabled_hypervisors
    for filename in
    hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[1])

  # Filenames in each category must be unique
  all_files_set = files_all | files_mc | files_vm
  assert (len(all_files_set) ==
          sum(map(len, [files_all, files_mc, files_vm]))), \
    "Found file listed in more than one file list"

  # Optional files must be present in one other category
  assert all_files_set.issuperset(files_opt), \
    "Optional file not in a different required list"

  # This one file should never ever be re-distributed via RPC
  assert not (redist and
              pathutils.FILE_STORAGE_PATHS_FILE in all_files_set)

  return (files_all, files_opt, files_mc, files_vm)


def UploadHelper(lu, node_uuids, fname):
  """Helper for uploading a file and showing warnings.

  """
  if os.path.exists(fname):
    result = lu.rpc.call_upload_file(node_uuids, fname)
    for to_node_uuids, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (fname, lu.cfg.GetNodeName(to_node_uuids), msg))
        lu.LogWarning(msg)


def MergeAndVerifyHvState(op_input, obj_input):
  """Combines the hv state from an opcode with that of the object.

  @param op_input: The input dict from the opcode
  @param obj_input: The input dict from the objects
  @return: The verified and updated dict

  """
  if op_input:
    invalid_hvs = set(op_input) - constants.HYPER_TYPES
    if invalid_hvs:
      raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:"
                                 " %s" % utils.CommaJoin(invalid_hvs),
                                 errors.ECODE_INVAL)
    if obj_input is None:
      obj_input = {}
    type_check = constants.HVSTS_PARAMETER_TYPES
    return _UpdateAndVerifySubDict(obj_input, op_input, type_check)

  return None


def MergeAndVerifyDiskState(op_input, obj_input):
  """Combines the disk state from an opcode with that of the object.

  @param op_input: The input dict from the opcode
  @param obj_input: The input dict from the objects
  @return: The verified and updated dict
  """
  if op_input:
    invalid_dst = set(op_input) - constants.DS_VALID_TYPES
    if invalid_dst:
      raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" %
                                 utils.CommaJoin(invalid_dst),
                                 errors.ECODE_INVAL)
    type_check = constants.DSS_PARAMETER_TYPES
    if obj_input is None:
      obj_input = {}
    return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value,
                                              type_check))
                for key, value in op_input.items())

  return None


def CheckOSParams(lu, required, node_uuids, osname, osparams):
  """OS parameters validation.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type required: boolean
  @param required: whether the validation should fail if the OS is not
      found
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @type osname: string
  @param osname: the name of the OS we should use
  @type osparams: dict
  @param osparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  node_uuids = _FilterVmNodes(lu, node_uuids)

  # Last chance to unwrap private elements.
  for key in osparams:
    if isinstance(osparams[key], Private):
      osparams[key] = osparams[key].Get()

  result = lu.rpc.call_os_validate(node_uuids, required, osname,
                                   [constants.OS_VALIDATE_PARAMETERS],
                                   osparams)
  for node_uuid, nres in result.items():
    # we don't check for offline cases since this should be run only
    # against the master node and/or an instance's nodes
    nres.Raise("OS Parameters validation failed on node %s" %
               lu.cfg.GetNodeName(node_uuid))
    if not nres.payload:
      lu.LogInfo("OS %s not found on node %s, validation skipped",
                 osname, lu.cfg.GetNodeName(node_uuid))


def CheckHVParams(lu, node_uuids, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  node_uuids = _FilterVmNodes(lu, node_uuids)

  cluster = lu.cfg.GetClusterInfo()
  hvfull = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)

  hvinfo = lu.rpc.call_hypervisor_validate_params(node_uuids, hvname, hvfull)
  for node_uuid in node_uuids:
    info = hvinfo[node_uuid]
    if info.offline:
      continue
    info.Raise("Hypervisor parameter validation failed on node %s" %
               lu.cfg.GetNodeName(node_uuid))


def AdjustCandidatePool(lu, exceptions, feedback_fn):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for node in mod_list:
      lu.context.ReaddNode(node)
      cluster = lu.cfg.GetClusterInfo()
      AddNodeCertToCandidateCerts(lu, node.uuid, cluster)
      lu.cfg.Update(cluster, feedback_fn)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def CheckNodePVs(nresult, exclusive_storage):
  """Check node PVs.

  """
  pvlist_dict = nresult.get(constants.NV_PVLIST, None)
  if pvlist_dict is None:
    return (["Can't get PV list from node"], None)
  pvlist = map(objects.LvmPvInfo.FromDict, pvlist_dict)
  errlist = []
  # check that ':' is not present in PV names, since it's a
  # special character for lvcreate (denotes the range of PEs to
  # use on the PV)
  for pv in pvlist:
    if ":" in pv.name:
      errlist.append("Invalid character ':' in PV '%s' of VG '%s'" %
                     (pv.name, pv.vg_name))
  es_pvinfo = None
  if exclusive_storage:
    (errmsgs, es_pvinfo) = utils.LvmExclusiveCheckNodePvs(pvlist)
    errlist.extend(errmsgs)
    shared_pvs = nresult.get(constants.NV_EXCLUSIVEPVS, None)
    if shared_pvs:
      for (pvname, lvlist) in shared_pvs:
        # TODO: Check that LVs are really unrelated (snapshots, DRBD meta...)
        errlist.append("PV %s is shared among unrelated LVs (%s)" %
                       (pvname, utils.CommaJoin(lvlist)))
  return (errlist, es_pvinfo)


def _ComputeMinMaxSpec(name, qualifier, ispecs, value):
  """Computes if value is in the desired range.

  @param name: name of the parameter for which we perform the check
  @param qualifier: a qualifier used in the error message (e.g. 'disk/1',
      not just 'disk')
  @param ispecs: dictionary containing min and max values
  @param value: actual value that we want to use
  @return: None or an error string

  """
  if value in [None, constants.VALUE_AUTO]:
    return None
  max_v = ispecs[constants.ISPECS_MAX].get(name, value)
  min_v = ispecs[constants.ISPECS_MIN].get(name, value)
  if value > max_v or min_v > value:
    if qualifier:
      fqn = "%s/%s" % (name, qualifier)
    else:
      fqn = name
    return ("%s value %s is not in range [%s, %s]" %
            (fqn, value, min_v, max_v))
  return None


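# Example (illustrative sketch): assuming an ispecs dict such as
#   {constants.ISPECS_MIN: {"disk-size": 1024},
#    constants.ISPECS_MAX: {"disk-size": 4096}}
# the call _ComputeMinMaxSpec("disk-size", "0", ispecs, 512) returns
# "disk-size/0 value 512 is not in range [1024, 4096]", while a value inside
# the range (or None / constants.VALUE_AUTO) returns None.

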
def ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
                                nic_count, disk_sizes, spindle_use,
                                disk_template,
                                _compute_fn=_ComputeMinMaxSpec):
  """Verifies ipolicy against provided specs.

  @type ipolicy: dict
  @param ipolicy: The ipolicy
  @type mem_size: int
  @param mem_size: The memory size
  @type cpu_count: int
  @param cpu_count: Used cpu cores
  @type disk_count: int
  @param disk_count: Number of disks used
  @type nic_count: int
  @param nic_count: Number of nics used
  @type disk_sizes: list of ints
  @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
  @type spindle_use: int
  @param spindle_use: The number of spindles this instance uses
  @type disk_template: string
  @param disk_template: The disk template of the instance
  @param _compute_fn: The compute function (unittest only)
  @return: A list of violations, or an empty list if no violations are found

  """
  assert disk_count == len(disk_sizes)

  test_settings = [
    (constants.ISPEC_MEM_SIZE, "", mem_size),
    (constants.ISPEC_CPU_COUNT, "", cpu_count),
    (constants.ISPEC_NIC_COUNT, "", nic_count),
    (constants.ISPEC_SPINDLE_USE, "", spindle_use),
    ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
         for idx, d in enumerate(disk_sizes)]
  if disk_template != constants.DT_DISKLESS:
    # This check doesn't make sense for diskless instances
    test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count))
  ret = []
  allowed_dts = ipolicy[constants.IPOLICY_DTS]
  if disk_template not in allowed_dts:
    ret.append("Disk template %s is not allowed (allowed templates: %s)" %
               (disk_template, utils.CommaJoin(allowed_dts)))

  min_errs = None
  for minmax in ipolicy[constants.ISPECS_MINMAX]:
    errs = filter(None,
                  (_compute_fn(name, qualifier, minmax, value)
                   for (name, qualifier, value) in test_settings))
    if min_errs is None or len(errs) < len(min_errs):
      min_errs = errs
  assert min_errs is not None
  return ret + min_errs


def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
                                    _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance meets the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance: L{objects.Instance}
  @param instance: The instance to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  ret = []
  be_full = cfg.GetClusterInfo().FillBE(instance)
  mem_size = be_full[constants.BE_MAXMEM]
  cpu_count = be_full[constants.BE_VCPUS]
  inst_nodes = cfg.GetInstanceNodes(instance)
  es_flags = rpc.GetExclusiveStorageForNodes(cfg, inst_nodes)
  if any(es_flags.values()):
    # With exclusive storage use the actual spindles
    try:
      spindle_use = sum([disk.spindles for disk in instance.disks])
    except TypeError:
      ret.append("Number of spindles not configured for disks of instance %s"
                 " while exclusive storage is enabled, try running gnt-cluster"
                 " repair-disk-sizes" % instance.name)
      # _ComputeMinMaxSpec ignores 'None's
      spindle_use = None
  else:
    spindle_use = be_full[constants.BE_SPINDLE_USE]
  disk_count = len(instance.disks)
  disk_sizes = [disk.size for disk in instance.disks]
  nic_count = len(instance.nics)
  disk_template = instance.disk_template

  return ret + _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                           disk_sizes, spindle_use, disk_template)


def _ComputeViolatingInstances(ipolicy, instances, cfg):
  """Computes the set of instances that violate the given ipolicy.

  @param ipolicy: The ipolicy to verify
  @type instances: L{objects.Instance}
  @param instances: List of instances to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @return: A frozenset of instance names violating the ipolicy

  """
  return frozenset([inst.name for inst in instances
                    if ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])


def ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
  """Computes a set of any instances that would violate the new ipolicy.

  @param old_ipolicy: The current (still in-place) ipolicy
  @param new_ipolicy: The new (to become) ipolicy
  @param instances: List of instances to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @return: A set of instances which violate the new ipolicy but
      did not before

  """
  return (_ComputeViolatingInstances(new_ipolicy, instances, cfg) -
          _ComputeViolatingInstances(old_ipolicy, instances, cfg))


def GetUpdatedParams(old_params, update_dict,
                      use_default=True, use_none=False):
  """Return the new version of a parameter dictionary.

  @type old_params: dict
  @param old_params: old parameters
  @type update_dict: dict
  @param update_dict: dict containing new parameter values, or
      constants.VALUE_DEFAULT to reset the parameter to its default
      value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
  @rtype: dict
  @return: the new parameter dictionary

  """
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == constants.VALUE_DEFAULT) or
          (use_none and val is None)):
      try:
        del params_copy[key]
      except KeyError:
        pass
    else:
      params_copy[key] = val
  return params_copy


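# Example (illustrative sketch): with use_default=True (the default),
# constants.VALUE_DEFAULT entries remove the key, other entries overwrite it:
#   GetUpdatedParams({"memory": 512, "vcpus": 2},
#                    {"memory": constants.VALUE_DEFAULT, "vcpus": 4})
#   -> {"vcpus": 4}

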
def GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
  """Return the new version of an instance policy.

  @param group_policy: whether this policy applies to a group and thus
    we should support removal of policy entries

  """
  ipolicy = copy.deepcopy(old_ipolicy)
  for key, value in new_ipolicy.items():
    if key not in constants.IPOLICY_ALL_KEYS:
      raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
                                 errors.ECODE_INVAL)
    if (not value or value == [constants.VALUE_DEFAULT] or
            value == constants.VALUE_DEFAULT):
      if group_policy:
        if key in ipolicy:
          del ipolicy[key]
      else:
        raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
                                   " on the cluster" % key,
                                   errors.ECODE_INVAL)
    else:
      if key in constants.IPOLICY_PARAMETERS:
        # FIXME: we assume all such values are float
        try:
          ipolicy[key] = float(value)
        except (TypeError, ValueError), err:
          raise errors.OpPrereqError("Invalid value for attribute"
                                     " '%s': '%s', error: %s" %
                                     (key, value, err), errors.ECODE_INVAL)
      elif key == constants.ISPECS_MINMAX:
        for minmax in value:
          for k in minmax.keys():
            utils.ForceDictType(minmax[k], constants.ISPECS_PARAMETER_TYPES)
        ipolicy[key] = value
      elif key == constants.ISPECS_STD:
        if group_policy:
          msg = "%s cannot appear in group instance specs" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        ipolicy[key] = GetUpdatedParams(old_ipolicy.get(key, {}), value,
                                        use_none=False, use_default=False)
        utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
      else:
        # FIXME: we assume all others are lists; this should be redone
        # in a nicer way
        ipolicy[key] = list(value)
  try:
    objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
  except errors.ConfigurationError, err:
    raise errors.OpPrereqError("Invalid instance policy: %s" % err,
                               errors.ECODE_INVAL)
  return ipolicy


def AnnotateDiskParams(instance, devs, cfg):
  """Little helper wrapper to the rpc annotation method.

  @param instance: The instance object
  @type devs: List of L{objects.Disk}
  @param devs: The root devices (not any of its children!)
  @param cfg: The config object
  @return: The annotated disk copies
  @see: L{rpc.node.AnnotateDiskParams}

  """
  return rpc.AnnotateDiskParams(devs, cfg.GetInstanceDiskParams(instance))


def SupportsOob(cfg, node):
  """Tells if node supports OOB.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @return: The OOB script if supported or an empty string otherwise

  """
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]


def _UpdateAndVerifySubDict(base, updates, type_check):
  """Updates and verifies a dict with sub dicts of the same type.

  @param base: The dict with the old data
  @param updates: The dict with the new data
  @param type_check: Dict suitable for ForceDictType to verify correct types
  @returns: A new dict with updated and verified values

  """
  def fn(old, value):
    new = GetUpdatedParams(old, value)
    utils.ForceDictType(new, type_check)
    return new

  ret = copy.deepcopy(base)
  ret.update(dict((key, fn(base.get(key, {}), value))
                  for key, value in updates.items()))
  return ret


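# Example (illustrative sketch): each sub-dict of 'updates' is merged into the
# matching sub-dict of 'base' via GetUpdatedParams and then type-checked:
#   _UpdateAndVerifySubDict({"plain": {"a": 1}}, {"plain": {"b": 2}}, check)
#   -> {"plain": {"a": 1, "b": 2}}
# assuming 'check' is a ForceDictType mapping that accepts keys "a" and "b".

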
def _FilterVmNodes(lu, node_uuids):
  """Filters out non-vm_capable nodes from a list.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @rtype: list
  @return: the list of vm-capable nodes

  """
  non_vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
  return [uuid for uuid in node_uuids if uuid not in non_vm_nodes]


def GetDefaultIAllocator(cfg, ialloc):
  """Decides on which iallocator to use.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration object
  @type ialloc: string or None
  @param ialloc: Iallocator specified in opcode
  @rtype: string
  @return: Iallocator name

  """
  if not ialloc:
    # Use default iallocator
    ialloc = cfg.GetDefaultIAllocator()

  if not ialloc:
    raise errors.OpPrereqError("No iallocator was specified, neither in the"
                               " opcode nor as a cluster-wide default",
                               errors.ECODE_INVAL)

  return ialloc


def CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_node_uuids,
                             cur_group_uuid):
  """Checks if node groups for locked instances are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @type instances: dict; string as key, L{objects.Instance} as value
  @param instances: Dictionary, instance UUID as key, instance object as value
  @type owned_groups: iterable of string
  @param owned_groups: List of owned groups
  @type owned_node_uuids: iterable of string
  @param owned_node_uuids: List of owned nodes
  @type cur_group_uuid: string or None
  @param cur_group_uuid: Optional group UUID to check against instance's groups

  """
  for (uuid, inst) in instances.items():
    inst_nodes = cfg.GetInstanceNodes(inst)
    assert owned_node_uuids.issuperset(inst_nodes), \
      "Instance %s's nodes changed while we kept the lock" % inst.name

    inst_groups = CheckInstanceNodeGroups(cfg, uuid, owned_groups)

    assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
      "Instance %s has no node in group %s" % (inst.name, cur_group_uuid)


def CheckInstanceNodeGroups(cfg, inst_uuid, owned_groups, primary_only=False):
  """Checks if the owned node groups are still correct for an instance.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type inst_uuid: string
  @param inst_uuid: Instance UUID
  @type owned_groups: set or frozenset
  @param owned_groups: List of currently owned node groups
  @type primary_only: boolean
  @param primary_only: Whether to check node groups for only the primary node

  """
  inst_groups = cfg.GetInstanceNodeGroups(inst_uuid, primary_only)

  if not owned_groups.issuperset(inst_groups):
    raise errors.OpPrereqError("Instance %s's node groups changed since"
                               " locks were acquired, current groups"
                               " are '%s', owning groups '%s'; retry the"
                               " operation" %
                               (cfg.GetInstanceName(inst_uuid),
                                utils.CommaJoin(inst_groups),
                                utils.CommaJoin(owned_groups)),
                               errors.ECODE_STATE)

  return inst_groups


def LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
  """Unpacks the result of change-group and node-evacuate iallocator requests.

  Used for the iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
  L{constants.IALLOCATOR_MODE_CHG_GROUP}.

  @type lu: L{LogicalUnit}
  @param lu: Logical unit instance
  @type alloc_result: tuple/list
  @param alloc_result: Result from iallocator
  @type early_release: bool
  @param early_release: Whether to release locks early if possible
  @type use_nodes: bool
  @param use_nodes: Whether to display node names instead of groups

  """
  (moved, failed, jobs) = alloc_result

  if failed:
    failreason = utils.CommaJoin("%s (%s)" % (name, reason)
                                 for (name, reason) in failed)
    lu.LogWarning("Unable to evacuate instances %s", failreason)
    raise errors.OpExecError("Unable to evacuate instances %s" % failreason)

  if moved:
    lu.LogInfo("Instances to be moved: %s",
               utils.CommaJoin(
                 "%s (to %s)" %
                 (name, _NodeEvacDest(use_nodes, group, node_names))
                 for (name, group, node_names) in moved))

  return [map(compat.partial(_SetOpEarlyRelease, early_release),
              map(opcodes.OpCode.LoadOpCode, ops))
          for ops in jobs]


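# Example (illustrative sketch) of the alloc_result shape unpacked above:
#   ([("inst1", "group-or-nodes", ["node2"])],   # moved
#    [("inst2", "some failure reason")],         # failed
#    [[op1_as_dict, op2_as_dict], ...])          # jobs (serialized opcodes)
# The instance names and reason shown here are hypothetical placeholders.

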
def _NodeEvacDest(use_nodes, group, node_names):
  """Returns group or nodes depending on caller's choice.

  """
  if use_nodes:
    return utils.CommaJoin(node_names)
  else:
    return group


def _SetOpEarlyRelease(early_release, op):
  """Sets C{early_release} flag on opcodes if available.

  """
  try:
    op.early_release = early_release
  except AttributeError:
    assert not isinstance(op, opcodes.OpInstanceReplaceDisks)

  return op


def MapInstanceLvsToNodes(instances):
  """Creates a map from (node, volume) to instance object.

  @type instances: list of L{objects.Instance}
  @rtype: dict; tuple of (node uuid, volume name) as key, L{objects.Instance}
          object as value

  """
  return dict(((node_uuid, vol), inst)
              for inst in instances
              for (node_uuid, vols) in inst.MapLVsByNode().items()
              for vol in vols)


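# Example (illustrative sketch): for an instance whose MapLVsByNode() returns
#   {"node-uuid-1": ["xenvg/disk0", "xenvg/disk1"]}
# the resulting dict maps both ("node-uuid-1", "xenvg/disk0") and
# ("node-uuid-1", "xenvg/disk1") to that instance object. The node UUID and
# volume names above are hypothetical.

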
def CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
  """Make sure that none of the given parameters is global.

  If a global parameter is found, an L{errors.OpPrereqError} exception is
  raised. This is used to avoid setting global parameters for individual nodes.

  @type params: dictionary
  @param params: Parameters to check
  @type glob_pars: dictionary
  @param glob_pars: Forbidden parameters
  @type kind: string
  @param kind: Kind of parameters (e.g. "node")
  @type bad_levels: string
  @param bad_levels: Level(s) at which the parameters are forbidden (e.g.
      "instance")
  @type good_levels: string
  @param good_levels: Level(s) at which the parameters are allowed (e.g.
      "cluster or group")

  """
  used_globals = glob_pars.intersection(params)
  if used_globals:
    msg = ("The following %s parameters are global and cannot"
           " be customized at %s level, please modify them at"
           " %s level: %s" %
           (kind, bad_levels, good_levels, utils.CommaJoin(used_globals)))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def IsExclusiveStorageEnabledNode(cfg, node):
  """Whether exclusive_storage is in effect for the given node.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @rtype: bool
  @return: The effective value of exclusive_storage

  """
  return cfg.GetNdParams(node)[constants.ND_EXCLUSIVE_STORAGE]


def CheckInstanceState(lu, instance, req_states, msg=None):
  """Ensure that an instance is in one of the required states.

  @param lu: the LU on behalf of which we make the check
  @param instance: the instance to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the instance is not in the required state

  """
  if msg is None:
    msg = ("can't use instance from outside %s states" %
           utils.CommaJoin(req_states))
  if instance.admin_state not in req_states:
    raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" %
                               (instance.name, instance.admin_state, msg),
                               errors.ECODE_STATE)

  if constants.ADMINST_UP not in req_states:
    pnode_uuid = instance.primary_node
    if not lu.cfg.GetNodeInfo(pnode_uuid).offline:
      all_hvparams = lu.cfg.GetClusterInfo().hvparams
      ins_l = lu.rpc.call_instance_list(
                [pnode_uuid], [instance.hypervisor], all_hvparams)[pnode_uuid]
      ins_l.Raise("Can't contact node %s for instance information" %
                  lu.cfg.GetNodeName(pnode_uuid),
                  prereq=True, ecode=errors.ECODE_ENVIRON)
      if instance.name in ins_l.payload:
        raise errors.OpPrereqError("Instance %s is running, %s" %
                                   (instance.name, msg), errors.ECODE_STATE)
    else:
      lu.LogWarning("Primary node offline, ignoring check that instance"
                    " is down")


def CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
  """Check the sanity of iallocator and node arguments and use the
  cluster-wide iallocator if appropriate.

  Check that at most one of (iallocator, node) is specified. If none is
  specified, or the iallocator is L{constants.DEFAULT_IALLOCATOR_SHORTCUT},
  then the LU's opcode's iallocator slot is filled with the cluster-wide
  default iallocator.

  @type iallocator_slot: string
  @param iallocator_slot: the name of the opcode iallocator slot
  @type node_slot: string
  @param node_slot: the name of the opcode target node slot

  """
  node = getattr(lu.op, node_slot, None)
  ialloc = getattr(lu.op, iallocator_slot, None)
  if node == []:
    node = None

  if node is not None and ialloc is not None:
    raise errors.OpPrereqError("Do not specify both iallocator and node",
                               errors.ECODE_INVAL)
  elif ((node is None and ialloc is None) or
        ialloc == constants.DEFAULT_IALLOCATOR_SHORTCUT):
    default_iallocator = lu.cfg.GetDefaultIAllocator()
    if default_iallocator:
      setattr(lu.op, iallocator_slot, default_iallocator)
    else:
      raise errors.OpPrereqError("No iallocator or node given and no"
                                 " cluster-wide default iallocator found;"
                                 " please specify either an iallocator or a"
                                 " node, or set a cluster-wide default"
                                 " iallocator", errors.ECODE_INVAL)


def FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_uuid, prereq):
  """Returns the indices of the instance disks reported as faulty on a node.

  """
  faulty = []

  result = rpc_runner.call_blockdev_getmirrorstatus(
             node_uuid, (instance.disks, instance))
  result.Raise("Failed to get disk status from node %s" %
               cfg.GetNodeName(node_uuid),
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


def CheckNodeOnline(lu, node_uuid, msg=None):
1065
  """Ensure that a given node is online.
1066

1067
  @param lu: the LU on behalf of which we make the check
1068
  @param node_uuid: the node to check
1069
  @param msg: if passed, should be a message to replace the default one
1070
  @raise errors.OpPrereqError: if the node is offline
1071

1072
  """
1073
  if msg is None:
1074
    msg = "Can't use offline node"
1075
  if lu.cfg.GetNodeInfo(node_uuid).offline:
1076
    raise errors.OpPrereqError("%s: %s" % (msg, lu.cfg.GetNodeName(node_uuid)),
1077
                               errors.ECODE_STATE)
1078

    
1079

    
def CheckDiskTemplateEnabled(cluster, disk_template):
  """Helper function to check if a disk template is enabled.

  @type cluster: C{objects.Cluster}
  @param cluster: the cluster's configuration
  @type disk_template: str
  @param disk_template: the disk template to be checked

  """
  assert disk_template is not None
  if disk_template not in constants.DISK_TEMPLATES:
    raise errors.OpPrereqError("'%s' is not a valid disk template."
                               " Valid disk templates are: %s" %
                               (disk_template,
                                ",".join(constants.DISK_TEMPLATES)))
  if disk_template not in cluster.enabled_disk_templates:
    raise errors.OpPrereqError("Disk template '%s' is not enabled in cluster."
                               " Enabled disk templates are: %s" %
                               (disk_template,
                                ",".join(cluster.enabled_disk_templates)))


def CheckStorageTypeEnabled(cluster, storage_type):
  """Helper function to check if a storage type is enabled.

  @type cluster: C{objects.Cluster}
  @param cluster: the cluster's configuration
  @type storage_type: str
  @param storage_type: the storage type to be checked

  """
  assert storage_type is not None
  assert storage_type in constants.STORAGE_TYPES
  # special case for lvm-pv, because it cannot be enabled
  # via disk templates
  if storage_type == constants.ST_LVM_PV:
    CheckStorageTypeEnabled(cluster, constants.ST_LVM_VG)
  else:
    possible_disk_templates = \
        utils.storage.GetDiskTemplatesOfStorageTypes(storage_type)
    for disk_template in possible_disk_templates:
      if disk_template in cluster.enabled_disk_templates:
        return
    raise errors.OpPrereqError("No disk template of storage type '%s' is"
                               " enabled in this cluster. Enabled disk"
                               " templates are: %s" % (storage_type,
                               ",".join(cluster.enabled_disk_templates)))


def CheckIpolicyVsDiskTemplates(ipolicy, enabled_disk_templates):
  """Checks ipolicy disk templates against enabled disk templates.

  @type ipolicy: dict
  @param ipolicy: the new ipolicy
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of enabled disk templates on the
    cluster
  @raises errors.OpPrereqError: if there is at least one allowed disk
    template that is not also enabled.

  """
  assert constants.IPOLICY_DTS in ipolicy
  allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
  not_enabled = set(allowed_disk_templates) - set(enabled_disk_templates)
  if not_enabled:
    raise errors.OpPrereqError("The following disk templates are allowed"
                               " by the ipolicy, but not enabled on the"
                               " cluster: %s" % utils.CommaJoin(not_enabled))


def CheckDiskAccessModeValidity(parameters):
  """Checks if the access parameter is legal.

  @see: L{CheckDiskAccessModeConsistency} for cluster consistency checks.
  @raise errors.OpPrereqError: if the check fails.

  """
  for disk_template in parameters:
    access = parameters[disk_template].get(constants.LDP_ACCESS,
                                           constants.DISK_KERNELSPACE)
    if access not in constants.DISK_VALID_ACCESS_MODES:
      valid_vals_str = utils.CommaJoin(constants.DISK_VALID_ACCESS_MODES)
      raise errors.OpPrereqError("Invalid value of '{d}:{a}': '{v}' (expected"
                                 " one of {o})".format(d=disk_template,
                                                       a=constants.LDP_ACCESS,
                                                       v=access,
                                                       o=valid_vals_str))


def CheckDiskAccessModeConsistency(parameters, cfg, group=None):
  """Checks if the access param is consistent with the cluster configuration.

  @note: requires a configuration lock to run.
  @param parameters: the parameters to validate
  @param cfg: the cfg object of the cluster
  @param group: if set, only check for consistency within this group.
  @raise errors.OpPrereqError: if the LU attempts to change the access parameter
                               to an invalid value, such as "pink bunny".
  @raise errors.OpPrereqError: if the LU attempts to change the access parameter
                               to an inconsistent value, such as asking for RBD
                               userspace access to the chroot hypervisor.

  """
  CheckDiskAccessModeValidity(parameters)

  for disk_template in parameters:
    access = parameters[disk_template].get(constants.LDP_ACCESS,
                                           constants.DISK_KERNELSPACE)

    if disk_template not in constants.DTS_HAVE_ACCESS:
      continue

    # Check the combination of instance hypervisor, disk template and access
    # protocol is sane.
    inst_uuids = cfg.GetNodeGroupInstances(group) if group else \
                 cfg.GetInstanceList()

    for entry in inst_uuids:
      inst = cfg.GetInstanceInfo(entry)
      hv = inst.hypervisor
      dt = inst.disk_template

      if not IsValidDiskAccessModeCombination(hv, dt, access):
        raise errors.OpPrereqError("Instance {i}: cannot use '{a}' access"
                                   " setting with {h} hypervisor and {d} disk"
                                   " type.".format(i=inst.name,
                                                   a=access,
                                                   h=hv,
                                                   d=dt))


def IsValidDiskAccessModeCombination(hv, disk_template, mode):
  """Checks if a hypervisor can read a disk template with given mode.

  @param hv: the hypervisor that will access the data
  @param disk_template: the disk template the data is stored as
  @param mode: how the hypervisor should access the data
  @return: True if the hypervisor can read the given disk_template
           in the specified mode.

  """
  if mode == constants.DISK_KERNELSPACE:
    return True

  if (hv == constants.HT_KVM and
      disk_template in (constants.DT_RBD, constants.DT_GLUSTER) and
      mode == constants.DISK_USERSPACE):
    return True

  # Everything else:
  return False


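# Example (illustrative sketch): kernelspace access is always accepted, while
# userspace access is only accepted for KVM with RBD or Gluster disks:
#   IsValidDiskAccessModeCombination(constants.HT_KVM, constants.DT_RBD,
#                                    constants.DISK_USERSPACE)   -> True
#   IsValidDiskAccessModeCombination(constants.HT_XEN_PVM, constants.DT_RBD,
#                                    constants.DISK_USERSPACE)   -> False

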
def AddNodeCertToCandidateCerts(lu, node_uuid, cluster):
  """Add the node's client SSL certificate digest to the candidate certs.

  @type node_uuid: string
  @param node_uuid: the node's UUID
  @type cluster: C{objects.Cluster}
  @param cluster: the cluster's configuration

  """
  result = lu.rpc.call_node_crypto_tokens(
             node_uuid,
             [(constants.CRYPTO_TYPE_SSL_DIGEST, constants.CRYPTO_ACTION_GET,
               None)])
  result.Raise("Could not retrieve the node's (uuid %s) SSL digest."
               % node_uuid)
  ((crypto_type, digest), ) = result.payload
  assert crypto_type == constants.CRYPTO_TYPE_SSL_DIGEST

  utils.AddNodeToCandidateCerts(node_uuid, digest, cluster.candidate_certs)


def RemoveNodeCertFromCandidateCerts(node_uuid, cluster):
  """Removes the node's certificate from the candidate certificates list.

  @type node_uuid: string
  @param node_uuid: the node's UUID
  @type cluster: C{objects.Cluster}
  @param cluster: the cluster's configuration

  """
  utils.RemoveNodeFromCandidateCerts(node_uuid, cluster.candidate_certs)


def CreateNewClientCert(lu, node_uuid, filename=None):
  """Creates a new client SSL certificate for the node.

  @type node_uuid: string
  @param node_uuid: the node's UUID
  @type filename: string
  @param filename: the certificate's filename
  @rtype: string
  @return: the digest of the newly created certificate

  """
  options = {}
  if filename:
    options[constants.CRYPTO_OPTION_CERT_FILE] = filename
  options[constants.CRYPTO_OPTION_SERIAL_NO] = utils.UuidToInt(node_uuid)
  result = lu.rpc.call_node_crypto_tokens(
             node_uuid,
             [(constants.CRYPTO_TYPE_SSL_DIGEST,
               constants.CRYPTO_ACTION_CREATE,
               options)])
  result.Raise("Could not create the node's (uuid %s) SSL client"
               " certificate." % node_uuid)
  ((crypto_type, new_digest), ) = result.payload
  assert crypto_type == constants.CRYPTO_TYPE_SSL_DIGEST
  return new_digest


def AddInstanceCommunicationNetworkOp(network):
  """Create an OpCode that adds the instance communication network.

  This OpCode contains the configuration necessary for the instance
  communication network.

  @type network: string
  @param network: name or UUID of the instance communication network

  @rtype: L{ganeti.opcodes.OpCode}
  @return: OpCode that creates the instance communication network

  """
  return opcodes.OpNetworkAdd(
    network_name=network,
    gateway=None,
    network=constants.INSTANCE_COMMUNICATION_NETWORK4,
    gateway6=None,
    network6=constants.INSTANCE_COMMUNICATION_NETWORK6,
    mac_prefix=constants.INSTANCE_COMMUNICATION_MAC_PREFIX,
    add_reserved_ips=None,
    conflicts_check=True,
    tags=[])


def ConnectInstanceCommunicationNetworkOp(group_uuid, network):
  """Create an OpCode that connects a group to the instance
  communication network.

  This OpCode contains the configuration necessary for the instance
  communication network.

  @type group_uuid: string
  @param group_uuid: UUID of the group to connect

  @type network: string
  @param network: name or UUID of the network to connect to, i.e., the
                  instance communication network

  @rtype: L{ganeti.opcodes.OpCode}
  @return: OpCode that connects the group to the instance
           communication network

  """
  return opcodes.OpNetworkConnect(
    group_name=group_uuid,
    network_name=network,
    network_mode=constants.INSTANCE_COMMUNICATION_NETWORK_MODE,
    network_link=constants.INSTANCE_COMMUNICATION_NETWORK_LINK,
    conflicts_check=True)
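

# Example (illustrative sketch): a logical unit that sets up the instance
# communication network would typically combine both opcode builders above,
# e.g.
#   ops = [AddInstanceCommunicationNetworkOp(network_name),
#          ConnectInstanceCommunicationNetworkOp(group_uuid, network_name)]
# where 'network_name' and 'group_uuid' are assumed to be supplied by the
# caller (for instance from an opcode's parameters).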