lib/cmdlib/common.py @ b6dd32db


#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Common functions used by multiple logical units."""

import copy
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti.serializer import Private
import ganeti.rpc.node as rpc
from ganeti import ssconf
from ganeti import utils


# States of instance
INSTANCE_DOWN = [constants.ADMINST_DOWN]
INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]

#: Instance status in which an instance can be marked as offline/online
CAN_CHANGE_INSTANCE_OFFLINE = (frozenset(INSTANCE_DOWN) | frozenset([
  constants.ADMINST_OFFLINE,
  ]))

def _ExpandItemName(expand_fn, name, kind):
  """Expand an item name.

  @param expand_fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the result of the expand_fn, if successful
  @raise errors.OpPrereqError: if the item is not found

  """
  (uuid, full_name) = expand_fn(name)
  if uuid is None or full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return (uuid, full_name)


def ExpandInstanceUuidAndName(cfg, expected_uuid, name):
  """Wrapper over L{_ExpandItemName} for instance."""
  (uuid, full_name) = _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
  if expected_uuid is not None and uuid != expected_uuid:
    raise errors.OpPrereqError(
      "The instance's UUID '%s' does not match the expected UUID '%s' for"
      " instance '%s'. Maybe the instance changed since you submitted this"
      " job." % (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
  return (uuid, full_name)


def ExpandNodeUuidAndName(cfg, expected_uuid, name):
  """Expand a short node name into the node UUID and full name.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type expected_uuid: string
  @param expected_uuid: expected UUID for the node (or None if there is no
        expectation). If it does not match, a L{errors.OpPrereqError} is
        raised.
  @type name: string
  @param name: the short node name

  """
  (uuid, full_name) = _ExpandItemName(cfg.ExpandNodeName, name, "Node")
  if expected_uuid is not None and uuid != expected_uuid:
    raise errors.OpPrereqError(
      "The node's UUID '%s' does not match the expected UUID '%s' for node"
      " '%s'. Maybe the node changed since you submitted this job." %
      (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
  return (uuid, full_name)
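
# Illustrative sketch (not part of the upstream module): a logical unit would
# typically resolve a user-supplied short name in its CheckPrereq phase and
# keep working with the UUID afterwards.  The opcode field name below is
# hypothetical.
#
#   def CheckPrereq(self):
#     (self.node_uuid, self.node_name) = \
#       ExpandNodeUuidAndName(self.cfg, None, self.op.node_name)
#     # From here on, self.node_uuid stays valid even if the node is renamed.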


def ShareAll():
  """Returns a dict declaring all lock levels shared.

  """
  return dict.fromkeys(locking.LEVELS, 1)
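
# Illustrative sketch (assumption, not upstream code): ShareAll() simply maps
# every locking level to 1 ("shared"), so a LU can declare, e.g.:
#
#   self.share_locks = ShareAll()
#   # roughly equivalent to:
#   # self.share_locks = dict.fromkeys(locking.LEVELS, 1)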


def CheckNodeGroupInstances(cfg, group_uuid, owned_instance_names):
  """Checks if the instances in a node group are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type group_uuid: string
  @param group_uuid: Node group UUID
  @type owned_instance_names: set or frozenset
  @param owned_instance_names: List of currently owned instances

  """
  wanted_instances = frozenset(cfg.GetInstanceNames(
                                 cfg.GetNodeGroupInstances(group_uuid)))
  if owned_instance_names != wanted_instances:
    raise errors.OpPrereqError("Instances in node group '%s' changed since"
                               " locks were acquired, wanted '%s', have '%s';"
                               " retry the operation" %
                               (group_uuid,
                                utils.CommaJoin(wanted_instances),
                                utils.CommaJoin(owned_instance_names)),
                               errors.ECODE_STATE)

  return wanted_instances


def GetWantedNodes(lu, short_node_names):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type short_node_names: list
  @param short_node_names: list of node names or None for all nodes
  @rtype: tuple of lists
  @return: tuple with (list of node UUIDs, list of node names)
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if short_node_names:
    node_uuids = [ExpandNodeUuidAndName(lu.cfg, None, name)[0]
                  for name in short_node_names]
  else:
    node_uuids = lu.cfg.GetNodeList()

  return (node_uuids, [lu.cfg.GetNodeName(uuid) for uuid in node_uuids])
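
# Illustrative sketch (hypothetical names/UUIDs): with a short node name the
# helper expands it, while None selects every node in the configuration.
#
#   GetWantedNodes(lu, ["node1"])  ->  (["uuid-1"], ["node1.example.com"])
#   GetWantedNodes(lu, None)       ->  all node UUIDs and names from lu.cfg
#
# GetWantedInstances below behaves the same way for instances.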


def GetWantedInstances(lu, short_inst_names):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type short_inst_names: list
  @param short_inst_names: list of instance names or None for all instances
  @rtype: tuple of lists
  @return: tuple of (instance UUIDs, instance names)
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if short_inst_names:
    inst_uuids = [ExpandInstanceUuidAndName(lu.cfg, None, name)[0]
                  for name in short_inst_names]
  else:
    inst_uuids = lu.cfg.GetInstanceList()
  return (inst_uuids, [lu.cfg.GetInstanceName(uuid) for uuid in inst_uuids])


def RunPostHook(lu, node_name):
  """Runs the post-hook for an opcode on a single node.

  """
  hm = lu.proc.BuildHooksManager(lu)
  try:
    hm.RunPhase(constants.HOOKS_PHASE_POST, node_names=[node_name])
  except Exception, err: # pylint: disable=W0703
    lu.LogWarning("Errors occurred running hooks on %s: %s",
                  node_name, err)


def RedistributeAncillaryFiles(lu):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  """
  # Gather target nodes
  cluster = lu.cfg.GetClusterInfo()
  master_info = lu.cfg.GetMasterNodeInfo()

  online_node_uuids = lu.cfg.GetOnlineNodeList()
  online_node_uuid_set = frozenset(online_node_uuids)
  vm_node_uuids = list(online_node_uuid_set.intersection(
                         lu.cfg.GetVmCapableNodeList()))

  # Never distribute to master node
  for node_uuids in [online_node_uuids, vm_node_uuids]:
    if master_info.uuid in node_uuids:
      node_uuids.remove(master_info.uuid)

  # Gather file lists
  (files_all, _, files_mc, files_vm) = \
    ComputeAncillaryFiles(cluster, True)

  # Never re-distribute configuration file from here
  assert not (pathutils.CLUSTER_CONF_FILE in files_all or
              pathutils.CLUSTER_CONF_FILE in files_vm)
  assert not files_mc, "Master candidates not handled in this function"

  filemap = [
    (online_node_uuids, files_all),
    (vm_node_uuids, files_vm),
    ]

  # Upload the files
  for (node_uuids, files) in filemap:
    for fname in files:
      UploadHelper(lu, node_uuids, fname)


def ComputeAncillaryFiles(cluster, redist):
  """Compute files external to Ganeti which need to be consistent.

  @type redist: boolean
  @param redist: Whether to include files which need to be redistributed

  """
  # Compute files for all nodes
  files_all = set([
    pathutils.SSH_KNOWN_HOSTS_FILE,
    pathutils.CONFD_HMAC_KEY,
    pathutils.CLUSTER_DOMAIN_SECRET_FILE,
    pathutils.SPICE_CERT_FILE,
    pathutils.SPICE_CACERT_FILE,
    pathutils.RAPI_USERS_FILE,
    ])

  if redist:
    # we need to ship at least the RAPI certificate
    files_all.add(pathutils.RAPI_CERT_FILE)
  else:
    files_all.update(pathutils.ALL_CERT_FILES)
    files_all.update(ssconf.SimpleStore().GetFileList())

  if cluster.modify_etc_hosts:
    files_all.add(pathutils.ETC_HOSTS)

  if cluster.use_external_mip_script:
    files_all.add(pathutils.EXTERNAL_MASTER_SETUP_SCRIPT)

  # Files which are optional, these must:
  # - be present in one other category as well
  # - either exist or not exist on all nodes of that category (mc, vm all)
  files_opt = set([
    pathutils.RAPI_USERS_FILE,
    ])

  # Files which should only be on master candidates
  files_mc = set()

  if not redist:
    files_mc.add(pathutils.CLUSTER_CONF_FILE)

  # File storage
  if (not redist and (cluster.IsFileStorageEnabled() or
                        cluster.IsSharedFileStorageEnabled())):
    files_all.add(pathutils.FILE_STORAGE_PATHS_FILE)
    files_opt.add(pathutils.FILE_STORAGE_PATHS_FILE)

  # Files which should only be on VM-capable nodes
  files_vm = set(
    filename
    for hv_name in cluster.enabled_hypervisors
    for filename in
    hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[0])

  files_opt |= set(
    filename
    for hv_name in cluster.enabled_hypervisors
    for filename in
    hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[1])

  # Filenames in each category must be unique
  all_files_set = files_all | files_mc | files_vm
  assert (len(all_files_set) ==
          sum(map(len, [files_all, files_mc, files_vm]))), \
    "Found file listed in more than one file list"

  # Optional files must be present in one other category
  assert all_files_set.issuperset(files_opt), \
    "Optional file not in a different required list"

  # This one file should never ever be re-distributed via RPC
  assert not (redist and
              pathutils.FILE_STORAGE_PATHS_FILE in all_files_set)

  return (files_all, files_opt, files_mc, files_vm)


def UploadHelper(lu, node_uuids, fname):
  """Helper for uploading a file and showing warnings.

  """
  if os.path.exists(fname):
    result = lu.rpc.call_upload_file(node_uuids, fname)
    for to_node_uuids, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (fname, lu.cfg.GetNodeName(to_node_uuids), msg))
        lu.LogWarning(msg)


def MergeAndVerifyHvState(op_input, obj_input):
  """Combines the hv state from an opcode with the one of the object

  @param op_input: The input dict from the opcode
  @param obj_input: The input dict from the objects
  @return: The verified and updated dict

  """
  if op_input:
    invalid_hvs = set(op_input) - constants.HYPER_TYPES
    if invalid_hvs:
      raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:"
                                 " %s" % utils.CommaJoin(invalid_hvs),
                                 errors.ECODE_INVAL)
    if obj_input is None:
      obj_input = {}
    type_check = constants.HVSTS_PARAMETER_TYPES
    return _UpdateAndVerifySubDict(obj_input, op_input, type_check)

  return None


def MergeAndVerifyDiskState(op_input, obj_input):
  """Combines the disk state from an opcode with the one of the object

  @param op_input: The input dict from the opcode
  @param obj_input: The input dict from the objects
  @return: The verified and updated dict
  """
  if op_input:
    invalid_dst = set(op_input) - constants.DS_VALID_TYPES
    if invalid_dst:
      raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" %
                                 utils.CommaJoin(invalid_dst),
                                 errors.ECODE_INVAL)
    type_check = constants.DSS_PARAMETER_TYPES
    if obj_input is None:
      obj_input = {}
    return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value,
                                              type_check))
                for key, value in op_input.items())

  return None


def CheckOSParams(lu, required, node_uuids, osname, osparams):
  """OS parameters validation.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type required: boolean
  @param required: whether the validation should fail if the OS is not
      found
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @type osname: string
  @param osname: the name of the OS we should use
  @type osparams: dict
  @param osparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  node_uuids = _FilterVmNodes(lu, node_uuids)

  # Last chance to unwrap private elements.
  for key in osparams:
    if isinstance(osparams[key], Private):
      osparams[key] = osparams[key].Get()

  result = lu.rpc.call_os_validate(node_uuids, required, osname,
                                   [constants.OS_VALIDATE_PARAMETERS],
                                   osparams)
  for node_uuid, nres in result.items():
    # we don't check for offline cases since this should be run only
    # against the master node and/or an instance's nodes
    nres.Raise("OS Parameters validation failed on node %s" %
               lu.cfg.GetNodeName(node_uuid))
    if not nres.payload:
      lu.LogInfo("OS %s not found on node %s, validation skipped",
                 osname, lu.cfg.GetNodeName(node_uuid))


def CheckHVParams(lu, node_uuids, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  node_uuids = _FilterVmNodes(lu, node_uuids)

  cluster = lu.cfg.GetClusterInfo()
  hvfull = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)

  hvinfo = lu.rpc.call_hypervisor_validate_params(node_uuids, hvname, hvfull)
  for node_uuid in node_uuids:
    info = hvinfo[node_uuid]
    if info.offline:
      continue
    info.Raise("Hypervisor parameter validation failed on node %s" %
               lu.cfg.GetNodeName(node_uuid))


def AdjustCandidatePool(lu, exceptions, feedback_fn):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for node in mod_list:
      lu.context.ReaddNode(node)
      cluster = lu.cfg.GetClusterInfo()
      AddNodeCertToCandidateCerts(lu, node.uuid, cluster)
      lu.cfg.Update(cluster, feedback_fn)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def CheckNodePVs(nresult, exclusive_storage):
  """Check node PVs.

  """
  pvlist_dict = nresult.get(constants.NV_PVLIST, None)
  if pvlist_dict is None:
    return (["Can't get PV list from node"], None)
  pvlist = map(objects.LvmPvInfo.FromDict, pvlist_dict)
  errlist = []
  # check that ':' is not present in PV names, since it's a
  # special character for lvcreate (denotes the range of PEs to
  # use on the PV)
  for pv in pvlist:
    if ":" in pv.name:
      errlist.append("Invalid character ':' in PV '%s' of VG '%s'" %
                     (pv.name, pv.vg_name))
  es_pvinfo = None
  if exclusive_storage:
    (errmsgs, es_pvinfo) = utils.LvmExclusiveCheckNodePvs(pvlist)
    errlist.extend(errmsgs)
    shared_pvs = nresult.get(constants.NV_EXCLUSIVEPVS, None)
    if shared_pvs:
      for (pvname, lvlist) in shared_pvs:
        # TODO: Check that LVs are really unrelated (snapshots, DRBD meta...)
        errlist.append("PV %s is shared among unrelated LVs (%s)" %
                       (pvname, utils.CommaJoin(lvlist)))
  return (errlist, es_pvinfo)


def _ComputeMinMaxSpec(name, qualifier, ispecs, value):
  """Computes if value is in the desired range.

  @param name: name of the parameter for which we perform the check
  @param qualifier: a qualifier used in the error message (e.g. 'disk/1',
      not just 'disk')
  @param ispecs: dictionary containing min and max values
  @param value: actual value that we want to use
  @return: None or an error string

  """
  if value in [None, constants.VALUE_AUTO]:
    return None
  max_v = ispecs[constants.ISPECS_MAX].get(name, value)
  min_v = ispecs[constants.ISPECS_MIN].get(name, value)
  if value > max_v or min_v > value:
    if qualifier:
      fqn = "%s/%s" % (name, qualifier)
    else:
      fqn = name
    return ("%s value %s is not in range [%s, %s]" %
            (fqn, value, min_v, max_v))
  return None
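
# Illustrative sketch (hypothetical ispecs dict; message text only roughly
# reproduced): _ComputeMinMaxSpec reports a violation only when the value
# falls outside [min, max].
#
#   ispecs = {constants.ISPECS_MIN: {constants.ISPEC_MEM_SIZE: 128},
#             constants.ISPECS_MAX: {constants.ISPEC_MEM_SIZE: 4096}}
#   _ComputeMinMaxSpec(constants.ISPEC_MEM_SIZE, None, ispecs, 512)
#     -> None
#   _ComputeMinMaxSpec(constants.ISPEC_MEM_SIZE, None, ispecs, 8192)
#     -> something like "memory-size value 8192 is not in range [128, 4096]"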


def ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
                                nic_count, disk_sizes, spindle_use,
                                disk_template,
                                _compute_fn=_ComputeMinMaxSpec):
  """Verifies ipolicy against provided specs.

  @type ipolicy: dict
  @param ipolicy: The ipolicy
  @type mem_size: int
  @param mem_size: The memory size
  @type cpu_count: int
  @param cpu_count: Used cpu cores
  @type disk_count: int
  @param disk_count: Number of disks used
  @type nic_count: int
  @param nic_count: Number of nics used
  @type disk_sizes: list of ints
  @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
  @type spindle_use: int
  @param spindle_use: The number of spindles this instance uses
  @type disk_template: string
  @param disk_template: The disk template of the instance
  @param _compute_fn: The compute function (unittest only)
  @return: A list of violations, or an empty list if no violations are found

  """
  assert disk_count == len(disk_sizes)

  test_settings = [
    (constants.ISPEC_MEM_SIZE, "", mem_size),
    (constants.ISPEC_CPU_COUNT, "", cpu_count),
    (constants.ISPEC_NIC_COUNT, "", nic_count),
    (constants.ISPEC_SPINDLE_USE, "", spindle_use),
    ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
         for idx, d in enumerate(disk_sizes)]
  if disk_template != constants.DT_DISKLESS:
    # This check doesn't make sense for diskless instances
    test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count))
  ret = []
  allowed_dts = ipolicy[constants.IPOLICY_DTS]
  if disk_template not in allowed_dts:
    ret.append("Disk template %s is not allowed (allowed templates: %s)" %
               (disk_template, utils.CommaJoin(allowed_dts)))

  min_errs = None
  for minmax in ipolicy[constants.ISPECS_MINMAX]:
    errs = filter(None,
                  (_compute_fn(name, qualifier, minmax, value)
                   for (name, qualifier, value) in test_settings))
    if min_errs is None or len(errs) < len(min_errs):
      min_errs = errs
  assert min_errs is not None
  return ret + min_errs
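
# Illustrative sketch (hypothetical numbers): verifying a two-disk plain
# instance against a policy would look roughly like
#
#   violations = ComputeIPolicySpecViolation(
#     ipolicy, mem_size=1024, cpu_count=2, disk_count=2, nic_count=1,
#     disk_sizes=[10240, 20480], spindle_use=2,
#     disk_template=constants.DT_PLAIN)
#   if violations:
#     raise errors.OpPrereqError("Instance violates policy: %s" %
#                                utils.CommaJoin(violations),
#                                errors.ECODE_INVAL)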


def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
                                    _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance meets the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance: L{objects.Instance}
  @param instance: The instance to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  ret = []
  be_full = cfg.GetClusterInfo().FillBE(instance)
  mem_size = be_full[constants.BE_MAXMEM]
  cpu_count = be_full[constants.BE_VCPUS]
  inst_nodes = cfg.GetInstanceNodes(instance)
  es_flags = rpc.GetExclusiveStorageForNodes(cfg, inst_nodes)
  if any(es_flags.values()):
    # With exclusive storage use the actual spindles
    try:
      spindle_use = sum([disk.spindles for disk in instance.disks])
    except TypeError:
      ret.append("Number of spindles not configured for disks of instance %s"
                 " while exclusive storage is enabled, try running gnt-cluster"
                 " repair-disk-sizes" % instance.name)
      # _ComputeMinMaxSpec ignores 'None's
      spindle_use = None
  else:
    spindle_use = be_full[constants.BE_SPINDLE_USE]
  disk_count = len(instance.disks)
  disk_sizes = [disk.size for disk in instance.disks]
  nic_count = len(instance.nics)
  disk_template = instance.disk_template

  return ret + _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                           disk_sizes, spindle_use, disk_template)


def _ComputeViolatingInstances(ipolicy, instances, cfg):
  """Computes a set of instances that violate the given ipolicy.

  @param ipolicy: The ipolicy to verify
  @type instances: L{objects.Instance}
  @param instances: List of instances to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @return: A frozenset of instance names violating the ipolicy

  """
  return frozenset([inst.name for inst in instances
                    if ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])


def ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
  """Computes a set of any instances that would violate the new ipolicy.

  @param old_ipolicy: The current (still in-place) ipolicy
  @param new_ipolicy: The new (to become) ipolicy
  @param instances: List of instances to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @return: A list of instances which violate the new ipolicy but
      did not before

  """
  return (_ComputeViolatingInstances(new_ipolicy, instances, cfg) -
          _ComputeViolatingInstances(old_ipolicy, instances, cfg))


def GetUpdatedParams(old_params, update_dict,
                      use_default=True, use_none=False):
  """Return the new version of a parameter dictionary.

  @type old_params: dict
  @param old_params: old parameters
  @type update_dict: dict
  @param update_dict: dict containing new parameter values, or
      constants.VALUE_DEFAULT to reset the parameter to its default
      value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
  @rtype: dict
  @return: the new parameter dictionary

  """
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == constants.VALUE_DEFAULT) or
          (use_none and val is None)):
      try:
        del params_copy[key]
      except KeyError:
        pass
    else:
      params_copy[key] = val
  return params_copy
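
# Illustrative sketch (hypothetical values): GetUpdatedParams never mutates
# its input; VALUE_DEFAULT (and, with use_none=True, None) entries delete
# keys instead of storing them.
#
#   GetUpdatedParams({"a": 1, "b": 2},
#                    {"b": constants.VALUE_DEFAULT, "c": 3})
#     -> {"a": 1, "c": 3}
#   GetUpdatedParams({"a": 1}, {"a": None}, use_none=True)
#     -> {}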


def GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
  """Return the new version of an instance policy.

  @param group_policy: whether this policy applies to a group and thus
    we should support removal of policy entries

  """
  ipolicy = copy.deepcopy(old_ipolicy)
  for key, value in new_ipolicy.items():
    if key not in constants.IPOLICY_ALL_KEYS:
      raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
                                 errors.ECODE_INVAL)
    if (not value or value == [constants.VALUE_DEFAULT] or
            value == constants.VALUE_DEFAULT):
      if group_policy:
        if key in ipolicy:
          del ipolicy[key]
      else:
        raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
                                   " on the cluster" % key,
                                   errors.ECODE_INVAL)
    else:
      if key in constants.IPOLICY_PARAMETERS:
        # FIXME: we assume all such values are float
        try:
          ipolicy[key] = float(value)
        except (TypeError, ValueError), err:
          raise errors.OpPrereqError("Invalid value for attribute"
                                     " '%s': '%s', error: %s" %
                                     (key, value, err), errors.ECODE_INVAL)
      elif key == constants.ISPECS_MINMAX:
        for minmax in value:
          for k in minmax.keys():
            utils.ForceDictType(minmax[k], constants.ISPECS_PARAMETER_TYPES)
        ipolicy[key] = value
      elif key == constants.ISPECS_STD:
        if group_policy:
          msg = "%s cannot appear in group instance specs" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        ipolicy[key] = GetUpdatedParams(old_ipolicy.get(key, {}), value,
                                        use_none=False, use_default=False)
        utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
      else:
        # FIXME: we assume all others are lists; this should be redone
        # in a nicer way
        ipolicy[key] = list(value)
  try:
    objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
  except errors.ConfigurationError, err:
    raise errors.OpPrereqError("Invalid instance policy: %s" % err,
                               errors.ECODE_INVAL)
  return ipolicy


def AnnotateDiskParams(instance, devs, cfg):
  """Little helper wrapper to the rpc annotation method.

  @param instance: The instance object
  @type devs: List of L{objects.Disk}
  @param devs: The root devices (not any of its children!)
  @param cfg: The config object
  @return: The annotated disk copies
  @see: L{rpc.node.AnnotateDiskParams}

  """
  return rpc.AnnotateDiskParams(devs, cfg.GetInstanceDiskParams(instance))


def SupportsOob(cfg, node):
  """Tells if node supports OOB.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @return: The OOB script if supported or an empty string otherwise

  """
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]


def _UpdateAndVerifySubDict(base, updates, type_check):
  """Updates and verifies a dict with sub dicts of the same type.

  @param base: The dict with the old data
  @param updates: The dict with the new data
  @param type_check: Dict suitable to ForceDictType to verify correct types
  @returns: A new dict with updated and verified values

  """
  def fn(old, value):
    new = GetUpdatedParams(old, value)
    utils.ForceDictType(new, type_check)
    return new

  ret = copy.deepcopy(base)
  ret.update(dict((key, fn(base.get(key, {}), value))
                  for key, value in updates.items()))
  return ret
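
# Illustrative sketch (hypothetical dicts and parameter keys): the helper
# applies GetUpdatedParams per sub-dict and then type-checks the result,
# e.g. when merging an hv_state update into the stored hv_state:
#
#   base    = {"kvm": {"mem_node": 1024}}
#   updates = {"kvm": {"mem_node": 2048}, "xen-pvm": {"mem_node": 512}}
#   _UpdateAndVerifySubDict(base, updates, constants.HVSTS_PARAMETER_TYPES)
#     -> {"kvm": {"mem_node": 2048}, "xen-pvm": {"mem_node": 512}}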


def _FilterVmNodes(lu, node_uuids):
  """Filters out non-vm_capable nodes from a list.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @rtype: list
  @return: the list of vm-capable nodes

  """
  vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
  return [uuid for uuid in node_uuids if uuid not in vm_nodes]


def GetDefaultIAllocator(cfg, ialloc):
  """Decides on which iallocator to use.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration object
  @type ialloc: string or None
  @param ialloc: Iallocator specified in opcode
  @rtype: string
  @return: Iallocator name

  """
  if not ialloc:
    # Use default iallocator
    ialloc = cfg.GetDefaultIAllocator()

  if not ialloc:
    raise errors.OpPrereqError("No iallocator was specified, neither in the"
                               " opcode nor as a cluster-wide default",
                               errors.ECODE_INVAL)

  return ialloc


def CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_node_uuids,
                             cur_group_uuid):
  """Checks if node groups for locked instances are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @type instances: dict; string as key, L{objects.Instance} as value
  @param instances: Dictionary, instance UUID as key, instance object as value
  @type owned_groups: iterable of string
  @param owned_groups: List of owned groups
  @type owned_node_uuids: iterable of string
  @param owned_node_uuids: List of owned nodes
  @type cur_group_uuid: string or None
  @param cur_group_uuid: Optional group UUID to check against instance's groups

  """
  for (uuid, inst) in instances.items():
    inst_nodes = cfg.GetInstanceNodes(inst)
    assert owned_node_uuids.issuperset(inst_nodes), \
      "Instance %s's nodes changed while we kept the lock" % inst.name

    inst_groups = CheckInstanceNodeGroups(cfg, uuid, owned_groups)

    assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
      "Instance %s has no node in group %s" % (inst.name, cur_group_uuid)


def CheckInstanceNodeGroups(cfg, inst_uuid, owned_groups, primary_only=False):
  """Checks if the owned node groups are still correct for an instance.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type inst_uuid: string
  @param inst_uuid: Instance UUID
  @type owned_groups: set or frozenset
  @param owned_groups: List of currently owned node groups
  @type primary_only: boolean
  @param primary_only: Whether to check node groups for only the primary node

  """
  inst_groups = cfg.GetInstanceNodeGroups(inst_uuid, primary_only)

  if not owned_groups.issuperset(inst_groups):
    raise errors.OpPrereqError("Instance %s's node groups changed since"
                               " locks were acquired, current groups"
                               " are '%s', owning groups '%s'; retry the"
                               " operation" %
                               (cfg.GetInstanceName(inst_uuid),
                                utils.CommaJoin(inst_groups),
                                utils.CommaJoin(owned_groups)),
                               errors.ECODE_STATE)

  return inst_groups


def LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
  """Unpacks the result of change-group and node-evacuate iallocator requests.

  Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
  L{constants.IALLOCATOR_MODE_CHG_GROUP}.

  @type lu: L{LogicalUnit}
  @param lu: Logical unit instance
  @type alloc_result: tuple/list
  @param alloc_result: Result from iallocator
  @type early_release: bool
  @param early_release: Whether to release locks early if possible
  @type use_nodes: bool
  @param use_nodes: Whether to display node names instead of groups

  """
  (moved, failed, jobs) = alloc_result

  if failed:
    failreason = utils.CommaJoin("%s (%s)" % (name, reason)
                                 for (name, reason) in failed)
    lu.LogWarning("Unable to evacuate instances %s", failreason)
    raise errors.OpExecError("Unable to evacuate instances %s" % failreason)

  if moved:
    lu.LogInfo("Instances to be moved: %s",
               utils.CommaJoin(
                 "%s (to %s)" %
                 (name, _NodeEvacDest(use_nodes, group, node_names))
                 for (name, group, node_names) in moved))

  return [map(compat.partial(_SetOpEarlyRelease, early_release),
              map(opcodes.OpCode.LoadOpCode, ops))
          for ops in jobs]


def _NodeEvacDest(use_nodes, group, node_names):
  """Returns group or nodes depending on caller's choice.

  """
  if use_nodes:
    return utils.CommaJoin(node_names)
  else:
    return group


def _SetOpEarlyRelease(early_release, op):
  """Sets C{early_release} flag on opcodes if available.

  """
  try:
    op.early_release = early_release
  except AttributeError:
    assert not isinstance(op, opcodes.OpInstanceReplaceDisks)

  return op


def MapInstanceLvsToNodes(cfg, instances):
  """Creates a map from (node, volume) to instance name.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type instances: list of L{objects.Instance}
  @rtype: dict; tuple of (node uuid, volume name) as key, L{objects.Instance}
          object as value

  """
  return dict(((node_uuid, vol), inst)
              for inst in instances
              for (node_uuid, vols) in cfg.GetInstanceLVsByNode(inst).items()
              for vol in vols)
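
# Illustrative sketch (hypothetical UUIDs and volume names): the resulting
# mapping lets verification code look up which instance owns a logical
# volume reported by a node, e.g.
#
#   {("node-uuid-1", "xenvg/disk0"): <Instance inst1>,
#    ("node-uuid-2", "xenvg/disk0_meta"): <Instance inst1>}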


def CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
  """Make sure that none of the given parameters is global.

  If a global parameter is found, an L{errors.OpPrereqError} exception is
  raised. This is used to avoid setting global parameters for individual nodes.

  @type params: dictionary
  @param params: Parameters to check
  @type glob_pars: dictionary
  @param glob_pars: Forbidden parameters
  @type kind: string
  @param kind: Kind of parameters (e.g. "node")
  @type bad_levels: string
  @param bad_levels: Level(s) at which the parameters are forbidden (e.g.
      "instance")
  @type good_levels: strings
  @param good_levels: Level(s) at which the parameters are allowed (e.g.
      "cluster or group")

  """
  used_globals = glob_pars.intersection(params)
  if used_globals:
    msg = ("The following %s parameters are global and cannot"
           " be customized at %s level, please modify them at"
           " %s level: %s" %
           (kind, bad_levels, good_levels, utils.CommaJoin(used_globals)))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
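
# Illustrative sketch (assumed constants and opcode fields): a LU setting
# per-node parameters can use this to reject overrides of cluster-wide
# ndparams, roughly:
#
#   CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
#                        "node", "cluster or group")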


def IsExclusiveStorageEnabledNode(cfg, node):
  """Whether exclusive_storage is in effect for the given node.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @rtype: bool
  @return: The effective value of exclusive_storage

  """
  return cfg.GetNdParams(node)[constants.ND_EXCLUSIVE_STORAGE]


def CheckInstanceState(lu, instance, req_states, msg=None):
  """Ensure that an instance is in one of the required states.

  @param lu: the LU on behalf of which we make the check
  @param instance: the instance to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the instance is not in the required state

  """
  if msg is None:
    msg = ("can't use instance from outside %s states" %
           utils.CommaJoin(req_states))
  if instance.admin_state not in req_states:
    raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" %
                               (instance.name, instance.admin_state, msg),
                               errors.ECODE_STATE)

  if constants.ADMINST_UP not in req_states:
    pnode_uuid = instance.primary_node
    if not lu.cfg.GetNodeInfo(pnode_uuid).offline:
      all_hvparams = lu.cfg.GetClusterInfo().hvparams
      ins_l = lu.rpc.call_instance_list(
                [pnode_uuid], [instance.hypervisor], all_hvparams)[pnode_uuid]
      ins_l.Raise("Can't contact node %s for instance information" %
                  lu.cfg.GetNodeName(pnode_uuid),
                  prereq=True, ecode=errors.ECODE_ENVIRON)
      if instance.name in ins_l.payload:
        raise errors.OpPrereqError("Instance %s is running, %s" %
                                   (instance.name, msg), errors.ECODE_STATE)
    else:
      lu.LogWarning("Primary node offline, ignoring check that instance"
                     " is down")


def CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
  """Check the sanity of iallocator and node arguments and use the
  cluster-wide iallocator if appropriate.

  Check that at most one of (iallocator, node) is specified. If none is
  specified, or the iallocator is L{constants.DEFAULT_IALLOCATOR_SHORTCUT},
  then the LU's opcode's iallocator slot is filled with the cluster-wide
  default iallocator.

  @type iallocator_slot: string
  @param iallocator_slot: the name of the opcode iallocator slot
  @type node_slot: string
  @param node_slot: the name of the opcode target node slot

  """
  node = getattr(lu.op, node_slot, None)
  ialloc = getattr(lu.op, iallocator_slot, None)
  if node == []:
    node = None

  if node is not None and ialloc is not None:
    raise errors.OpPrereqError("Do not specify both, iallocator and node",
                               errors.ECODE_INVAL)
  elif ((node is None and ialloc is None) or
        ialloc == constants.DEFAULT_IALLOCATOR_SHORTCUT):
    default_iallocator = lu.cfg.GetDefaultIAllocator()
    if default_iallocator:
      setattr(lu.op, iallocator_slot, default_iallocator)
    else:
      raise errors.OpPrereqError("No iallocator or node given and no"
                                 " cluster-wide default iallocator found;"
                                 " please specify either an iallocator or a"
                                 " node, or set a cluster-wide default"
                                 " iallocator", errors.ECODE_INVAL)


def FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_uuid, prereq):
  faulty = []

  result = rpc_runner.call_blockdev_getmirrorstatus(
             node_uuid, (instance.disks, instance))
  result.Raise("Failed to get disk status from node %s" %
               cfg.GetNodeName(node_uuid),
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


def CheckNodeOnline(lu, node_uuid, msg=None):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the node is offline

  """
  if msg is None:
    msg = "Can't use offline node"
  if lu.cfg.GetNodeInfo(node_uuid).offline:
    raise errors.OpPrereqError("%s: %s" % (msg, lu.cfg.GetNodeName(node_uuid)),
                               errors.ECODE_STATE)


def CheckDiskTemplateEnabled(cluster, disk_template):
  """Helper function to check if a disk template is enabled.

  @type cluster: C{objects.Cluster}
  @param cluster: the cluster's configuration
  @type disk_template: str
  @param disk_template: the disk template to be checked

  """
  assert disk_template is not None
  if disk_template not in constants.DISK_TEMPLATES:
    raise errors.OpPrereqError("'%s' is not a valid disk template."
                               " Valid disk templates are: %s" %
                               (disk_template,
                                ",".join(constants.DISK_TEMPLATES)))
  if not disk_template in cluster.enabled_disk_templates:
    raise errors.OpPrereqError("Disk template '%s' is not enabled in cluster."
                               " Enabled disk templates are: %s" %
                               (disk_template,
                                ",".join(cluster.enabled_disk_templates)))


def CheckStorageTypeEnabled(cluster, storage_type):
  """Helper function to check if a storage type is enabled.

  @type cluster: C{objects.Cluster}
  @param cluster: the cluster's configuration
  @type storage_type: str
  @param storage_type: the storage type to be checked

  """
  assert storage_type is not None
  assert storage_type in constants.STORAGE_TYPES
  # special case for lvm-pv, because it cannot be enabled
  # via disk templates
  if storage_type == constants.ST_LVM_PV:
    CheckStorageTypeEnabled(cluster, constants.ST_LVM_VG)
  else:
    possible_disk_templates = \
        utils.storage.GetDiskTemplatesOfStorageTypes(storage_type)
    for disk_template in possible_disk_templates:
      if disk_template in cluster.enabled_disk_templates:
        return
    raise errors.OpPrereqError("No disk template of storage type '%s' is"
                               " enabled in this cluster. Enabled disk"
                               " templates are: %s" % (storage_type,
                               ",".join(cluster.enabled_disk_templates)))


def CheckIpolicyVsDiskTemplates(ipolicy, enabled_disk_templates):
  """Checks ipolicy disk templates against enabled disk templates.

  @type ipolicy: dict
  @param ipolicy: the new ipolicy
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of enabled disk templates on the
    cluster
  @raises errors.OpPrereqError: if there is at least one allowed disk
    template that is not also enabled.

  """
  assert constants.IPOLICY_DTS in ipolicy
  allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
  not_enabled = set(allowed_disk_templates) - set(enabled_disk_templates)
  if not_enabled:
    raise errors.OpPrereqError("The following disk templates are allowed"
                               " by the ipolicy, but not enabled on the"
                               " cluster: %s" % utils.CommaJoin(not_enabled))


def CheckDiskAccessModeValidity(parameters):
  """Checks if the access parameter is legal.

  @see: L{CheckDiskAccessModeConsistency} for cluster consistency checks.
  @raise errors.OpPrereqError: if the check fails.

  """
  for disk_template in parameters:
    access = parameters[disk_template].get(constants.LDP_ACCESS,
                                           constants.DISK_KERNELSPACE)
    if access not in constants.DISK_VALID_ACCESS_MODES:
      valid_vals_str = utils.CommaJoin(constants.DISK_VALID_ACCESS_MODES)
      raise errors.OpPrereqError("Invalid value of '{d}:{a}': '{v}' (expected"
                                 " one of {o})".format(d=disk_template,
                                                       a=constants.LDP_ACCESS,
                                                       v=access,
                                                       o=valid_vals_str))


def CheckDiskAccessModeConsistency(parameters, cfg, group=None):
  """Checks if the access param is consistent with the cluster configuration.

  @note: requires a configuration lock to run.
  @param parameters: the parameters to validate
  @param cfg: the cfg object of the cluster
  @param group: if set, only check for consistency within this group.
  @raise errors.OpPrereqError: if the LU attempts to change the access parameter
                               to an invalid value, such as "pink bunny".
  @raise errors.OpPrereqError: if the LU attempts to change the access parameter
                               to an inconsistent value, such as asking for RBD
                               userspace access to the chroot hypervisor.

  """
  CheckDiskAccessModeValidity(parameters)

  for disk_template in parameters:
    access = parameters[disk_template].get(constants.LDP_ACCESS,
                                           constants.DISK_KERNELSPACE)

    if disk_template not in constants.DTS_HAVE_ACCESS:
      continue

    # Check the combination of instance hypervisor, disk template and access
    # protocol is sane.
    inst_uuids = cfg.GetNodeGroupInstances(group) if group else \
                 cfg.GetInstanceList()

    for entry in inst_uuids:
      inst = cfg.GetInstanceInfo(entry)
      hv = inst.hypervisor
      dt = inst.disk_template

      if not IsValidDiskAccessModeCombination(hv, dt, access):
        raise errors.OpPrereqError("Instance {i}: cannot use '{a}' access"
                                   " setting with {h} hypervisor and {d} disk"
                                   " type.".format(i=inst.name,
                                                   a=access,
                                                   h=hv,
                                                   d=dt))


def IsValidDiskAccessModeCombination(hv, disk_template, mode):
  """Checks if a hypervisor can read a disk template with given mode.

  @param hv: the hypervisor that will access the data
  @param disk_template: the disk template the data is stored as
  @param mode: how the hypervisor should access the data
  @return: True if the hypervisor can read a disk of the given disk_template
           in the specified mode.

  """
  if mode == constants.DISK_KERNELSPACE:
    return True

  if (hv == constants.HT_KVM and
      disk_template in (constants.DT_RBD, constants.DT_GLUSTER) and
      mode == constants.DISK_USERSPACE):
    return True

  # Everything else:
  return False
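
# Illustrative sketch: kernelspace access is always accepted, while userspace
# access is only accepted for KVM with RBD or Gluster disks, e.g.
#
#   IsValidDiskAccessModeCombination(constants.HT_KVM, constants.DT_RBD,
#                                    constants.DISK_USERSPACE)      -> True
#   IsValidDiskAccessModeCombination(constants.HT_XEN_PVM, constants.DT_RBD,
#                                    constants.DISK_USERSPACE)      -> False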


def AddNodeCertToCandidateCerts(lu, node_uuid, cluster):
  """Add the node's client SSL certificate digest to the candidate certs.

  @type node_uuid: string
  @param node_uuid: the node's UUID
  @type cluster: C{objects.Cluster}
  @param cluster: the cluster's configuration

  """
  result = lu.rpc.call_node_crypto_tokens(
             node_uuid,
             [(constants.CRYPTO_TYPE_SSL_DIGEST, constants.CRYPTO_ACTION_GET,
               None)])
  result.Raise("Could not retrieve the node's (uuid %s) SSL digest."
               % node_uuid)
  ((crypto_type, digest), ) = result.payload
  assert crypto_type == constants.CRYPTO_TYPE_SSL_DIGEST

  utils.AddNodeToCandidateCerts(node_uuid, digest, cluster.candidate_certs)


def RemoveNodeCertFromCandidateCerts(node_uuid, cluster):
  """Removes the node's certificate from the candidate certificates list.

  @type node_uuid: string
  @param node_uuid: the node's UUID
  @type cluster: C{objects.Cluster}
  @param cluster: the cluster's configuration

  """
  utils.RemoveNodeFromCandidateCerts(node_uuid, cluster.candidate_certs)


def CreateNewClientCert(lu, node_uuid, filename=None):
  """Creates a new client SSL certificate for the node.

  @type node_uuid: string
  @param node_uuid: the node's UUID
  @type filename: string
  @param filename: the certificate's filename
  @rtype: string
  @return: the digest of the newly created certificate

  """
  options = {}
  if filename:
    options[constants.CRYPTO_OPTION_CERT_FILE] = filename
  options[constants.CRYPTO_OPTION_SERIAL_NO] = utils.UuidToInt(node_uuid)
  result = lu.rpc.call_node_crypto_tokens(
             node_uuid,
             [(constants.CRYPTO_TYPE_SSL_DIGEST,
               constants.CRYPTO_ACTION_CREATE,
               options)])
  result.Raise("Could not create the node's (uuid %s) SSL client"
               " certificate." % node_uuid)
  ((crypto_type, new_digest), ) = result.payload
  assert crypto_type == constants.CRYPTO_TYPE_SSL_DIGEST
  return new_digest


def AddInstanceCommunicationNetworkOp(network):
  """Create an OpCode that adds the instance communication network.

  This OpCode contains the configuration necessary for the instance
  communication network.

  @type network: string
  @param network: name or UUID of the instance communication network

  @rtype: L{ganeti.opcodes.OpCode}
  @return: OpCode that creates the instance communication network

  """
  return opcodes.OpNetworkAdd(
    network_name=network,
    gateway=None,
    network=constants.INSTANCE_COMMUNICATION_NETWORK4,
    gateway6=None,
    network6=constants.INSTANCE_COMMUNICATION_NETWORK6,
    mac_prefix=constants.INSTANCE_COMMUNICATION_MAC_PREFIX,
    add_reserved_ips=None,
    conflicts_check=True,
    tags=[])


def ConnectInstanceCommunicationNetworkOp(group_uuid, network):
  """Create an OpCode that connects a group to the instance
  communication network.

  This OpCode contains the configuration necessary for the instance
  communication network.

  @type group_uuid: string
  @param group_uuid: UUID of the group to connect

  @type network: string
  @param network: name or UUID of the network to connect to, i.e., the
                  instance communication network

  @rtype: L{ganeti.opcodes.OpCode}
  @return: OpCode that connects the group to the instance
           communication network

  """
  return opcodes.OpNetworkConnect(
    group_name=group_uuid,
    network_name=network,
    network_mode=constants.INSTANCE_COMMUNICATION_NETWORK_MODE,
    network_link=constants.INSTANCE_COMMUNICATION_NETWORK_LINK,
    conflicts_check=True)
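
# Illustrative sketch (hypothetical attribute and job-submission helper): a
# LU enabling the instance communication network would typically submit the
# two opcodes above together, roughly:
#
#   network_name = self.cfg.GetClusterInfo().instance_communication_network
#   jobs = [[AddInstanceCommunicationNetworkOp(network_name)],
#           [ConnectInstanceCommunicationNetworkOp(group_uuid, network_name)
#            for group_uuid in self.cfg.GetNodeGroupList()]]
#   # submitted through the job queue, e.g. via ResultWithJobs(jobs)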