root / lib / cmdlib / common.py @ 1c4910f7


1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Common functions used by multiple logical units."""
23

    
24
import copy
25
import os
26

    
27
from ganeti import compat
28
from ganeti import constants
29
from ganeti import errors
30
from ganeti import hypervisor
31
from ganeti import locking
32
from ganeti import objects
33
from ganeti import opcodes
34
from ganeti import pathutils
35
from ganeti.serializer import Private
36
import ganeti.rpc.node as rpc
37
from ganeti import ssconf
38
from ganeti import utils
39

    
40

    
41
# States of instance
42
INSTANCE_DOWN = [constants.ADMINST_DOWN]
43
INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
44
INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]
45

    
46
#: Instance status in which an instance can be marked as offline/online
47
CAN_CHANGE_INSTANCE_OFFLINE = (frozenset(INSTANCE_DOWN) | frozenset([
48
  constants.ADMINST_OFFLINE,
49
  ]))
50

    
51

    
52
def _ExpandItemName(expand_fn, name, kind):
53
  """Expand an item name.
54

55
  @param expand_fn: the function to use for expansion
56
  @param name: requested item name
57
  @param kind: text description ('Node' or 'Instance')
58
  @return: the result of the expand_fn, if successful
59
  @raise errors.OpPrereqError: if the item is not found
60

61
  """
62
  (uuid, full_name) = expand_fn(name)
63
  if uuid is None or full_name is None:
64
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
65
                               errors.ECODE_NOENT)
66
  return (uuid, full_name)
67

    
68

    
69
def ExpandInstanceUuidAndName(cfg, expected_uuid, name):
70
  """Wrapper over L{_ExpandItemName} for instance."""
71
  (uuid, full_name) = _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
72
  if expected_uuid is not None and uuid != expected_uuid:
73
    raise errors.OpPrereqError(
74
      "The instances UUID '%s' does not match the expected UUID '%s' for"
75
      " instance '%s'. Maybe the instance changed since you submitted this"
76
      " job." % (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
77
  return (uuid, full_name)
78

    
79

    
80
def ExpandNodeUuidAndName(cfg, expected_uuid, name):
81
  """Expand a short node name into the node UUID and full name.
82

83
  @type cfg: L{config.ConfigWriter}
84
  @param cfg: The cluster configuration
85
  @type expected_uuid: string
86
  @param expected_uuid: expected UUID for the node (or None if there is no
87
        expectation). If it does not match, a L{errors.OpPrereqError} is
88
        raised.
89
  @type name: string
90
  @param name: the short node name
91

92
  """
93
  (uuid, full_name) = _ExpandItemName(cfg.ExpandNodeName, name, "Node")
94
  if expected_uuid is not None and uuid != expected_uuid:
95
    raise errors.OpPrereqError(
96
      "The nodes UUID '%s' does not match the expected UUID '%s' for node"
97
      " '%s'. Maybe the node changed since you submitted this job." %
98
      (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
99
  return (uuid, full_name)
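
# Illustrative usage sketch (not part of the original module); the LU context
# providing lu.cfg and the returned values are assumed:
#
#   (node_uuid, node_name) = ExpandNodeUuidAndName(lu.cfg, None, "node1")
#   # e.g. node_uuid == "4ec5abf2-...", node_name == "node1.example.com"
#   # Passing a non-None expected_uuid additionally guards against the node
#   # having been replaced between job submission and execution.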
100

    
101

    
102
def ShareAll():
103
  """Returns a dict declaring all lock levels shared.
104

105
  """
106
  return dict.fromkeys(locking.LEVELS, 1)
107

    
108

    
109
def CheckNodeGroupInstances(cfg, group_uuid, owned_instance_names):
110
  """Checks if the instances in a node group are still correct.
111

112
  @type cfg: L{config.ConfigWriter}
113
  @param cfg: The cluster configuration
114
  @type group_uuid: string
115
  @param group_uuid: Node group UUID
116
  @type owned_instance_names: set or frozenset
117
  @param owned_instance_names: List of currently owned instances
118

119
  """
120
  wanted_instances = frozenset(cfg.GetInstanceNames(
121
                                 cfg.GetNodeGroupInstances(group_uuid)))
122
  if owned_instance_names != wanted_instances:
123
    raise errors.OpPrereqError("Instances in node group '%s' changed since"
124
                               " locks were acquired, wanted '%s', have '%s';"
125
                               " retry the operation" %
126
                               (group_uuid,
127
                                utils.CommaJoin(wanted_instances),
128
                                utils.CommaJoin(owned_instance_names)),
129
                               errors.ECODE_STATE)
130

    
131
  return wanted_instances
132

    
133

    
134
def GetWantedNodes(lu, short_node_names):
135
  """Returns list of checked and expanded node names.
136

137
  @type lu: L{LogicalUnit}
138
  @param lu: the logical unit on whose behalf we execute
139
  @type short_node_names: list
140
  @param short_node_names: list of node names or None for all nodes
141
  @rtype: tuple of lists
142
  @return: tuple with (list of node UUIDs, list of node names)
143
  @raise errors.ProgrammerError: if the nodes parameter is wrong type
144

145
  """
146
  if short_node_names:
147
    node_uuids = [ExpandNodeUuidAndName(lu.cfg, None, name)[0]
148
                  for name in short_node_names]
149
  else:
150
    node_uuids = lu.cfg.GetNodeList()
151

    
152
  return (node_uuids, [lu.cfg.GetNodeName(uuid) for uuid in node_uuids])
153

    
154

    
155
def GetWantedInstances(lu, short_inst_names):
156
  """Returns list of checked and expanded instance names.
157

158
  @type lu: L{LogicalUnit}
159
  @param lu: the logical unit on whose behalf we execute
160
  @type short_inst_names: list
161
  @param short_inst_names: list of instance names or None for all instances
162
  @rtype: tuple of lists
163
  @return: tuple of (instance UUIDs, instance names)
164
  @raise errors.OpPrereqError: if the instances parameter is wrong type
165
  @raise errors.OpPrereqError: if any of the passed instances is not found
166

167
  """
168
  if short_inst_names:
169
    inst_uuids = [ExpandInstanceUuidAndName(lu.cfg, None, name)[0]
170
                  for name in short_inst_names]
171
  else:
172
    inst_uuids = lu.cfg.GetInstanceList()
173
  return (inst_uuids, [lu.cfg.GetInstanceName(uuid) for uuid in inst_uuids])
174

    
175

    
176
def RunPostHook(lu, node_name):
177
  """Runs the post-hook for an opcode on a single node.
178

179
  """
180
  hm = lu.proc.BuildHooksManager(lu)
181
  try:
182
    hm.RunPhase(constants.HOOKS_PHASE_POST, node_names=[node_name])
183
  except Exception, err: # pylint: disable=W0703
184
    lu.LogWarning("Errors occurred running hooks on %s: %s",
185
                  node_name, err)
186

    
187

    
188
def RedistributeAncillaryFiles(lu):
189
  """Distribute additional files which are part of the cluster configuration.
190

191
  ConfigWriter takes care of distributing the config and ssconf files, but
192
  there are more files which should be distributed to all nodes. This function
193
  makes sure those are copied.
194

195
  """
196
  # Gather target nodes
197
  cluster = lu.cfg.GetClusterInfo()
198
  master_info = lu.cfg.GetMasterNodeInfo()
199

    
200
  online_node_uuids = lu.cfg.GetOnlineNodeList()
201
  online_node_uuid_set = frozenset(online_node_uuids)
202
  vm_node_uuids = list(online_node_uuid_set.intersection(
203
                         lu.cfg.GetVmCapableNodeList()))
204

    
205
  # Never distribute to master node
206
  for node_uuids in [online_node_uuids, vm_node_uuids]:
207
    if master_info.uuid in node_uuids:
208
      node_uuids.remove(master_info.uuid)
209

    
210
  # Gather file lists
211
  (files_all, _, files_mc, files_vm) = \
212
    ComputeAncillaryFiles(cluster, True)
213

    
214
  # Never re-distribute configuration file from here
215
  assert not (pathutils.CLUSTER_CONF_FILE in files_all or
216
              pathutils.CLUSTER_CONF_FILE in files_vm)
217
  assert not files_mc, "Master candidates not handled in this function"
218

    
219
  filemap = [
220
    (online_node_uuids, files_all),
221
    (vm_node_uuids, files_vm),
222
    ]
223

    
224
  # Upload the files
225
  for (node_uuids, files) in filemap:
226
    for fname in files:
227
      UploadHelper(lu, node_uuids, fname)
228

    
229

    
230
def ComputeAncillaryFiles(cluster, redist):
231
  """Compute files external to Ganeti which need to be consistent.
232

233
  @type redist: boolean
234
  @param redist: Whether to include files which need to be redistributed
235

236
  """
237
  # Compute files for all nodes
238
  files_all = set([
239
    pathutils.SSH_KNOWN_HOSTS_FILE,
240
    pathutils.CONFD_HMAC_KEY,
241
    pathutils.CLUSTER_DOMAIN_SECRET_FILE,
242
    pathutils.SPICE_CERT_FILE,
243
    pathutils.SPICE_CACERT_FILE,
244
    pathutils.RAPI_USERS_FILE,
245
    ])
246

    
247
  if redist:
248
    # we need to ship at least the RAPI certificate
249
    files_all.add(pathutils.RAPI_CERT_FILE)
250
  else:
251
    files_all.update(pathutils.ALL_CERT_FILES)
252
    files_all.update(ssconf.SimpleStore().GetFileList())
253

    
254
  if cluster.modify_etc_hosts:
255
    files_all.add(pathutils.ETC_HOSTS)
256

    
257
  if cluster.use_external_mip_script:
258
    files_all.add(pathutils.EXTERNAL_MASTER_SETUP_SCRIPT)
259

    
260
  # Files which are optional; these must:
261
  # - be present in one other category as well
262
  # - either exist or not exist on all nodes of that category (mc, vm all)
263
  files_opt = set([
264
    pathutils.RAPI_USERS_FILE,
265
    ])
266

    
267
  # Files which should only be on master candidates
268
  files_mc = set()
269

    
270
  if not redist:
271
    files_mc.add(pathutils.CLUSTER_CONF_FILE)
272

    
273
  # File storage
274
  if (not redist and (cluster.IsFileStorageEnabled() or
275
                        cluster.IsSharedFileStorageEnabled())):
276
    files_all.add(pathutils.FILE_STORAGE_PATHS_FILE)
277
    files_opt.add(pathutils.FILE_STORAGE_PATHS_FILE)
278

    
279
  # Files which should only be on VM-capable nodes
280
  files_vm = set(
281
    filename
282
    for hv_name in cluster.enabled_hypervisors
283
    for filename in
284
    hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[0])
285

    
286
  files_opt |= set(
287
    filename
288
    for hv_name in cluster.enabled_hypervisors
289
    for filename in
290
    hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[1])
291

    
292
  # Filenames in each category must be unique
293
  all_files_set = files_all | files_mc | files_vm
294
  assert (len(all_files_set) ==
295
          sum(map(len, [files_all, files_mc, files_vm]))), \
296
    "Found file listed in more than one file list"
297

    
298
  # Optional files must be present in one other category
299
  assert all_files_set.issuperset(files_opt), \
300
    "Optional file not in a different required list"
301

    
302
  # This one file should never ever be re-distributed via RPC
303
  assert not (redist and
304
              pathutils.FILE_STORAGE_PATHS_FILE in all_files_set)
305

    
306
  return (files_all, files_opt, files_mc, files_vm)
307

    
308

    
309
def UploadHelper(lu, node_uuids, fname):
310
  """Helper for uploading a file and showing warnings.
311

312
  """
313
  if os.path.exists(fname):
314
    result = lu.rpc.call_upload_file(node_uuids, fname)
315
    for to_node_uuids, to_result in result.items():
316
      msg = to_result.fail_msg
317
      if msg:
318
        msg = ("Copy of file %s to node %s failed: %s" %
319
               (fname, lu.cfg.GetNodeName(to_node_uuids), msg))
320
        lu.LogWarning(msg)
321

    
322

    
323
def MergeAndVerifyHvState(op_input, obj_input):
324
  """Combines the hv state from an opcode with the one of the object
325

326
  @param op_input: The input dict from the opcode
327
  @param obj_input: The input dict from the objects
328
  @return: The verified and updated dict
329

330
  """
331
  if op_input:
332
    invalid_hvs = set(op_input) - constants.HYPER_TYPES
333
    if invalid_hvs:
334
      raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:"
335
                                 " %s" % utils.CommaJoin(invalid_hvs),
336
                                 errors.ECODE_INVAL)
337
    if obj_input is None:
338
      obj_input = {}
339
    type_check = constants.HVSTS_PARAMETER_TYPES
340
    return _UpdateAndVerifySubDict(obj_input, op_input, type_check)
341

    
342
  return None
343

    
344

    
345
def MergeAndVerifyDiskState(op_input, obj_input):
346
  """Combines the disk state from an opcode with the one of the object
347

348
  @param op_input: The input dict from the opcode
349
  @param obj_input: The input dict from the objects
350
  @return: The verified and updated dict
351
  """
352
  if op_input:
353
    invalid_dst = set(op_input) - constants.DS_VALID_TYPES
354
    if invalid_dst:
355
      raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" %
356
                                 utils.CommaJoin(invalid_dst),
357
                                 errors.ECODE_INVAL)
358
    type_check = constants.DSS_PARAMETER_TYPES
359
    if obj_input is None:
360
      obj_input = {}
361
    return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value,
362
                                              type_check))
363
                for key, value in op_input.items())
364

    
365
  return None
366

    
367

    
368
def CheckOSParams(lu, required, node_uuids, osname, osparams):
369
  """OS parameters validation.
370

371
  @type lu: L{LogicalUnit}
372
  @param lu: the logical unit for which we check
373
  @type required: boolean
374
  @param required: whether the validation should fail if the OS is not
375
      found
376
  @type node_uuids: list
377
  @param node_uuids: the list of nodes on which we should check
378
  @type osname: string
379
  @param osname: the name of the OS we should use
380
  @type osparams: dict
381
  @param osparams: the parameters which we need to check
382
  @raise errors.OpPrereqError: if the parameters are not valid
383

384
  """
385
  node_uuids = _FilterVmNodes(lu, node_uuids)
386

    
387
  # Last chance to unwrap private elements.
388
  for key in osparams:
389
    if isinstance(osparams[key], Private):
390
      osparams[key] = osparams[key].Get()
391

    
392
  if osname:
393
    result = lu.rpc.call_os_validate(node_uuids, required, osname,
394
                                     [constants.OS_VALIDATE_PARAMETERS],
395
                                     osparams)
396
    for node_uuid, nres in result.items():
397
      # we don't check for offline cases since this should be run only
398
      # against the master node and/or an instance's nodes
399
      nres.Raise("OS Parameters validation failed on node %s" %
400
                 lu.cfg.GetNodeName(node_uuid))
401
      if not nres.payload:
402
        lu.LogInfo("OS %s not found on node %s, validation skipped",
403
                   osname, lu.cfg.GetNodeName(node_uuid))
404

    
405

    
406
def CheckOSImage(op):
407
  """Checks if the OS image in the OS parameters of an opcode is
408
  valid.
409

410
  This function can also be used in LUs as they carry an opcode.
411

412
  @type op: L{opcodes.OpCode}
413
  @param op: opcode containing the OS params
414

415
  @rtype: string or NoneType
416
  @return:
417
    None if the OS parameters in the opcode do not contain the OS
418
    image, otherwise the OS image value contained in the OS parameters
419
  @raise errors.OpPrereqError: if OS image is not a URL or an absolute path
420

421
  """
422
  os_image = objects.GetOSImage(op.osparams)
423

    
424
  if os_image is None:
425
    return None
426
  elif utils.IsUrl(os_image) or os.path.exists(os_image):
427
    return os_image
428
  else:
429
    raise errors.OpPrereqError("OS image must be a URL or an absolute path")
430

    
431

    
432
def CheckHVParams(lu, node_uuids, hvname, hvparams):
433
  """Hypervisor parameter validation.
434

435
  This function abstracts the hypervisor parameter validation to be
436
  used in both instance create and instance modify.
437

438
  @type lu: L{LogicalUnit}
439
  @param lu: the logical unit for which we check
440
  @type node_uuids: list
441
  @param node_uuids: the list of nodes on which we should check
442
  @type hvname: string
443
  @param hvname: the name of the hypervisor we should use
444
  @type hvparams: dict
445
  @param hvparams: the parameters which we need to check
446
  @raise errors.OpPrereqError: if the parameters are not valid
447

448
  """
449
  node_uuids = _FilterVmNodes(lu, node_uuids)
450

    
451
  cluster = lu.cfg.GetClusterInfo()
452
  hvfull = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)
453

    
454
  hvinfo = lu.rpc.call_hypervisor_validate_params(node_uuids, hvname, hvfull)
455
  for node_uuid in node_uuids:
456
    info = hvinfo[node_uuid]
457
    if info.offline:
458
      continue
459
    info.Raise("Hypervisor parameter validation failed on node %s" %
460
               lu.cfg.GetNodeName(node_uuid))
461

    
462

    
463
def AdjustCandidatePool(lu, exceptions, feedback_fn):
464
  """Adjust the candidate pool after node operations.
465

466
  """
467
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
468
  if mod_list:
469
    lu.LogInfo("Promoted nodes to master candidate role: %s",
470
               utils.CommaJoin(node.name for node in mod_list))
471
    for node in mod_list:
472
      lu.context.ReaddNode(node)
473
      cluster = lu.cfg.GetClusterInfo()
474
      AddNodeCertToCandidateCerts(lu, node.uuid, cluster)
475
      lu.cfg.Update(cluster, feedback_fn)
476
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
477
  if mc_now > mc_max:
478
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
479
               (mc_now, mc_max))
480

    
481

    
482
def CheckNodePVs(nresult, exclusive_storage):
483
  """Check node PVs.
484

485
  """
486
  pvlist_dict = nresult.get(constants.NV_PVLIST, None)
487
  if pvlist_dict is None:
488
    return (["Can't get PV list from node"], None)
489
  pvlist = map(objects.LvmPvInfo.FromDict, pvlist_dict)
490
  errlist = []
491
  # check that ':' is not present in PV names, since it's a
492
  # special character for lvcreate (denotes the range of PEs to
493
  # use on the PV)
494
  for pv in pvlist:
495
    if ":" in pv.name:
496
      errlist.append("Invalid character ':' in PV '%s' of VG '%s'" %
497
                     (pv.name, pv.vg_name))
498
  es_pvinfo = None
499
  if exclusive_storage:
500
    (errmsgs, es_pvinfo) = utils.LvmExclusiveCheckNodePvs(pvlist)
501
    errlist.extend(errmsgs)
502
    shared_pvs = nresult.get(constants.NV_EXCLUSIVEPVS, None)
503
    if shared_pvs:
504
      for (pvname, lvlist) in shared_pvs:
505
        # TODO: Check that LVs are really unrelated (snapshots, DRBD meta...)
506
        errlist.append("PV %s is shared among unrelated LVs (%s)" %
507
                       (pvname, utils.CommaJoin(lvlist)))
508
  return (errlist, es_pvinfo)
509

    
510

    
511
def _ComputeMinMaxSpec(name, qualifier, ispecs, value):
512
  """Computes if value is in the desired range.
513

514
  @param name: name of the parameter for which we perform the check
515
  @param qualifier: a qualifier used in the error message (e.g. 'disk/1',
516
      not just 'disk')
517
  @param ispecs: dictionary containing min and max values
518
  @param value: actual value that we want to use
519
  @return: None or an error string
520

521
  """
522
  if value in [None, constants.VALUE_AUTO]:
523
    return None
524
  max_v = ispecs[constants.ISPECS_MAX].get(name, value)
525
  min_v = ispecs[constants.ISPECS_MIN].get(name, value)
526
  if value > max_v or min_v > value:
527
    if qualifier:
528
      fqn = "%s/%s" % (name, qualifier)
529
    else:
530
      fqn = name
531
    return ("%s value %s is not in range [%s, %s]" %
532
            (fqn, value, min_v, max_v))
533
  return None
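
# Illustrative sketch (not part of the original module): with an assumed ispecs
# dict holding min/max bounds, the check returns None when the value fits and
# an error string otherwise.
#
#   ispecs = {constants.ISPECS_MIN: {"memory-size": 128},
#             constants.ISPECS_MAX: {"memory-size": 4096}}
#   _ComputeMinMaxSpec("memory-size", "", ispecs, 512)   # -> None (in range)
#   _ComputeMinMaxSpec("memory-size", "", ispecs, 8192)
#   # -> "memory-size value 8192 is not in range [128, 4096]"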
534

    
535

    
536
def ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
537
                                nic_count, disk_sizes, spindle_use,
538
                                disk_template,
539
                                _compute_fn=_ComputeMinMaxSpec):
540
  """Verifies ipolicy against provided specs.
541

542
  @type ipolicy: dict
543
  @param ipolicy: The ipolicy
544
  @type mem_size: int
545
  @param mem_size: The memory size
546
  @type cpu_count: int
547
  @param cpu_count: Used cpu cores
548
  @type disk_count: int
549
  @param disk_count: Number of disks used
550
  @type nic_count: int
551
  @param nic_count: Number of nics used
552
  @type disk_sizes: list of ints
553
  @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
554
  @type spindle_use: int
555
  @param spindle_use: The number of spindles this instance uses
556
  @type disk_template: string
557
  @param disk_template: The disk template of the instance
558
  @param _compute_fn: The compute function (unittest only)
559
  @return: A list of violations, or an empty list if no violations are found
560

561
  """
562
  assert disk_count == len(disk_sizes)
563

    
564
  test_settings = [
565
    (constants.ISPEC_MEM_SIZE, "", mem_size),
566
    (constants.ISPEC_CPU_COUNT, "", cpu_count),
567
    (constants.ISPEC_NIC_COUNT, "", nic_count),
568
    (constants.ISPEC_SPINDLE_USE, "", spindle_use),
569
    ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
570
         for idx, d in enumerate(disk_sizes)]
571
  if disk_template != constants.DT_DISKLESS:
572
    # This check doesn't make sense for diskless instances
573
    test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count))
574
  ret = []
575
  allowed_dts = ipolicy[constants.IPOLICY_DTS]
576
  if disk_template not in allowed_dts:
577
    ret.append("Disk template %s is not allowed (allowed templates: %s)" %
578
               (disk_template, utils.CommaJoin(allowed_dts)))
579

    
580
  min_errs = None
581
  for minmax in ipolicy[constants.ISPECS_MINMAX]:
582
    errs = filter(None,
583
                  (_compute_fn(name, qualifier, minmax, value)
584
                   for (name, qualifier, value) in test_settings))
585
    if min_errs is None or len(errs) < len(min_errs):
586
      min_errs = errs
587
  assert min_errs is not None
588
  return ret + min_errs
589

    
590

    
591
def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
592
                                    _compute_fn=ComputeIPolicySpecViolation):
593
  """Compute if instance meets the specs of ipolicy.
594

595
  @type ipolicy: dict
596
  @param ipolicy: The ipolicy to verify against
597
  @type instance: L{objects.Instance}
598
  @param instance: The instance to verify
599
  @type cfg: L{config.ConfigWriter}
600
  @param cfg: Cluster configuration
601
  @param _compute_fn: The function to verify ipolicy (unittest only)
602
  @see: L{ComputeIPolicySpecViolation}
603

604
  """
605
  ret = []
606
  be_full = cfg.GetClusterInfo().FillBE(instance)
607
  mem_size = be_full[constants.BE_MAXMEM]
608
  cpu_count = be_full[constants.BE_VCPUS]
609
  es_flags = rpc.GetExclusiveStorageForNodes(cfg, instance.all_nodes)
610
  if any(es_flags.values()):
611
    # With exclusive storage use the actual spindles
612
    try:
613
      spindle_use = sum([disk.spindles for disk in instance.disks])
614
    except TypeError:
615
      ret.append("Number of spindles not configured for disks of instance %s"
616
                 " while exclusive storage is enabled, try running gnt-cluster"
617
                 " repair-disk-sizes" % instance.name)
618
      # _ComputeMinMaxSpec ignores 'None's
619
      spindle_use = None
620
  else:
621
    spindle_use = be_full[constants.BE_SPINDLE_USE]
622
  disk_count = len(instance.disks)
623
  disk_sizes = [disk.size for disk in instance.disks]
624
  nic_count = len(instance.nics)
625
  disk_template = instance.disk_template
626

    
627
  return ret + _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
628
                           disk_sizes, spindle_use, disk_template)
629

    
630

    
631
def _ComputeViolatingInstances(ipolicy, instances, cfg):
632
  """Computes a set of instances who violates given ipolicy.
633

634
  @param ipolicy: The ipolicy to verify
635
  @type instances: list of L{objects.Instance}
636
  @param instances: List of instances to verify
637
  @type cfg: L{config.ConfigWriter}
638
  @param cfg: Cluster configuration
639
  @return: A frozenset of instance names violating the ipolicy
640

641
  """
642
  return frozenset([inst.name for inst in instances
643
                    if ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])
644

    
645

    
646
def ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
647
  """Computes a set of any instances that would violate the new ipolicy.
648

649
  @param old_ipolicy: The current (still in-place) ipolicy
650
  @param new_ipolicy: The new (to become) ipolicy
651
  @param instances: List of instances to verify
652
  @type cfg: L{config.ConfigWriter}
653
  @param cfg: Cluster configuration
654
  @return: A set of instance names which violate the new ipolicy but
655
      did not before
656

657
  """
658
  return (_ComputeViolatingInstances(new_ipolicy, instances, cfg) -
659
          _ComputeViolatingInstances(old_ipolicy, instances, cfg))
660

    
661

    
662
def GetUpdatedParams(old_params, update_dict,
663
                      use_default=True, use_none=False):
664
  """Return the new version of a parameter dictionary.
665

666
  @type old_params: dict
667
  @param old_params: old parameters
668
  @type update_dict: dict
669
  @param update_dict: dict containing new parameter values, or
670
      constants.VALUE_DEFAULT to reset the parameter to its default
671
      value
672
  @type use_default: boolean
673
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
674
      values as 'to be deleted' values
675
  @type use_none: boolean
676
  @param use_none: whether to recognise C{None} values as 'to be
677
      deleted' values
678
  @rtype: dict
679
  @return: the new parameter dictionary
680

681
  """
682
  params_copy = copy.deepcopy(old_params)
683
  for key, val in update_dict.iteritems():
684
    if ((use_default and val == constants.VALUE_DEFAULT) or
685
          (use_none and val is None)):
686
      try:
687
        del params_copy[key]
688
      except KeyError:
689
        pass
690
    else:
691
      params_copy[key] = val
692
  return params_copy
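
# Illustrative sketch (not part of the original module): entries set to
# constants.VALUE_DEFAULT are removed from the result, all other entries
# override or extend the old parameters (literal values assumed).
#
#   old = {"kernel_path": "/boot/vmlinuz", "serial_console": True}
#   new = GetUpdatedParams(old, {"kernel_path": constants.VALUE_DEFAULT,
#                                "acpi": False})
#   # new == {"serial_console": True, "acpi": False}; with use_none=True a
#   # value of None would likewise delete the key instead of storing None.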
693

    
694

    
695
def GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
696
  """Return the new version of an instance policy.
697

698
  @param group_policy: whether this policy applies to a group and thus
699
    we should support removal of policy entries
700

701
  """
702
  ipolicy = copy.deepcopy(old_ipolicy)
703
  for key, value in new_ipolicy.items():
704
    if key not in constants.IPOLICY_ALL_KEYS:
705
      raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
706
                                 errors.ECODE_INVAL)
707
    if (not value or value == [constants.VALUE_DEFAULT] or
708
            value == constants.VALUE_DEFAULT):
709
      if group_policy:
710
        if key in ipolicy:
711
          del ipolicy[key]
712
      else:
713
        raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
714
                                   " on the cluster'" % key,
715
                                   errors.ECODE_INVAL)
716
    else:
717
      if key in constants.IPOLICY_PARAMETERS:
718
        # FIXME: we assume all such values are float
719
        try:
720
          ipolicy[key] = float(value)
721
        except (TypeError, ValueError), err:
722
          raise errors.OpPrereqError("Invalid value for attribute"
723
                                     " '%s': '%s', error: %s" %
724
                                     (key, value, err), errors.ECODE_INVAL)
725
      elif key == constants.ISPECS_MINMAX:
726
        for minmax in value:
727
          for k in minmax.keys():
728
            utils.ForceDictType(minmax[k], constants.ISPECS_PARAMETER_TYPES)
729
        ipolicy[key] = value
730
      elif key == constants.ISPECS_STD:
731
        if group_policy:
732
          msg = "%s cannot appear in group instance specs" % key
733
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
734
        ipolicy[key] = GetUpdatedParams(old_ipolicy.get(key, {}), value,
735
                                        use_none=False, use_default=False)
736
        utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
737
      else:
738
        # FIXME: we assume all others are lists; this should be redone
739
        # in a nicer way
740
        ipolicy[key] = list(value)
741
  try:
742
    objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
743
  except errors.ConfigurationError, err:
744
    raise errors.OpPrereqError("Invalid instance policy: %s" % err,
745
                               errors.ECODE_INVAL)
746
  return ipolicy
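
# Illustrative sketch (not part of the original module): at group level,
# setting a key to constants.VALUE_DEFAULT removes it so the cluster-wide
# value applies again; at cluster level the same request raises
# OpPrereqError. The key and value below are assumed.
#
#   group_pol = GetUpdatedIPolicy({"vcpu-ratio": 4.0},
#                                 {"vcpu-ratio": constants.VALUE_DEFAULT},
#                                 group_policy=True)
#   # "vcpu-ratio" is no longer present in group_pol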
747

    
748

    
749
def AnnotateDiskParams(instance, devs, cfg):
750
  """Little helper wrapper to the rpc annotation method.
751

752
  @param instance: The instance object
753
  @type devs: List of L{objects.Disk}
754
  @param devs: The root devices (not any of its children!)
755
  @param cfg: The config object
756
  @return: The annotated disk copies
757
  @see: L{rpc.node.AnnotateDiskParams}
758

759
  """
760
  return rpc.AnnotateDiskParams(devs, cfg.GetInstanceDiskParams(instance))
761

    
762

    
763
def SupportsOob(cfg, node):
764
  """Tells if node supports OOB.
765

766
  @type cfg: L{config.ConfigWriter}
767
  @param cfg: The cluster configuration
768
  @type node: L{objects.Node}
769
  @param node: The node
770
  @return: The OOB script if supported or an empty string otherwise
771

772
  """
773
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
774

    
775

    
776
def _UpdateAndVerifySubDict(base, updates, type_check):
777
  """Updates and verifies a dict with sub dicts of the same type.
778

779
  @param base: The dict with the old data
780
  @param updates: The dict with the new data
781
  @param type_check: Dict suitable to ForceDictType to verify correct types
782
  @returns: A new dict with updated and verified values
783

784
  """
785
  def fn(old, value):
786
    new = GetUpdatedParams(old, value)
787
    utils.ForceDictType(new, type_check)
788
    return new
789

    
790
  ret = copy.deepcopy(base)
791
  ret.update(dict((key, fn(base.get(key, {}), value))
792
                  for key, value in updates.items()))
793
  return ret
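
# Illustrative sketch (not part of the original module): every sub-dict in the
# updates is merged into the matching sub-dict of the base and then type
# checked; the keys and the type_check dict below are assumed.
#
#   base = {"kvm": {"mem_node": 2048}}
#   updates = {"kvm": {"mem_node": 4096}, "xen-pvm": {"cpu_node": 1}}
#   # _UpdateAndVerifySubDict(base, updates, type_check) ->
#   #   {"kvm": {"mem_node": 4096}, "xen-pvm": {"cpu_node": 1}}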
794

    
795

    
796
def _FilterVmNodes(lu, node_uuids):
797
  """Filters out non-vm_capable nodes from a list.
798

799
  @type lu: L{LogicalUnit}
800
  @param lu: the logical unit for which we check
801
  @type node_uuids: list
802
  @param node_uuids: the list of nodes on which we should check
803
  @rtype: list
804
  @return: the list of vm-capable nodes
805

806
  """
807
  non_vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
808
  return [uuid for uuid in node_uuids if uuid not in non_vm_nodes]
809

    
810

    
811
def GetDefaultIAllocator(cfg, ialloc):
812
  """Decides on which iallocator to use.
813

814
  @type cfg: L{config.ConfigWriter}
815
  @param cfg: Cluster configuration object
816
  @type ialloc: string or None
817
  @param ialloc: Iallocator specified in opcode
818
  @rtype: string
819
  @return: Iallocator name
820

821
  """
822
  if not ialloc:
823
    # Use default iallocator
824
    ialloc = cfg.GetDefaultIAllocator()
825

    
826
  if not ialloc:
827
    raise errors.OpPrereqError("No iallocator was specified, neither in the"
828
                               " opcode nor as a cluster-wide default",
829
                               errors.ECODE_INVAL)
830

    
831
  return ialloc
832

    
833

    
834
def CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_node_uuids,
835
                             cur_group_uuid):
836
  """Checks if node groups for locked instances are still correct.
837

838
  @type cfg: L{config.ConfigWriter}
839
  @param cfg: Cluster configuration
840
  @type instances: dict; string as key, L{objects.Instance} as value
841
  @param instances: Dictionary, instance UUID as key, instance object as value
842
  @type owned_groups: iterable of string
843
  @param owned_groups: List of owned groups
844
  @type owned_node_uuids: iterable of string
845
  @param owned_node_uuids: List of owned nodes
846
  @type cur_group_uuid: string or None
847
  @param cur_group_uuid: Optional group UUID to check against instance's groups
848

849
  """
850
  for (uuid, inst) in instances.items():
851
    assert owned_node_uuids.issuperset(inst.all_nodes), \
852
      "Instance %s's nodes changed while we kept the lock" % inst.name
853

    
854
    inst_groups = CheckInstanceNodeGroups(cfg, uuid, owned_groups)
855

    
856
    assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
857
      "Instance %s has no node in group %s" % (inst.name, cur_group_uuid)
858

    
859

    
860
def CheckInstanceNodeGroups(cfg, inst_uuid, owned_groups, primary_only=False):
861
  """Checks if the owned node groups are still correct for an instance.
862

863
  @type cfg: L{config.ConfigWriter}
864
  @param cfg: The cluster configuration
865
  @type inst_uuid: string
866
  @param inst_uuid: Instance UUID
867
  @type owned_groups: set or frozenset
868
  @param owned_groups: List of currently owned node groups
869
  @type primary_only: boolean
870
  @param primary_only: Whether to check node groups for only the primary node
871

872
  """
873
  inst_groups = cfg.GetInstanceNodeGroups(inst_uuid, primary_only)
874

    
875
  if not owned_groups.issuperset(inst_groups):
876
    raise errors.OpPrereqError("Instance %s's node groups changed since"
877
                               " locks were acquired, current groups are"
878
                               " are '%s', owning groups '%s'; retry the"
879
                               " operation" %
880
                               (cfg.GetInstanceName(inst_uuid),
881
                                utils.CommaJoin(inst_groups),
882
                                utils.CommaJoin(owned_groups)),
883
                               errors.ECODE_STATE)
884

    
885
  return inst_groups
886

    
887

    
888
def LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
889
  """Unpacks the result of change-group and node-evacuate iallocator requests.
890

891
  Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
892
  L{constants.IALLOCATOR_MODE_CHG_GROUP}.
893

894
  @type lu: L{LogicalUnit}
895
  @param lu: Logical unit instance
896
  @type alloc_result: tuple/list
897
  @param alloc_result: Result from iallocator
898
  @type early_release: bool
899
  @param early_release: Whether to release locks early if possible
900
  @type use_nodes: bool
901
  @param use_nodes: Whether to display node names instead of groups
902

903
  """
904
  (moved, failed, jobs) = alloc_result
905

    
906
  if failed:
907
    failreason = utils.CommaJoin("%s (%s)" % (name, reason)
908
                                 for (name, reason) in failed)
909
    lu.LogWarning("Unable to evacuate instances %s", failreason)
910
    raise errors.OpExecError("Unable to evacuate instances %s" % failreason)
911

    
912
  if moved:
913
    lu.LogInfo("Instances to be moved: %s",
914
               utils.CommaJoin(
915
                 "%s (to %s)" %
916
                 (name, _NodeEvacDest(use_nodes, group, node_names))
917
                 for (name, group, node_names) in moved))
918

    
919
  return [map(compat.partial(_SetOpEarlyRelease, early_release),
920
              map(opcodes.OpCode.LoadOpCode, ops))
921
          for ops in jobs]
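
# Illustrative sketch (not part of the original module) of the alloc_result
# layout this function expects from the iallocator (names assumed):
#
#   alloc_result = (
#     [("inst1.example.com", "group2", ["node3.example.com"])],   # moved
#     [("inst2.example.com", "not enough memory on target")],     # failed
#     [[{"OP_ID": "OP_INSTANCE_MIGRATE", ...}]],                  # jobs
#     )
#
# Each entry in "jobs" is a list of serialized opcodes forming one job; they
# are re-created via opcodes.OpCode.LoadOpCode and get the early_release flag
# applied where the opcode supports it.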
922

    
923

    
924
def _NodeEvacDest(use_nodes, group, node_names):
925
  """Returns group or nodes depending on caller's choice.
926

927
  """
928
  if use_nodes:
929
    return utils.CommaJoin(node_names)
930
  else:
931
    return group
932

    
933

    
934
def _SetOpEarlyRelease(early_release, op):
935
  """Sets C{early_release} flag on opcodes if available.
936

937
  """
938
  try:
939
    op.early_release = early_release
940
  except AttributeError:
941
    assert not isinstance(op, opcodes.OpInstanceReplaceDisks)
942

    
943
  return op
944

    
945

    
946
def MapInstanceLvsToNodes(instances):
947
  """Creates a map from (node, volume) to instance name.
948

949
  @type instances: list of L{objects.Instance}
950
  @rtype: dict; tuple of (node uuid, volume name) as key, L{objects.Instance}
951
          object as value
952

953
  """
954
  return dict(((node_uuid, vol), inst)
955
              for inst in instances
956
              for (node_uuid, vols) in inst.MapLVsByNode().items()
957
              for vol in vols)
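
# Illustrative sketch (not part of the original module): for two instances
# whose LVs live on two nodes, the resulting map might look like this (node
# UUIDs and volume names assumed):
#
#   {("node-uuid-1", "xenvg/disk0"): <instance inst1>,
#    ("node-uuid-1", "xenvg/disk1"): <instance inst1>,
#    ("node-uuid-2", "xenvg/disk0"): <instance inst2>}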
958

    
959

    
960
def CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
961
  """Make sure that none of the given paramters is global.
962

963
  If a global parameter is found, an L{errors.OpPrereqError} exception is
964
  raised. This is used to avoid setting global parameters for individual nodes.
965

966
  @type params: dictionary
967
  @param params: Parameters to check
968
  @type glob_pars: dictionary
969
  @param glob_pars: Forbidden parameters
970
  @type kind: string
971
  @param kind: Kind of parameters (e.g. "node")
972
  @type bad_levels: string
973
  @param bad_levels: Level(s) at which the parameters are forbidden (e.g.
974
      "instance")
975
  @type good_levels: strings
976
  @param good_levels: Level(s) at which the parameters are allowed (e.g.
977
      "cluster or group")
978

979
  """
980
  used_globals = glob_pars.intersection(params)
981
  if used_globals:
982
    msg = ("The following %s parameters are global and cannot"
983
           " be customized at %s level, please modify them at"
984
           " %s level: %s" %
985
           (kind, bad_levels, good_levels, utils.CommaJoin(used_globals)))
986
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
987

    
988

    
989
def IsExclusiveStorageEnabledNode(cfg, node):
990
  """Whether exclusive_storage is in effect for the given node.
991

992
  @type cfg: L{config.ConfigWriter}
993
  @param cfg: The cluster configuration
994
  @type node: L{objects.Node}
995
  @param node: The node
996
  @rtype: bool
997
  @return: The effective value of exclusive_storage
998

999
  """
1000
  return cfg.GetNdParams(node)[constants.ND_EXCLUSIVE_STORAGE]
1001

    
1002

    
1003
def CheckInstanceState(lu, instance, req_states, msg=None):
1004
  """Ensure that an instance is in one of the required states.
1005

1006
  @param lu: the LU on behalf of which we make the check
1007
  @param instance: the instance to check
  @param req_states: the states in which the instance is required to be
1008
  @param msg: if passed, should be a message to replace the default one
1009
  @raise errors.OpPrereqError: if the instance is not in the required state
1010

1011
  """
1012
  if msg is None:
1013
    msg = ("can't use instance from outside %s states" %
1014
           utils.CommaJoin(req_states))
1015
  if instance.admin_state not in req_states:
1016
    raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" %
1017
                               (instance.name, instance.admin_state, msg),
1018
                               errors.ECODE_STATE)
1019

    
1020
  if constants.ADMINST_UP not in req_states:
1021
    pnode_uuid = instance.primary_node
1022
    if not lu.cfg.GetNodeInfo(pnode_uuid).offline:
1023
      all_hvparams = lu.cfg.GetClusterInfo().hvparams
1024
      ins_l = lu.rpc.call_instance_list(
1025
                [pnode_uuid], [instance.hypervisor], all_hvparams)[pnode_uuid]
1026
      ins_l.Raise("Can't contact node %s for instance information" %
1027
                  lu.cfg.GetNodeName(pnode_uuid),
1028
                  prereq=True, ecode=errors.ECODE_ENVIRON)
1029
      if instance.name in ins_l.payload:
1030
        raise errors.OpPrereqError("Instance %s is running, %s" %
1031
                                   (instance.name, msg), errors.ECODE_STATE)
1032
    else:
1033
      lu.LogWarning("Primary node offline, ignoring check that instance"
1034
                     " is down")
1035

    
1036

    
1037
def CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
1038
  """Check the sanity of iallocator and node arguments and use the
1039
  cluster-wide iallocator if appropriate.
1040

1041
  Check that at most one of (iallocator, node) is specified. If none is
1042
  specified, or the iallocator is L{constants.DEFAULT_IALLOCATOR_SHORTCUT},
1043
  then the LU's opcode's iallocator slot is filled with the cluster-wide
1044
  default iallocator.
1045

1046
  @type iallocator_slot: string
1047
  @param iallocator_slot: the name of the opcode iallocator slot
1048
  @type node_slot: string
1049
  @param node_slot: the name of the opcode target node slot
1050

1051
  """
1052
  node = getattr(lu.op, node_slot, None)
1053
  ialloc = getattr(lu.op, iallocator_slot, None)
1054
  if node == []:
1055
    node = None
1056

    
1057
  if node is not None and ialloc is not None:
1058
    raise errors.OpPrereqError("Do not specify both, iallocator and node",
1059
                               errors.ECODE_INVAL)
1060
  elif ((node is None and ialloc is None) or
1061
        ialloc == constants.DEFAULT_IALLOCATOR_SHORTCUT):
1062
    default_iallocator = lu.cfg.GetDefaultIAllocator()
1063
    if default_iallocator:
1064
      setattr(lu.op, iallocator_slot, default_iallocator)
1065
    else:
1066
      raise errors.OpPrereqError("No iallocator or node given and no"
1067
                                 " cluster-wide default iallocator found;"
1068
                                 " please specify either an iallocator or a"
1069
                                 " node, or set a cluster-wide default"
1070
                                 " iallocator", errors.ECODE_INVAL)
1071

    
1072

    
1073
def FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_uuid, prereq):
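  """Returns the indices of an instance's disks that are faulty on a node.

  The mirror status of C{instance}'s disks is queried on the node with
  C{node_uuid}; C{prereq} controls whether an RPC failure is reported as a
  prerequisite error.

  @return: list of indices of the disks whose local disk status is
      L{constants.LDS_FAULTY}

  """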
1074
  faulty = []
1075

    
1076
  result = rpc_runner.call_blockdev_getmirrorstatus(
1077
             node_uuid, (instance.disks, instance))
1078
  result.Raise("Failed to get disk status from node %s" %
1079
               cfg.GetNodeName(node_uuid),
1080
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
1081

    
1082
  for idx, bdev_status in enumerate(result.payload):
1083
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
1084
      faulty.append(idx)
1085

    
1086
  return faulty
1087

    
1088

    
1089
def CheckNodeOnline(lu, node_uuid, msg=None):
1090
  """Ensure that a given node is online.
1091

1092
  @param lu: the LU on behalf of which we make the check
1093
  @param node_uuid: the node to check
1094
  @param msg: if passed, should be a message to replace the default one
1095
  @raise errors.OpPrereqError: if the node is offline
1096

1097
  """
1098
  if msg is None:
1099
    msg = "Can't use offline node"
1100
  if lu.cfg.GetNodeInfo(node_uuid).offline:
1101
    raise errors.OpPrereqError("%s: %s" % (msg, lu.cfg.GetNodeName(node_uuid)),
1102
                               errors.ECODE_STATE)
1103

    
1104

    
1105
def CheckDiskTemplateEnabled(cluster, disk_template):
1106
  """Helper function to check if a disk template is enabled.
1107

1108
  @type cluster: C{objects.Cluster}
1109
  @param cluster: the cluster's configuration
1110
  @type disk_template: str
1111
  @param disk_template: the disk template to be checked
1112

1113
  """
1114
  assert disk_template is not None
1115
  if disk_template not in constants.DISK_TEMPLATES:
1116
    raise errors.OpPrereqError("'%s' is not a valid disk template."
1117
                               " Valid disk templates are: %s" %
1118
                               (disk_template,
1119
                                ",".join(constants.DISK_TEMPLATES)))
1120
  if disk_template not in cluster.enabled_disk_templates:
1121
    raise errors.OpPrereqError("Disk template '%s' is not enabled in cluster."
1122
                               " Enabled disk templates are: %s" %
1123
                               (disk_template,
1124
                                ",".join(cluster.enabled_disk_templates)))
1125

    
1126

    
1127
def CheckStorageTypeEnabled(cluster, storage_type):
1128
  """Helper function to check if a storage type is enabled.
1129

1130
  @type cluster: C{objects.Cluster}
1131
  @param cluster: the cluster's configuration
1132
  @type storage_type: str
1133
  @param storage_type: the storage type to be checked
1134

1135
  """
1136
  assert storage_type is not None
1137
  assert storage_type in constants.STORAGE_TYPES
1138
  # special case for lvm-pv, because it cannot be enabled
1139
  # via disk templates
1140
  if storage_type == constants.ST_LVM_PV:
1141
    CheckStorageTypeEnabled(cluster, constants.ST_LVM_VG)
1142
  else:
1143
    possible_disk_templates = \
1144
        utils.storage.GetDiskTemplatesOfStorageTypes(storage_type)
1145
    for disk_template in possible_disk_templates:
1146
      if disk_template in cluster.enabled_disk_templates:
1147
        return
1148
    raise errors.OpPrereqError("No disk template of storage type '%s' is"
1149
                               " enabled in this cluster. Enabled disk"
1150
                               " templates are: %s" % (storage_type,
1151
                               ",".join(cluster.enabled_disk_templates)))
1152

    
1153

    
1154
def CheckIpolicyVsDiskTemplates(ipolicy, enabled_disk_templates):
1155
  """Checks ipolicy disk templates against enabled disk tempaltes.
1156

1157
  @type ipolicy: dict
1158
  @param ipolicy: the new ipolicy
1159
  @type enabled_disk_templates: list of string
1160
  @param enabled_disk_templates: list of enabled disk templates on the
1161
    cluster
1162
  @raises errors.OpPrereqError: if there is at least one allowed disk
1163
    template that is not also enabled.
1164

1165
  """
1166
  assert constants.IPOLICY_DTS in ipolicy
1167
  allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
1168
  not_enabled = set(allowed_disk_templates) - set(enabled_disk_templates)
1169
  if not_enabled:
1170
    raise errors.OpPrereqError("The following disk template are allowed"
1171
                               " by the ipolicy, but not enabled on the"
1172
                               " cluster: %s" % utils.CommaJoin(not_enabled))
1173

    
1174

    
1175
def CheckDiskAccessModeValidity(parameters):
1176
  """Checks if the access parameter is legal.
1177

1178
  @see: L{CheckDiskAccessModeConsistency} for cluster consistency checks.
1179
  @raise errors.OpPrereqError: if the check fails.
1180

1181
  """
1182
  for disk_template in parameters:
1183
    access = parameters[disk_template].get(constants.LDP_ACCESS,
1184
                                           constants.DISK_KERNELSPACE)
1185
    if access not in constants.DISK_VALID_ACCESS_MODES:
1186
      valid_vals_str = utils.CommaJoin(constants.DISK_VALID_ACCESS_MODES)
1187
      raise errors.OpPrereqError("Invalid value of '{d}:{a}': '{v}' (expected"
1188
                                 " one of {o})".format(d=disk_template,
1189
                                                       a=constants.LDP_ACCESS,
1190
                                                       v=access,
1191
                                                       o=valid_vals_str))
1192

    
1193

    
1194
def CheckDiskAccessModeConsistency(parameters, cfg, group=None):
1195
  """Checks if the access param is consistent with the cluster configuration.
1196

1197
  @note: requires a configuration lock to run.
1198
  @param parameters: the parameters to validate
1199
  @param cfg: the cfg object of the cluster
1200
  @param group: if set, only check for consistency within this group.
1201
  @raise errors.OpPrereqError: if the LU attempts to change the access parameter
1202
                               to an invalid value, such as "pink bunny".
1203
  @raise errors.OpPrereqError: if the LU attempts to change the access parameter
1204
                               to an inconsistent value, such as asking for RBD
1205
                               userspace access to the chroot hypervisor.
1206

1207
  """
1208
  CheckDiskAccessModeValidity(parameters)
1209

    
1210
  for disk_template in parameters:
1211
    access = parameters[disk_template].get(constants.LDP_ACCESS,
1212
                                           constants.DISK_KERNELSPACE)
1213

    
1214
    if disk_template not in constants.DTS_HAVE_ACCESS:
1215
      continue
1216

    
1217
    # Check the combination of instance hypervisor, disk template and access
1218
    # protocol is sane.
1219
    inst_uuids = cfg.GetNodeGroupInstances(group) if group else \
1220
                 cfg.GetInstanceList()
1221

    
1222
    for entry in inst_uuids:
1223
      inst = cfg.GetInstanceInfo(entry)
1224
      hv = inst.hypervisor
1225
      dt = inst.disk_template
1226

    
1227
      if not IsValidDiskAccessModeCombination(hv, dt, access):
1228
        raise errors.OpPrereqError("Instance {i}: cannot use '{a}' access"
1229
                                   " setting with {h} hypervisor and {d} disk"
1230
                                   " type.".format(i=inst.name,
1231
                                                   a=access,
1232
                                                   h=hv,
1233
                                                   d=dt))
1234

    
1235

    
1236
def IsValidDiskAccessModeCombination(hv, disk_template, mode):
1237
  """Checks if an hypervisor can read a disk template with given mode.
1238

1239
  @param hv: the hypervisor that will access the data
1240
  @param disk_template: the disk template the data is stored as
1241
  @param mode: how the hypervisor should access the data
1242
  @return: True if the hypervisor can access the given disk_template
1243
           in the specified mode.
1244

1245
  """
1246
  if mode == constants.DISK_KERNELSPACE:
1247
    return True
1248

    
1249
  if (hv == constants.HT_KVM and
1250
      disk_template in (constants.DT_RBD, constants.DT_GLUSTER) and
1251
      mode == constants.DISK_USERSPACE):
1252
    return True
1253

    
1254
  # Everything else:
1255
  return False
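
# Illustrative sketch (not part of the original module): kernelspace access is
# always accepted, userspace access only for KVM with RBD or Gluster disks.
#
#   IsValidDiskAccessModeCombination(constants.HT_KVM, constants.DT_RBD,
#                                    constants.DISK_USERSPACE)     # -> True
#   IsValidDiskAccessModeCombination(constants.HT_XEN_PVM, constants.DT_RBD,
#                                    constants.DISK_USERSPACE)     # -> False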
1256

    
1257

    
1258
def AddNodeCertToCandidateCerts(lu, node_uuid, cluster):
1259
  """Add the node's client SSL certificate digest to the candidate certs.
1260

1261
  @type node_uuid: string
1262
  @param node_uuid: the node's UUID
1263
  @type cluster: C{object.Cluster}
1264
  @param cluster: the cluster's configuration
1265

1266
  """
1267
  result = lu.rpc.call_node_crypto_tokens(
1268
             node_uuid,
1269
             [(constants.CRYPTO_TYPE_SSL_DIGEST, constants.CRYPTO_ACTION_GET,
1270
               None)])
1271
  result.Raise("Could not retrieve the node's (uuid %s) SSL digest."
1272
               % node_uuid)
1273
  ((crypto_type, digest), ) = result.payload
1274
  assert crypto_type == constants.CRYPTO_TYPE_SSL_DIGEST
1275

    
1276
  utils.AddNodeToCandidateCerts(node_uuid, digest, cluster.candidate_certs)
1277

    
1278

    
1279
def RemoveNodeCertFromCandidateCerts(node_uuid, cluster):
1280
  """Removes the node's certificate from the candidate certificates list.
1281

1282
  @type node_uuid: string
1283
  @param node_uuid: the node's UUID
1284
  @type cluster: C{objects.Cluster}
1285
  @param cluster: the cluster's configuration
1286

1287
  """
1288
  utils.RemoveNodeFromCandidateCerts(node_uuid, cluster.candidate_certs)
1289

    
1290

    
1291
def CreateNewClientCert(lu, node_uuid, filename=None):
1292
  """Creates a new client SSL certificate for the node.
1293

1294
  @type node_uuid: string
1295
  @param node_uuid: the node's UUID
1296
  @type filename: string
1297
  @param filename: the certificate's filename
1298
  @rtype: string
1299
  @return: the digest of the newly created certificate
1300

1301
  """
1302
  options = {}
1303
  if filename:
1304
    options[constants.CRYPTO_OPTION_CERT_FILE] = filename
1305
  options[constants.CRYPTO_OPTION_SERIAL_NO] = utils.UuidToInt(node_uuid)
1306
  result = lu.rpc.call_node_crypto_tokens(
1307
             node_uuid,
1308
             [(constants.CRYPTO_TYPE_SSL_DIGEST,
1309
               constants.CRYPTO_ACTION_CREATE,
1310
               options)])
1311
  result.Raise("Could not create the node's (uuid %s) SSL client"
1312
               " certificate." % node_uuid)
1313
  ((crypto_type, new_digest), ) = result.payload
1314
  assert crypto_type == constants.CRYPTO_TYPE_SSL_DIGEST
1315
  return new_digest
1316

    
1317

    
1318
def AddInstanceCommunicationNetworkOp(network):
1319
  """Create an OpCode that adds the instance communication network.
1320

1321
  This OpCode contains the configuration necessary for the instance
1322
  communication network.
1323

1324
  @type network: string
1325
  @param network: name or UUID of the instance communication network
1326

1327
  @rtype: L{ganeti.opcodes.OpCode}
1328
  @return: OpCode that creates the instance communication network
1329

1330
  """
1331
  return opcodes.OpNetworkAdd(
1332
    network_name=network,
1333
    gateway=None,
1334
    network=constants.INSTANCE_COMMUNICATION_NETWORK4,
1335
    gateway6=None,
1336
    network6=constants.INSTANCE_COMMUNICATION_NETWORK6,
1337
    mac_prefix=constants.INSTANCE_COMMUNICATION_MAC_PREFIX,
1338
    add_reserved_ips=None,
1339
    conflicts_check=True,
1340
    tags=[])
1341

    
1342

    
1343
def ConnectInstanceCommunicationNetworkOp(group_uuid, network):
1344
  """Create an OpCode that connects a group to the instance
1345
  communication network.
1346

1347
  This OpCode contains the configuration necessary for the instance
1348
  communication network.
1349

1350
  @type group_uuid: string
1351
  @param group_uuid: UUID of the group to connect
1352

1353
  @type network: string
1354
  @param network: name or UUID of the network to connect to, i.e., the
1355
                  instance communication network
1356

1357
  @rtype: L{ganeti.opcodes.OpCode}
1358
  @return: OpCode that connects the group to the instance
1359
           communication network
1360

1361
  """
1362
  return opcodes.OpNetworkConnect(
1363
    group_name=group_uuid,
1364
    network_name=network,
1365
    network_mode=constants.INSTANCE_COMMUNICATION_NETWORK_MODE,
1366
    network_link=constants.INSTANCE_COMMUNICATION_NETWORK_LINK,
1367
    conflicts_check=True)
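
# Illustrative sketch (not part of the original module): the two opcodes above
# are typically submitted together when enabling instance communication, once
# for the network itself and once per node group. The network name and the
# config access below are assumed.
#
#   ops = [AddInstanceCommunicationNetworkOp("gnt.com.0")]
#   ops.extend(ConnectInstanceCommunicationNetworkOp(group_uuid, "gnt.com.0")
#              for group_uuid in cfg.GetNodeGroupList())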