root / lib / cmdlib / common.py @ 1c474f2b

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Common functions used by multiple logical units."""

import copy
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti.serializer import Private
import ganeti.rpc.node as rpc
from ganeti import ssconf
from ganeti import utils


# States of instance
INSTANCE_DOWN = [constants.ADMINST_DOWN]
INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]

#: Instance status in which an instance can be marked as offline/online
CAN_CHANGE_INSTANCE_OFFLINE = (frozenset(INSTANCE_DOWN) | frozenset([
  constants.ADMINST_OFFLINE,
  ]))


def _ExpandItemName(expand_fn, name, kind):
  """Expand an item name.

  @param expand_fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the result of the expand_fn, if successful
  @raise errors.OpPrereqError: if the item is not found

  """
  (uuid, full_name) = expand_fn(name)
  if uuid is None or full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return (uuid, full_name)


def ExpandInstanceUuidAndName(cfg, expected_uuid, name):
  """Wrapper over L{_ExpandItemName} for instance."""
  (uuid, full_name) = _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
  if expected_uuid is not None and uuid != expected_uuid:
    raise errors.OpPrereqError(
      "The instance's UUID '%s' does not match the expected UUID '%s' for"
      " instance '%s'. Maybe the instance changed since you submitted this"
      " job." % (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
  return (uuid, full_name)


def ExpandNodeUuidAndName(cfg, expected_uuid, name):
  """Expand a short node name into the node UUID and full name.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type expected_uuid: string
  @param expected_uuid: expected UUID for the node (or None if there is no
        expectation). If it does not match, a L{errors.OpPrereqError} is
        raised.
  @type name: string
  @param name: the short node name

  """
  (uuid, full_name) = _ExpandItemName(cfg.ExpandNodeName, name, "Node")
  if expected_uuid is not None and uuid != expected_uuid:
    raise errors.OpPrereqError(
      "The node's UUID '%s' does not match the expected UUID '%s' for node"
      " '%s'. Maybe the node changed since you submitted this job." %
      (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
  return (uuid, full_name)
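
# Illustrative usage sketch (not part of the original module): a LU typically
# resolves user-supplied short names through these helpers, e.g.
#
#   (node_uuid, node_name) = ExpandNodeUuidAndName(self.cfg, None, "node1")
#   (inst_uuid, inst_name) = ExpandInstanceUuidAndName(self.cfg, None, "web1")
#
# "node1" and "web1" are hypothetical names; both calls raise
# errors.OpPrereqError if the name cannot be expanded.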


def ShareAll():
  """Returns a dict declaring all lock levels shared.

  """
  return dict.fromkeys(locking.LEVELS, 1)
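
# Usage sketch (an assumption about calling conventions, not shown in this
# file): a read-only LU can declare every lock level as shared, e.g. in its
# ExpandNames method:
#
#   self.share_locks = ShareAll()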


def CheckNodeGroupInstances(cfg, group_uuid, owned_instance_names):
  """Checks if the instances in a node group are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type group_uuid: string
  @param group_uuid: Node group UUID
  @type owned_instance_names: set or frozenset
  @param owned_instance_names: List of currently owned instances

  """
  wanted_instances = frozenset(cfg.GetInstanceNames(
                                 cfg.GetNodeGroupInstances(group_uuid)))
  if owned_instance_names != wanted_instances:
    raise errors.OpPrereqError("Instances in node group '%s' changed since"
                               " locks were acquired, wanted '%s', have '%s';"
                               " retry the operation" %
                               (group_uuid,
                                utils.CommaJoin(wanted_instances),
                                utils.CommaJoin(owned_instance_names)),
                               errors.ECODE_STATE)

  return wanted_instances


def GetWantedNodes(lu, short_node_names):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type short_node_names: list
  @param short_node_names: list of node names or None for all nodes
  @rtype: tuple of lists
  @return: tuple with (list of node UUIDs, list of node names)
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if short_node_names:
    node_uuids = [ExpandNodeUuidAndName(lu.cfg, None, name)[0]
                  for name in short_node_names]
  else:
    node_uuids = lu.cfg.GetNodeList()

  return (node_uuids, [lu.cfg.GetNodeName(uuid) for uuid in node_uuids])


def GetWantedInstances(lu, short_inst_names):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type short_inst_names: list
  @param short_inst_names: list of instance names or None for all instances
  @rtype: tuple of lists
  @return: tuple of (instance UUIDs, instance names)
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if short_inst_names:
    inst_uuids = [ExpandInstanceUuidAndName(lu.cfg, None, name)[0]
                  for name in short_inst_names]
  else:
    inst_uuids = lu.cfg.GetInstanceList()
  return (inst_uuids, [lu.cfg.GetInstanceName(uuid) for uuid in inst_uuids])


def RunPostHook(lu, node_name):
  """Runs the post-hook for an opcode on a single node.

  """
  hm = lu.proc.BuildHooksManager(lu)
  try:
    hm.RunPhase(constants.HOOKS_PHASE_POST, node_names=[node_name])
  except Exception, err: # pylint: disable=W0703
    lu.LogWarning("Errors occurred running hooks on %s: %s",
                  node_name, err)


def RedistributeAncillaryFiles(lu):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  """
  # Gather target nodes
  cluster = lu.cfg.GetClusterInfo()
  master_info = lu.cfg.GetMasterNodeInfo()

  online_node_uuids = lu.cfg.GetOnlineNodeList()
  online_node_uuid_set = frozenset(online_node_uuids)
  vm_node_uuids = list(online_node_uuid_set.intersection(
                         lu.cfg.GetVmCapableNodeList()))

  # Never distribute to master node
  for node_uuids in [online_node_uuids, vm_node_uuids]:
    if master_info.uuid in node_uuids:
      node_uuids.remove(master_info.uuid)

  # Gather file lists
  (files_all, _, files_mc, files_vm) = \
    ComputeAncillaryFiles(cluster, True)

  # Never re-distribute configuration file from here
  assert not (pathutils.CLUSTER_CONF_FILE in files_all or
              pathutils.CLUSTER_CONF_FILE in files_vm)
  assert not files_mc, "Master candidates not handled in this function"

  filemap = [
    (online_node_uuids, files_all),
    (vm_node_uuids, files_vm),
    ]

  # Upload the files
  for (node_uuids, files) in filemap:
    for fname in files:
      UploadHelper(lu, node_uuids, fname)


def ComputeAncillaryFiles(cluster, redist):
  """Compute files external to Ganeti which need to be consistent.

  @type redist: boolean
  @param redist: Whether to include files which need to be redistributed

  """
  # Compute files for all nodes
  files_all = set([
    pathutils.SSH_KNOWN_HOSTS_FILE,
    pathutils.CONFD_HMAC_KEY,
    pathutils.CLUSTER_DOMAIN_SECRET_FILE,
    pathutils.SPICE_CERT_FILE,
    pathutils.SPICE_CACERT_FILE,
    pathutils.RAPI_USERS_FILE,
    ])

  if redist:
    # we need to ship at least the RAPI certificate
    files_all.add(pathutils.RAPI_CERT_FILE)
  else:
    files_all.update(pathutils.ALL_CERT_FILES)
    files_all.update(ssconf.SimpleStore().GetFileList())

  if cluster.modify_etc_hosts:
    files_all.add(pathutils.ETC_HOSTS)

  if cluster.use_external_mip_script:
    files_all.add(pathutils.EXTERNAL_MASTER_SETUP_SCRIPT)

  # Files which are optional, these must:
  # - be present in one other category as well
  # - either exist or not exist on all nodes of that category (mc, vm all)
  files_opt = set([
    pathutils.RAPI_USERS_FILE,
    ])

  # Files which should only be on master candidates
  files_mc = set()

  if not redist:
    files_mc.add(pathutils.CLUSTER_CONF_FILE)

  # File storage
  if (not redist and (cluster.IsFileStorageEnabled() or
                        cluster.IsSharedFileStorageEnabled())):
    files_all.add(pathutils.FILE_STORAGE_PATHS_FILE)
    files_opt.add(pathutils.FILE_STORAGE_PATHS_FILE)

  # Files which should only be on VM-capable nodes
  files_vm = set(
    filename
    for hv_name in cluster.enabled_hypervisors
    for filename in
    hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[0])

  files_opt |= set(
    filename
    for hv_name in cluster.enabled_hypervisors
    for filename in
    hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[1])

  # Filenames in each category must be unique
  all_files_set = files_all | files_mc | files_vm
  assert (len(all_files_set) ==
          sum(map(len, [files_all, files_mc, files_vm]))), \
    "Found file listed in more than one file list"

  # Optional files must be present in one other category
  assert all_files_set.issuperset(files_opt), \
    "Optional file not in a different required list"

  # This one file should never ever be re-distributed via RPC
  assert not (redist and
              pathutils.FILE_STORAGE_PATHS_FILE in all_files_set)

  return (files_all, files_opt, files_mc, files_vm)
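
# Illustrative sketch (not part of the original module): callers unpack the
# four categories returned above, for example:
#
#   (files_all, files_opt, files_mc, files_vm) = \
#     ComputeAncillaryFiles(lu.cfg.GetClusterInfo(), False)
#
# With redist=False the certificates and ssconf files are included as well;
# with redist=True only files that may be re-distributed are returned.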


def UploadHelper(lu, node_uuids, fname):
  """Helper for uploading a file and showing warnings.

  """
  if os.path.exists(fname):
    result = lu.rpc.call_upload_file(node_uuids, fname)
    for to_node_uuids, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (fname, lu.cfg.GetNodeName(to_node_uuids), msg))
        lu.LogWarning(msg)


def MergeAndVerifyHvState(op_input, obj_input):
  """Combines the hv state from an opcode with that of the object.

  @param op_input: The input dict from the opcode
  @param obj_input: The input dict from the objects
  @return: The verified and updated dict

  """
  if op_input:
    invalid_hvs = set(op_input) - constants.HYPER_TYPES
    if invalid_hvs:
      raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:"
                                 " %s" % utils.CommaJoin(invalid_hvs),
                                 errors.ECODE_INVAL)
    if obj_input is None:
      obj_input = {}
    type_check = constants.HVSTS_PARAMETER_TYPES
    return _UpdateAndVerifySubDict(obj_input, op_input, type_check)

  return None


def MergeAndVerifyDiskState(op_input, obj_input):
  """Combines the disk state from an opcode with that of the object.

  @param op_input: The input dict from the opcode
  @param obj_input: The input dict from the objects
  @return: The verified and updated dict

  """
  if op_input:
    invalid_dst = set(op_input) - constants.DS_VALID_TYPES
    if invalid_dst:
      raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" %
                                 utils.CommaJoin(invalid_dst),
                                 errors.ECODE_INVAL)
    type_check = constants.DSS_PARAMETER_TYPES
    if obj_input is None:
      obj_input = {}
    return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value,
                                              type_check))
                for key, value in op_input.items())

  return None


def CheckOSParams(lu, required, node_uuids, osname, osparams, force_variant):
  """OS parameters validation.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type required: boolean
  @param required: whether the validation should fail if the OS is not
      found
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @type osname: string
  @param osname: the name of the OS we should use
  @type osparams: dict
  @param osparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  node_uuids = _FilterVmNodes(lu, node_uuids)

  # Last chance to unwrap private elements.
  for key in osparams:
    if isinstance(osparams[key], Private):
      osparams[key] = osparams[key].Get()

  if osname:
    result = lu.rpc.call_os_validate(node_uuids, required, osname,
                                     [constants.OS_VALIDATE_PARAMETERS],
                                     osparams, force_variant)
    for node_uuid, nres in result.items():
      # we don't check for offline cases since this should be run only
      # against the master node and/or an instance's nodes
      nres.Raise("OS Parameters validation failed on node %s" %
                 lu.cfg.GetNodeName(node_uuid))
      if not nres.payload:
        lu.LogInfo("OS %s not found on node %s, validation skipped",
                   osname, lu.cfg.GetNodeName(node_uuid))


def CheckOSImage(op):
  """Checks if the OS image in the OS parameters of an opcode is
  valid.

  This function can also be used in LUs as they carry an opcode.

  @type op: L{opcodes.OpCode}
  @param op: opcode containing the OS params

  @rtype: string or NoneType
  @return:
    None if the OS parameters in the opcode do not contain the OS
    image, otherwise the OS image value contained in the OS parameters
  @raise errors.OpPrereqError: if OS image is not a URL or an absolute path

  """
  os_image = objects.GetOSImage(op.osparams)

  if os_image is None:
    return None
  elif utils.IsUrl(os_image) or os.path.isabs(os_image):
    return os_image
  else:
    raise errors.OpPrereqError("OS image must be a URL or an absolute path")


def CheckHVParams(lu, node_uuids, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  node_uuids = _FilterVmNodes(lu, node_uuids)

  cluster = lu.cfg.GetClusterInfo()
  hvfull = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)

  hvinfo = lu.rpc.call_hypervisor_validate_params(node_uuids, hvname, hvfull)
  for node_uuid in node_uuids:
    info = hvinfo[node_uuid]
    if info.offline:
      continue
    info.Raise("Hypervisor parameter validation failed on node %s" %
               lu.cfg.GetNodeName(node_uuid))


def AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for node in mod_list:
      lu.context.ReaddNode(node)
      AddNodeCertToCandidateCerts(lu, lu.cfg, node.uuid)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def CheckNodePVs(nresult, exclusive_storage):
  """Check node PVs.

  """
  pvlist_dict = nresult.get(constants.NV_PVLIST, None)
  if pvlist_dict is None:
    return (["Can't get PV list from node"], None)
  pvlist = map(objects.LvmPvInfo.FromDict, pvlist_dict)
  errlist = []
  # check that ':' is not present in PV names, since it's a
  # special character for lvcreate (denotes the range of PEs to
  # use on the PV)
  for pv in pvlist:
    if ":" in pv.name:
      errlist.append("Invalid character ':' in PV '%s' of VG '%s'" %
                     (pv.name, pv.vg_name))
  es_pvinfo = None
  if exclusive_storage:
    (errmsgs, es_pvinfo) = utils.LvmExclusiveCheckNodePvs(pvlist)
    errlist.extend(errmsgs)
    shared_pvs = nresult.get(constants.NV_EXCLUSIVEPVS, None)
    if shared_pvs:
      for (pvname, lvlist) in shared_pvs:
        # TODO: Check that LVs are really unrelated (snapshots, DRBD meta...)
        errlist.append("PV %s is shared among unrelated LVs (%s)" %
                       (pvname, utils.CommaJoin(lvlist)))
  return (errlist, es_pvinfo)


def _ComputeMinMaxSpec(name, qualifier, ispecs, value):
  """Computes if value is in the desired range.

  @param name: name of the parameter for which we perform the check
  @param qualifier: a qualifier used in the error message (e.g. 'disk/1',
      not just 'disk')
  @param ispecs: dictionary containing min and max values
  @param value: actual value that we want to use
  @return: None or an error string

  """
  if value in [None, constants.VALUE_AUTO]:
    return None
  max_v = ispecs[constants.ISPECS_MAX].get(name, value)
  min_v = ispecs[constants.ISPECS_MIN].get(name, value)
  if value > max_v or min_v > value:
    if qualifier:
      fqn = "%s/%s" % (name, qualifier)
    else:
      fqn = name
    return ("%s value %s is not in range [%s, %s]" %
            (fqn, value, min_v, max_v))
  return None
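
# Illustrative sketch (not part of the original module): with a hypothetical
# ispecs dictionary such as
#
#   ispecs = {constants.ISPECS_MIN: {constants.ISPEC_MEM_SIZE: 128},
#             constants.ISPECS_MAX: {constants.ISPEC_MEM_SIZE: 4096}}
#
# _ComputeMinMaxSpec(constants.ISPEC_MEM_SIZE, None, ispecs, 8192) returns an
# error string, while a value of 512 (or None/constants.VALUE_AUTO) returns
# None.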


def ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
                                nic_count, disk_sizes, spindle_use,
                                disk_template,
                                _compute_fn=_ComputeMinMaxSpec):
  """Verifies ipolicy against provided specs.

  @type ipolicy: dict
  @param ipolicy: The ipolicy
  @type mem_size: int
  @param mem_size: The memory size
  @type cpu_count: int
  @param cpu_count: Used cpu cores
  @type disk_count: int
  @param disk_count: Number of disks used
  @type nic_count: int
  @param nic_count: Number of nics used
  @type disk_sizes: list of ints
  @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
  @type spindle_use: int
  @param spindle_use: The number of spindles this instance uses
  @type disk_template: string
  @param disk_template: The disk template of the instance
  @param _compute_fn: The compute function (unittest only)
  @return: A list of violations, or an empty list if no violations are found

  """
  assert disk_count == len(disk_sizes)

  test_settings = [
    (constants.ISPEC_MEM_SIZE, "", mem_size),
    (constants.ISPEC_CPU_COUNT, "", cpu_count),
    (constants.ISPEC_NIC_COUNT, "", nic_count),
    (constants.ISPEC_SPINDLE_USE, "", spindle_use),
    ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
         for idx, d in enumerate(disk_sizes)]
  if disk_template != constants.DT_DISKLESS:
    # This check doesn't make sense for diskless instances
    test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count))
  ret = []
  allowed_dts = ipolicy[constants.IPOLICY_DTS]
  if disk_template not in allowed_dts:
    ret.append("Disk template %s is not allowed (allowed templates: %s)" %
               (disk_template, utils.CommaJoin(allowed_dts)))

  min_errs = None
  for minmax in ipolicy[constants.ISPECS_MINMAX]:
    errs = filter(None,
                  (_compute_fn(name, qualifier, minmax, value)
                   for (name, qualifier, value) in test_settings))
    if min_errs is None or len(errs) < len(min_errs):
      min_errs = errs
  assert min_errs is not None
  return ret + min_errs
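
# Illustrative sketch (not part of the original module): checking a candidate
# instance shape against a policy with hypothetical numbers (1024 MB RAM,
# 2 VCPUs, one 10 GB disk, one NIC, spindle_use 1):
#
#   violations = ComputeIPolicySpecViolation(ipolicy, 1024, 2, 1, 1, [10240],
#                                            1, constants.DT_PLAIN)
#   if violations:
#     raise errors.OpPrereqError(utils.CommaJoin(violations),
#                                errors.ECODE_INVAL)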


def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
                                    _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance meets the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance: L{objects.Instance}
  @param instance: The instance to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  ret = []
  be_full = cfg.GetClusterInfo().FillBE(instance)
  mem_size = be_full[constants.BE_MAXMEM]
  cpu_count = be_full[constants.BE_VCPUS]
  inst_nodes = cfg.GetInstanceNodes(instance)
  es_flags = rpc.GetExclusiveStorageForNodes(cfg, inst_nodes)
  disks = cfg.GetInstanceDisks(instance)
  if any(es_flags.values()):
    # With exclusive storage use the actual spindles
    try:
      spindle_use = sum([disk.spindles for disk in disks])
    except TypeError:
      ret.append("Number of spindles not configured for disks of instance %s"
                 " while exclusive storage is enabled, try running gnt-cluster"
                 " repair-disk-sizes" % instance.name)
      # _ComputeMinMaxSpec ignores 'None's
      spindle_use = None
  else:
    spindle_use = be_full[constants.BE_SPINDLE_USE]
  disk_count = len(disks)
  disk_sizes = [disk.size for disk in disks]
  nic_count = len(instance.nics)
  disk_template = instance.disk_template

  return ret + _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                           disk_sizes, spindle_use, disk_template)


def _ComputeViolatingInstances(ipolicy, instances, cfg):
  """Computes a set of instances that violate the given ipolicy.

  @param ipolicy: The ipolicy to verify
  @type instances: L{objects.Instance}
  @param instances: List of instances to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @return: A frozenset of instance names violating the ipolicy

  """
  return frozenset([inst.name for inst in instances
                    if ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])


def ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
  """Computes a set of any instances that would violate the new ipolicy.

  @param old_ipolicy: The current (still in-place) ipolicy
  @param new_ipolicy: The new (to become) ipolicy
  @param instances: List of instances to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @return: A list of instances which violate the new ipolicy but
      did not before

  """
  return (_ComputeViolatingInstances(new_ipolicy, instances, cfg) -
          _ComputeViolatingInstances(old_ipolicy, instances, cfg))


def GetUpdatedParams(old_params, update_dict,
                      use_default=True, use_none=False):
  """Return the new version of a parameter dictionary.

  @type old_params: dict
  @param old_params: old parameters
  @type update_dict: dict
  @param update_dict: dict containing new parameter values, or
      constants.VALUE_DEFAULT to reset the parameter to its default
      value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
  @rtype: dict
  @return: the new parameter dictionary

  """
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == constants.VALUE_DEFAULT) or
          (use_none and val is None)):
      try:
        del params_copy[key]
      except KeyError:
        pass
    else:
      params_copy[key] = val
  return params_copy
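
# Illustrative sketch (not part of the original module): keys set to
# constants.VALUE_DEFAULT in the update dict are removed from the result,
# all other keys are overwritten or added:
#
#   GetUpdatedParams({"vcpus": 2, "memory": 512},
#                    {"vcpus": constants.VALUE_DEFAULT, "memory": 1024})
#   => {"memory": 1024}
#
# With use_none=True, a value of None behaves like VALUE_DEFAULT.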


def GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
  """Return the new version of an instance policy.

  @param group_policy: whether this policy applies to a group and thus
    we should support removal of policy entries

  """
  ipolicy = copy.deepcopy(old_ipolicy)
  for key, value in new_ipolicy.items():
    if key not in constants.IPOLICY_ALL_KEYS:
      raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
                                 errors.ECODE_INVAL)
    if (not value or value == [constants.VALUE_DEFAULT] or
            value == constants.VALUE_DEFAULT):
      if group_policy:
        if key in ipolicy:
          del ipolicy[key]
      else:
        raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
                                   " on the cluster" % key,
                                   errors.ECODE_INVAL)
    else:
      if key in constants.IPOLICY_PARAMETERS:
        # FIXME: we assume all such values are float
        try:
          ipolicy[key] = float(value)
        except (TypeError, ValueError), err:
          raise errors.OpPrereqError("Invalid value for attribute"
                                     " '%s': '%s', error: %s" %
                                     (key, value, err), errors.ECODE_INVAL)
      elif key == constants.ISPECS_MINMAX:
        for minmax in value:
          for k in minmax.keys():
            utils.ForceDictType(minmax[k], constants.ISPECS_PARAMETER_TYPES)
        ipolicy[key] = value
      elif key == constants.ISPECS_STD:
        if group_policy:
          msg = "%s cannot appear in group instance specs" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        ipolicy[key] = GetUpdatedParams(old_ipolicy.get(key, {}), value,
                                        use_none=False, use_default=False)
        utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
      else:
        # FIXME: we assume all others are lists; this should be redone
        # in a nicer way
        ipolicy[key] = list(value)
  try:
    objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
  except errors.ConfigurationError, err:
    raise errors.OpPrereqError("Invalid instance policy: %s" % err,
                               errors.ECODE_INVAL)
  return ipolicy


def AnnotateDiskParams(instance, devs, cfg):
  """Little helper wrapper to the rpc annotation method.

  @param instance: The instance object
  @type devs: List of L{objects.Disk}
  @param devs: The root devices (not any of its children!)
  @param cfg: The config object
  @return: The annotated disk copies
  @see: L{rpc.node.AnnotateDiskParams}

  """
  return rpc.AnnotateDiskParams(devs, cfg.GetInstanceDiskParams(instance))


def SupportsOob(cfg, node):
  """Tells if node supports OOB.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @return: The OOB script if supported or an empty string otherwise

  """
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]


def _UpdateAndVerifySubDict(base, updates, type_check):
  """Updates and verifies a dict with sub dicts of the same type.

  @param base: The dict with the old data
  @param updates: The dict with the new data
  @param type_check: Dict suitable to ForceDictType to verify correct types
  @return: A new dict with updated and verified values

  """
  def fn(old, value):
    new = GetUpdatedParams(old, value)
    utils.ForceDictType(new, type_check)
    return new

  ret = copy.deepcopy(base)
  ret.update(dict((key, fn(base.get(key, {}), value))
                  for key, value in updates.items()))
  return ret
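
# Illustrative sketch (not part of the original module): each sub-dict of
# 'updates' is merged into the matching sub-dict of 'base' via
# GetUpdatedParams and then type-checked, e.g. with a hypothetical
# type_check:
#
#   _UpdateAndVerifySubDict({"hv1": {"a": 1}},
#                           {"hv1": {"b": 2}, "hv2": {"a": 3}},
#                           type_check)
#   => {"hv1": {"a": 1, "b": 2}, "hv2": {"a": 3}}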


def _FilterVmNodes(lu, node_uuids):
  """Filters out non-vm_capable nodes from a list.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @rtype: list
  @return: the list of vm-capable nodes

  """
  vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
  return [uuid for uuid in node_uuids if uuid not in vm_nodes]


def GetDefaultIAllocator(cfg, ialloc):
  """Decides on which iallocator to use.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration object
  @type ialloc: string or None
  @param ialloc: Iallocator specified in opcode
  @rtype: string
  @return: Iallocator name

  """
  if not ialloc:
    # Use default iallocator
    ialloc = cfg.GetDefaultIAllocator()

  if not ialloc:
    raise errors.OpPrereqError("No iallocator was specified, neither in the"
                               " opcode nor as a cluster-wide default",
                               errors.ECODE_INVAL)

  return ialloc


def CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_node_uuids,
                             cur_group_uuid):
  """Checks if node groups for locked instances are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @type instances: dict; string as key, L{objects.Instance} as value
  @param instances: Dictionary, instance UUID as key, instance object as value
  @type owned_groups: iterable of string
  @param owned_groups: List of owned groups
  @type owned_node_uuids: iterable of string
  @param owned_node_uuids: List of owned nodes
  @type cur_group_uuid: string or None
  @param cur_group_uuid: Optional group UUID to check against instance's groups

  """
  for (uuid, inst) in instances.items():
    inst_nodes = cfg.GetInstanceNodes(inst)
    assert owned_node_uuids.issuperset(inst_nodes), \
      "Instance %s's nodes changed while we kept the lock" % inst.name

    inst_groups = CheckInstanceNodeGroups(cfg, uuid, owned_groups)

    assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
      "Instance %s has no node in group %s" % (inst.name, cur_group_uuid)


def CheckInstanceNodeGroups(cfg, inst_uuid, owned_groups, primary_only=False):
  """Checks if the owned node groups are still correct for an instance.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type inst_uuid: string
  @param inst_uuid: Instance UUID
  @type owned_groups: set or frozenset
  @param owned_groups: List of currently owned node groups
  @type primary_only: boolean
  @param primary_only: Whether to check node groups for only the primary node

  """
  inst_groups = cfg.GetInstanceNodeGroups(inst_uuid, primary_only)

  if not owned_groups.issuperset(inst_groups):
    raise errors.OpPrereqError("Instance %s's node groups changed since"
                               " locks were acquired, current groups are"
                               " '%s', owning groups '%s'; retry the"
                               " operation" %
                               (cfg.GetInstanceName(inst_uuid),
                                utils.CommaJoin(inst_groups),
                                utils.CommaJoin(owned_groups)),
                               errors.ECODE_STATE)

  return inst_groups


def LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
  """Unpacks the result of change-group and node-evacuate iallocator requests.

  Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
  L{constants.IALLOCATOR_MODE_CHG_GROUP}.

  @type lu: L{LogicalUnit}
  @param lu: Logical unit instance
  @type alloc_result: tuple/list
  @param alloc_result: Result from iallocator
  @type early_release: bool
  @param early_release: Whether to release locks early if possible
  @type use_nodes: bool
  @param use_nodes: Whether to display node names instead of groups

  """
  (moved, failed, jobs) = alloc_result

  if failed:
    failreason = utils.CommaJoin("%s (%s)" % (name, reason)
                                 for (name, reason) in failed)
    lu.LogWarning("Unable to evacuate instances %s", failreason)
    raise errors.OpExecError("Unable to evacuate instances %s" % failreason)

  if moved:
    lu.LogInfo("Instances to be moved: %s",
               utils.CommaJoin(
                 "%s (to %s)" %
                 (name, _NodeEvacDest(use_nodes, group, node_names))
                 for (name, group, node_names) in moved))

  return [map(compat.partial(_SetOpEarlyRelease, early_release),
              map(opcodes.OpCode.LoadOpCode, ops))
          for ops in jobs]


def _NodeEvacDest(use_nodes, group, node_names):
  """Returns group or nodes depending on caller's choice.

  """
  if use_nodes:
    return utils.CommaJoin(node_names)
  else:
    return group


def _SetOpEarlyRelease(early_release, op):
  """Sets C{early_release} flag on opcodes if available.

  """
  try:
    op.early_release = early_release
  except AttributeError:
    assert not isinstance(op, opcodes.OpInstanceReplaceDisks)

  return op


def MapInstanceLvsToNodes(cfg, instances):
  """Creates a map from (node, volume) to instance.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type instances: list of L{objects.Instance}
  @rtype: dict; tuple of (node uuid, volume name) as key, L{objects.Instance}
          object as value

  """
  return dict(((node_uuid, vol), inst)
              for inst in instances
              for (node_uuid, vols) in cfg.GetInstanceLVsByNode(inst).items()
              for vol in vols)
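
# Illustrative sketch (not part of the original module): the resulting mapping
# is keyed by (node UUID, LV name) pairs, e.g. for hypothetical values
#
#   {("node-uuid-1", "xenvg/disk0"): <Instance web1>,
#    ("node-uuid-1", "xenvg/disk1"): <Instance web1>,
#    ("node-uuid-2", "xenvg/disk0"): <Instance db1>}
#
# so callers can look up which instance owns a logical volume reported by a
# node.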


def CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
  """Make sure that none of the given parameters is global.

  If a global parameter is found, an L{errors.OpPrereqError} exception is
  raised. This is used to avoid setting global parameters for individual nodes.

  @type params: dictionary
  @param params: Parameters to check
  @type glob_pars: dictionary
  @param glob_pars: Forbidden parameters
  @type kind: string
  @param kind: Kind of parameters (e.g. "node")
  @type bad_levels: string
  @param bad_levels: Level(s) at which the parameters are forbidden (e.g.
      "instance")
  @type good_levels: string
  @param good_levels: Level(s) at which the parameters are allowed (e.g.
      "cluster or group")

  """
  used_globals = glob_pars.intersection(params)
  if used_globals:
    msg = ("The following %s parameters are global and cannot"
           " be customized at %s level, please modify them at"
           " %s level: %s" %
           (kind, bad_levels, good_levels, utils.CommaJoin(used_globals)))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def IsExclusiveStorageEnabledNode(cfg, node):
  """Whether exclusive_storage is in effect for the given node.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @rtype: bool
  @return: The effective value of exclusive_storage

  """
  return cfg.GetNdParams(node)[constants.ND_EXCLUSIVE_STORAGE]


def CheckInstanceState(lu, instance, req_states, msg=None):
  """Ensure that an instance is in one of the required states.

  @param lu: the LU on behalf of which we make the check
  @param instance: the instance to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the instance is not in the required state

  """
  if msg is None:
    msg = ("can't use instance from outside %s states" %
           utils.CommaJoin(req_states))
  if instance.admin_state not in req_states:
    raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" %
                               (instance.name, instance.admin_state, msg),
                               errors.ECODE_STATE)

  if constants.ADMINST_UP not in req_states:
    pnode_uuid = instance.primary_node
    if not lu.cfg.GetNodeInfo(pnode_uuid).offline:
      all_hvparams = lu.cfg.GetClusterInfo().hvparams
      ins_l = lu.rpc.call_instance_list(
                [pnode_uuid], [instance.hypervisor], all_hvparams)[pnode_uuid]
      ins_l.Raise("Can't contact node %s for instance information" %
                  lu.cfg.GetNodeName(pnode_uuid),
                  prereq=True, ecode=errors.ECODE_ENVIRON)
      if instance.name in ins_l.payload:
        raise errors.OpPrereqError("Instance %s is running, %s" %
                                   (instance.name, msg), errors.ECODE_STATE)
    else:
      lu.LogWarning("Primary node offline, ignoring check that instance"
                     " is down")


def CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
  """Check the sanity of iallocator and node arguments and use the
  cluster-wide iallocator if appropriate.

  Check that at most one of (iallocator, node) is specified. If none is
  specified, or the iallocator is L{constants.DEFAULT_IALLOCATOR_SHORTCUT},
  then the LU's opcode's iallocator slot is filled with the cluster-wide
  default iallocator.

  @type iallocator_slot: string
  @param iallocator_slot: the name of the opcode iallocator slot
  @type node_slot: string
  @param node_slot: the name of the opcode target node slot

  """
  node = getattr(lu.op, node_slot, None)
  ialloc = getattr(lu.op, iallocator_slot, None)
  if node == []:
    node = None

  if node is not None and ialloc is not None:
    raise errors.OpPrereqError("Do not specify both, iallocator and node",
                               errors.ECODE_INVAL)
  elif ((node is None and ialloc is None) or
        ialloc == constants.DEFAULT_IALLOCATOR_SHORTCUT):
    default_iallocator = lu.cfg.GetDefaultIAllocator()
    if default_iallocator:
      setattr(lu.op, iallocator_slot, default_iallocator)
    else:
      raise errors.OpPrereqError("No iallocator or node given and no"
                                 " cluster-wide default iallocator found;"
                                 " please specify either an iallocator or a"
                                 " node, or set a cluster-wide default"
                                 " iallocator", errors.ECODE_INVAL)


def FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_uuid, prereq):
  """Returns the indices of an instance's faulty disks on a node.

  """
  faulty = []

  disks = cfg.GetInstanceDisks(instance)
  result = rpc_runner.call_blockdev_getmirrorstatus(
             node_uuid, (disks, instance))
  result.Raise("Failed to get disk status from node %s" %
               cfg.GetNodeName(node_uuid),
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


def CheckNodeOnline(lu, node_uuid, msg=None):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the node is offline

  """
  if msg is None:
    msg = "Can't use offline node"
  if lu.cfg.GetNodeInfo(node_uuid).offline:
    raise errors.OpPrereqError("%s: %s" % (msg, lu.cfg.GetNodeName(node_uuid)),
                               errors.ECODE_STATE)


def CheckDiskTemplateEnabled(cluster, disk_template):
  """Helper function to check if a disk template is enabled.

  @type cluster: C{objects.Cluster}
  @param cluster: the cluster's configuration
  @type disk_template: str
  @param disk_template: the disk template to be checked

  """
  assert disk_template is not None
  if disk_template not in constants.DISK_TEMPLATES:
    raise errors.OpPrereqError("'%s' is not a valid disk template."
                               " Valid disk templates are: %s" %
                               (disk_template,
                                ",".join(constants.DISK_TEMPLATES)))
  if not disk_template in cluster.enabled_disk_templates:
    raise errors.OpPrereqError("Disk template '%s' is not enabled in cluster."
                               " Enabled disk templates are: %s" %
                               (disk_template,
                                ",".join(cluster.enabled_disk_templates)))


def CheckStorageTypeEnabled(cluster, storage_type):
  """Helper function to check if a storage type is enabled.

  @type cluster: C{objects.Cluster}
  @param cluster: the cluster's configuration
  @type storage_type: str
  @param storage_type: the storage type to be checked

  """
  assert storage_type is not None
  assert storage_type in constants.STORAGE_TYPES
  # special case for lvm-pv, because it cannot be enabled
  # via disk templates
  if storage_type == constants.ST_LVM_PV:
    CheckStorageTypeEnabled(cluster, constants.ST_LVM_VG)
  else:
    possible_disk_templates = \
        utils.storage.GetDiskTemplatesOfStorageTypes(storage_type)
    for disk_template in possible_disk_templates:
      if disk_template in cluster.enabled_disk_templates:
        return
    raise errors.OpPrereqError("No disk template of storage type '%s' is"
                               " enabled in this cluster. Enabled disk"
                               " templates are: %s" % (storage_type,
                               ",".join(cluster.enabled_disk_templates)))


def CheckIpolicyVsDiskTemplates(ipolicy, enabled_disk_templates):
  """Checks ipolicy disk templates against enabled disk templates.

  @type ipolicy: dict
  @param ipolicy: the new ipolicy
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of enabled disk templates on the
    cluster
  @raises errors.OpPrereqError: if there is at least one allowed disk
    template that is not also enabled.

  """
  assert constants.IPOLICY_DTS in ipolicy
  allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
  not_enabled = set(allowed_disk_templates) - set(enabled_disk_templates)
  if not_enabled:
    raise errors.OpPrereqError("The following disk templates are allowed"
                               " by the ipolicy, but not enabled on the"
                               " cluster: %s" % utils.CommaJoin(not_enabled))


def CheckDiskAccessModeValidity(parameters):
  """Checks if the access parameter is legal.

  @see: L{CheckDiskAccessModeConsistency} for cluster consistency checks.
  @raise errors.OpPrereqError: if the check fails.

  """
  for disk_template in parameters:
    access = parameters[disk_template].get(constants.LDP_ACCESS,
                                           constants.DISK_KERNELSPACE)
    if access not in constants.DISK_VALID_ACCESS_MODES:
      valid_vals_str = utils.CommaJoin(constants.DISK_VALID_ACCESS_MODES)
      raise errors.OpPrereqError("Invalid value of '{d}:{a}': '{v}' (expected"
                                 " one of {o})".format(d=disk_template,
                                                       a=constants.LDP_ACCESS,
                                                       v=access,
                                                       o=valid_vals_str))


def CheckDiskAccessModeConsistency(parameters, cfg, group=None):
  """Checks if the access param is consistent with the cluster configuration.

  @note: requires a configuration lock to run.
  @param parameters: the parameters to validate
  @param cfg: the cfg object of the cluster
  @param group: if set, only check for consistency within this group.
  @raise errors.OpPrereqError: if the LU attempts to change the access parameter
                               to an invalid value, such as "pink bunny".
  @raise errors.OpPrereqError: if the LU attempts to change the access parameter
                               to an inconsistent value, such as asking for RBD
                               userspace access to the chroot hypervisor.

  """
  CheckDiskAccessModeValidity(parameters)

  for disk_template in parameters:
    access = parameters[disk_template].get(constants.LDP_ACCESS,
                                           constants.DISK_KERNELSPACE)

    if disk_template not in constants.DTS_HAVE_ACCESS:
      continue

    # Check the combination of instance hypervisor, disk template and access
    # protocol is sane.
    inst_uuids = cfg.GetNodeGroupInstances(group) if group else \
                 cfg.GetInstanceList()

    for entry in inst_uuids:
      inst = cfg.GetInstanceInfo(entry)
      hv = inst.hypervisor
      dt = inst.disk_template

      if not IsValidDiskAccessModeCombination(hv, dt, access):
        raise errors.OpPrereqError("Instance {i}: cannot use '{a}' access"
                                   " setting with {h} hypervisor and {d} disk"
                                   " type.".format(i=inst.name,
                                                   a=access,
                                                   h=hv,
                                                   d=dt))


def IsValidDiskAccessModeCombination(hv, disk_template, mode):
  """Checks if a hypervisor can read a disk template with given mode.

  @param hv: the hypervisor that will access the data
  @param disk_template: the disk template the data is stored as
  @param mode: how the hypervisor should access the data
  @return: True if the hypervisor can read the given disk_template
           in the specified mode.

  """
  if mode == constants.DISK_KERNELSPACE:
    return True

  if (hv == constants.HT_KVM and
      disk_template in (constants.DT_RBD, constants.DT_GLUSTER) and
      mode == constants.DISK_USERSPACE):
    return True

  # Everything else:
  return False


def AddNodeCertToCandidateCerts(lu, cfg, node_uuid):
  """Add the node's client SSL certificate digest to the candidate certs.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit
  @type cfg: L{ConfigWriter}
  @param cfg: the configuration client to use
  @type node_uuid: string
  @param node_uuid: the node's UUID

  """
  result = lu.rpc.call_node_crypto_tokens(
             node_uuid,
             [(constants.CRYPTO_TYPE_SSL_DIGEST, constants.CRYPTO_ACTION_GET,
               None)])
  result.Raise("Could not retrieve the node's (uuid %s) SSL digest."
               % node_uuid)
  ((crypto_type, digest), ) = result.payload
  assert crypto_type == constants.CRYPTO_TYPE_SSL_DIGEST

  cfg.AddNodeToCandidateCerts(node_uuid, digest)


def RemoveNodeCertFromCandidateCerts(cfg, node_uuid):
  """Removes the node's certificate from the candidate certificates list.

  @type cfg: C{config.ConfigWriter}
  @param cfg: the cluster's configuration
  @type node_uuid: string
  @param node_uuid: the node's UUID

  """
  cfg.RemoveNodeFromCandidateCerts(node_uuid)


def CreateNewClientCert(lu, node_uuid, filename=None):
  """Creates a new client SSL certificate for the node.

  @type node_uuid: string
  @param node_uuid: the node's UUID
  @type filename: string
  @param filename: the certificate's filename
  @rtype: string
  @return: the digest of the newly created certificate

  """
  options = {}
  if filename:
    options[constants.CRYPTO_OPTION_CERT_FILE] = filename
  options[constants.CRYPTO_OPTION_SERIAL_NO] = utils.UuidToInt(node_uuid)
  result = lu.rpc.call_node_crypto_tokens(
             node_uuid,
             [(constants.CRYPTO_TYPE_SSL_DIGEST,
               constants.CRYPTO_ACTION_CREATE,
               options)])
  result.Raise("Could not create the node's (uuid %s) SSL client"
               " certificate." % node_uuid)
  ((crypto_type, new_digest), ) = result.payload
  assert crypto_type == constants.CRYPTO_TYPE_SSL_DIGEST
  return new_digest


def AddInstanceCommunicationNetworkOp(network):
  """Create an OpCode that adds the instance communication network.

  This OpCode contains the configuration necessary for the instance
  communication network.

  @type network: string
  @param network: name or UUID of the instance communication network

  @rtype: L{ganeti.opcodes.OpCode}
  @return: OpCode that creates the instance communication network

  """
  return opcodes.OpNetworkAdd(
    network_name=network,
    gateway=None,
    network=constants.INSTANCE_COMMUNICATION_NETWORK4,
    gateway6=None,
    network6=constants.INSTANCE_COMMUNICATION_NETWORK6,
    mac_prefix=constants.INSTANCE_COMMUNICATION_MAC_PREFIX,
    add_reserved_ips=None,
    conflicts_check=True,
    tags=[])


def ConnectInstanceCommunicationNetworkOp(group_uuid, network):
  """Create an OpCode that connects a group to the instance
  communication network.

  This OpCode contains the configuration necessary for the instance
  communication network.

  @type group_uuid: string
  @param group_uuid: UUID of the group to connect

  @type network: string
  @param network: name or UUID of the network to connect to, i.e., the
                  instance communication network

  @rtype: L{ganeti.opcodes.OpCode}
  @return: OpCode that connects the group to the instance
           communication network

  """
  return opcodes.OpNetworkConnect(
    group_name=group_uuid,
    network_name=network,
    network_mode=constants.INSTANCE_COMMUNICATION_NETWORK_MODE,
    network_link=constants.INSTANCE_COMMUNICATION_NETWORK_LINK,
    conflicts_check=True)