Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib / instance_utils.py @ f665d9de

History | View | Annotate | Download (21.2 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Utility function mainly, but not only used by instance LU's."""
23

    
24
import logging
25
import os
26

    
27
from ganeti import constants
28
from ganeti import errors
29
from ganeti import locking
30
from ganeti import network
31
from ganeti import objects
32
from ganeti import pathutils
33
from ganeti import utils
34
from ganeti.cmdlib.common import AnnotateDiskParams, \
35
  ComputeIPolicyInstanceViolation, CheckDiskTemplateEnabled
36

    
37

    
38
def BuildInstanceHookEnv(name, primary_node_name, secondary_node_names, os_type,
                         status, minmem, maxmem, vcpus, nics, disk_template,
                         disks, bep, hvp, hypervisor_name, tags):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node_name: string
  @param primary_node_name: the name of the instance's primary node
  @type secondary_node_names: list
  @param secondary_node_names: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: string
  @param status: the desired status of the instance
  @type minmem: string
  @param minmem: the minimum memory size of the instance
  @type maxmem: string
  @param maxmem: the maximum memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (name, uuid, ip, mac, mode, link, vlan, net,
      netinfo) representing the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: list of tuples (name, uuid, size, mode, info), where "info"
      is a dict of extra environment entries merged in as-is
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @type tags: list
  @param tags: list of instance tags as strings
  @rtype: dict
  @return: the hook environment for this instance

  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node_name,
    "INSTANCE_SECONDARIES": " ".join(secondary_node_names),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MINMEM": minmem,
    "INSTANCE_MAXMEM": maxmem,
    # TODO(2.9) remove deprecated "memory" value
    "INSTANCE_MEMORY": maxmem,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
    }
  if nics:
    nic_count = len(nics)
    # "nic_name" (not "name") so the instance name parameter is not
    # shadowed by the loop variable
    for idx, (nic_name, uuid, ip, mac, mode, link, vlan, net, netinfo) \
        in enumerate(nics):
      if ip is None:
        ip = ""
      if nic_name:
        env["INSTANCE_NIC%d_NAME" % idx] = nic_name
      env["INSTANCE_NIC%d_UUID" % idx] = uuid
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      env["INSTANCE_NIC%d_VLAN" % idx] = vlan
      if netinfo:
        nobj = objects.Network.FromDict(netinfo)
        env.update(nobj.HooksDict("INSTANCE_NIC%d_" % idx))
      elif net:
        # Fixed: this condition used to test the imported "network" module
        # (always true); it must test the NIC's own network reference.
        # FIXME: broken network reference: the instance NIC specifies a
        # network, but the relevant network entry was not in the config. This
        # should be made impossible.
        env["INSTANCE_NIC%d_NETWORK_NAME" % idx] = net
      if mode == constants.NIC_MODE_BRIDGED or \
         mode == constants.NIC_MODE_OVS:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    # "disk_name" (not "name") to avoid shadowing the instance name
    for idx, (disk_name, uuid, size, mode, info) in enumerate(disks):
      if disk_name:
        env["INSTANCE_DISK%d_NAME" % idx] = disk_name
      env["INSTANCE_DISK%d_UUID" % idx] = uuid
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
      # extra per-disk entries (e.g. logical-id details) merged verbatim
      env.update(info)
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  if not tags:
    tags = []

  env["INSTANCE_TAGS"] = " ".join(tags)

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env
149

    
150

    
151
def BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  be_full = cluster.FillBE(instance)
  hv_full = cluster.FillHV(instance)

  # One entry per disk, carrying the logical-id environment alongside the
  # basic attributes
  disk_info = [(d.name, d.uuid, d.size, d.mode,
                BuildDiskLogicalIDEnv(instance.disk_template, idx, d))
               for idx, d in enumerate(instance.disks)]

  args = dict(
    name=instance.name,
    primary_node_name=lu.cfg.GetNodeName(instance.primary_node),
    secondary_node_names=lu.cfg.GetNodeNames(instance.secondary_nodes),
    os_type=instance.os,
    status=instance.admin_state,
    maxmem=be_full[constants.BE_MAXMEM],
    minmem=be_full[constants.BE_MINMEM],
    vcpus=be_full[constants.BE_VCPUS],
    nics=NICListToTuple(lu, instance.nics),
    disk_template=instance.disk_template,
    disks=disk_info,
    bep=be_full,
    hvp=hv_full,
    hypervisor_name=instance.hypervisor,
    tags=instance.tags,
    )
  if override:
    args.update(override)
  return BuildInstanceHookEnv(**args) # pylint: disable=W0142
191

    
192

    
193
def GetClusterDomainSecret():
  """Reads the cluster domain secret.

  @return: the contents of the cluster domain secret file, which must
      consist of exactly one line

  """
  secret_file = pathutils.CLUSTER_DOMAIN_SECRET_FILE
  return utils.ReadOneLineFile(secret_file, strict=True)
199

    
200

    
201
def CheckNodeNotDrained(lu, node_uuid):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  node_info = lu.cfg.GetNodeInfo(node_uuid)
  if not node_info.drained:
    return
  raise errors.OpPrereqError("Can't use drained node %s" % node_info.name,
                             errors.ECODE_STATE)
213

    
214

    
215
def CheckNodeVmCapable(lu, node_uuid):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  node_info = lu.cfg.GetNodeInfo(node_uuid)
  if node_info.vm_capable:
    return
  raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node_uuid,
                             errors.ECODE_STATE)
226

    
227

    
228
def RemoveInstance(lu, feedback_fn, instance, ignore_failures):
  """Utility function to remove an instance.

  Removes the instance's disks, drops it from the cluster configuration
  and schedules the instance lock for removal.

  """
  logging.info("Removing block devices for instance %s", instance.name)

  disks_removed = RemoveDisks(lu, instance, ignore_failures=ignore_failures)
  if not disks_removed:
    if not ignore_failures:
      raise errors.OpExecError("Can't remove instance's disks")
    feedback_fn("Warning: can't remove instance's disks")

  logging.info("Removing instance %s out of cluster config", instance.name)

  lu.cfg.RemoveInstance(instance.uuid)

  assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
    "Instance lock removal conflict"

  # Remove lock for the instance
  lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
248

    
249

    
250
def RemoveDisks(lu, instance, target_node_uuid=None, ignore_failures=False):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node_uuid: string
  @param target_node_uuid: used to override the node on which to remove the
          disks
  @type ignore_failures: boolean
  @param ignore_failures: whether to return DRBD ports to the pool even if
          some disk removals failed
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  ports_to_release = set()
  anno_disks = AnnotateDiskParams(instance, instance.disks, lu.cfg)
  for (idx, device) in enumerate(anno_disks):
    if target_node_uuid:
      # Only remove on the explicitly requested node
      edata = [(target_node_uuid, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node_uuid, disk in edata:
      if getattr(lu.op, "keep_disks", None):
        # Fixed: compare strings with "==" instead of "is"; identity
        # comparison of strings relies on interning and is fragile.
        if lu.op.keep_disks and disk.dev_type == constants.DT_EXT:
          continue
      result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
      if result.fail_msg:
        lu.LogWarning("Could not remove disk %s on node %s,"
                      " continuing anyway: %s", idx,
                      lu.cfg.GetNodeName(node_uuid), result.fail_msg)
        # A failure on an offline secondary is tolerated
        if not (result.offline and node_uuid != instance.primary_node):
          all_result = False

    # if this is a DRBD disk, return its port to the pool
    if device.dev_type in constants.DTS_DRBD:
      ports_to_release.add(device.logical_id[2])

  if all_result or ignore_failures:
    for port in ports_to_release:
      lu.cfg.AddTcpUdpPort(port)

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)

  if instance.disk_template in constants.DTS_FILEBASED:
    if len(instance.disks) > 0:
      file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    else:
      # No disks left in the config: reconstruct the directory path from
      # the configured storage root and the instance name
      if instance.disk_template == constants.DT_SHARED_FILE:
        file_storage_dir = utils.PathJoin(lu.cfg.GetSharedFileStorageDir(),
                                          instance.name)
      else:
        file_storage_dir = utils.PathJoin(lu.cfg.GetFileStorageDir(),
                                          instance.name)
    if target_node_uuid:
      tgt = target_node_uuid
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, lu.cfg.GetNodeName(tgt), result.fail_msg)
      all_result = False

  return all_result
321

    
322

    
323
def NICToTuple(lu, nic):
  """Build a tuple of nic information.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nic: L{objects.NIC}
  @param nic: nic to convert to hooks tuple

  """
  filled_params = lu.cfg.GetClusterInfo().SimpleFillNIC(nic.nicparams)
  netinfo = None
  if nic.network:
    netinfo = objects.Network.ToDict(lu.cfg.GetNetwork(nic.network))
  return (nic.name, nic.uuid, nic.ip, nic.mac,
          filled_params[constants.NIC_MODE],
          filled_params[constants.NIC_LINK],
          filled_params[constants.NIC_VLAN],
          nic.network, netinfo)
343

    
344

    
345
def NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  return [NICToTuple(lu, nic) for nic in nics]
361

    
362

    
363
def CopyLockList(names):
  """Makes a copy of a list of lock names.

  Handles L{locking.ALL_SET} correctly.

  """
  # ALL_SET is a sentinel value and must be passed through unchanged
  if names == locking.ALL_SET:
    return locking.ALL_SET
  return names[:]
373

    
374

    
375
def ReleaseLocks(lu, level, names=None, keep=None):
  """Releases locks owned by an LU.

  At most one of ``names`` and ``keep`` may be given; with neither,
  every lock owned at ``level`` is released.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit whose locks are released
  @param level: Lock level
  @type names: list or None
  @param names: Names of locks to release
  @type keep: list or None
  @param keep: Names of locks to retain

  """
  assert not (keep is not None and names is not None), \
         "Only one of the 'names' and the 'keep' parameters can be given"

  # Build a per-name predicate deciding whether a lock should be released
  if names is not None:
    should_release = names.__contains__
  elif keep:
    should_release = lambda name: name not in keep
  else:
    # No filter given: release all owned locks at this level
    should_release = None

  owned = lu.owned_locks(level)
  if not owned:
    # Not owning any lock at this level, do nothing
    pass

  elif should_release:
    retain = []
    release = []

    # Determine which locks to release
    for name in owned:
      if should_release(name):
        release.append(name)
      else:
        retain.append(name)

    assert len(lu.owned_locks(level)) == (len(retain) + len(release))

    # Release just some locks
    lu.glm.release(level, names=release)

    assert frozenset(lu.owned_locks(level)) == frozenset(retain)
  else:
    # Release everything
    lu.glm.release(level)

    assert not lu.glm.is_owned(level), "No locks should be owned"
423

    
424

    
425
def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
                                 target_group, cfg,
                                 _compute_fn=ComputeIPolicyInstanceViolation):
  """Compute if instance meets the specs of the new target group.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param current_group: The current group of the instance
  @param target_group: The new group of the instance
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  # Staying within the same group can never introduce a violation
  if current_group == target_group:
    return []
  return _compute_fn(ipolicy, instance, cfg)
444

    
445

    
446
def CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
                           _compute_fn=_ComputeIPolicyNodeViolation):
  """Checks that the target node is correct in terms of instance policy.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param node: The new node to relocate
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param ignore: Ignore violations of the ipolicy
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
  res = _compute_fn(ipolicy, instance, primary_node.group, node.group, cfg)

  # No violations: nothing to report
  if not res:
    return

  msg = ("Instance does not meet target node group's (%s) instance"
         " policy: %s") % (node.group, utils.CommaJoin(res))
  if ignore:
    lu.LogWarning(msg)
  else:
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
470

    
471

    
472
def GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  @type instance: L{objects.Instance}
  @param instance: the instance whose disks are tagged
  @rtype: string
  @return: the metadata text for the instance's disks

  """
  return "originstname+" + instance.name
477

    
478

    
479
def CheckNodeFreeMemory(lu, node_uuid, reason, requested, hvname, hvparams):
  """Checks if a node has enough free memory.

  The node is queried over RPC; if the reported free memory is missing,
  not an integer, or smaller than the requested amount, an
  OpPrereqError is raised.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuid: C{str}
  @param node_uuid: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hvname: string
  @param hvname: the hypervisor's name
  @type hvparams: dict of strings
  @param hvparams: the hypervisor's parameters
  @rtype: integer
  @return: node current free memory
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  node_name = lu.cfg.GetNodeName(node_uuid)
  ninfo = lu.rpc.call_node_info([node_uuid], None, [(hvname, hvparams)])
  ninfo[node_uuid].Raise("Can't get data from node %s" % node_name,
                         prereq=True, ecode=errors.ECODE_ENVIRON)
  (_, _, (hv_info, )) = ninfo[node_uuid].payload

  free_mem = hv_info.get("memory_free", None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node_name, free_mem),
                               errors.ECODE_ENVIRON)
  if free_mem < requested:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node_name, reason, requested, free_mem),
                               errors.ECODE_NORES)
  return free_mem
522

    
523

    
524
def CheckInstanceBridgesExist(lu, instance, node_uuid=None):
  """Check that the bridges needed by an instance exist.

  Defaults to the instance's primary node when no node is given.

  """
  target_uuid = instance.primary_node if node_uuid is None else node_uuid
  CheckNicsBridgesExist(lu, instance.nics, target_uuid)
531

    
532

    
533
def CheckNicsBridgesExist(lu, nics, node_uuid):
  """Check that the bridges needed by a list of nics exist.

  Only NICs in bridged mode are checked; their link parameter names the
  bridge that must exist on the node.

  """
  cluster = lu.cfg.GetClusterInfo()
  brlist = []
  for nic in nics:
    params = cluster.SimpleFillNIC(nic.nicparams)
    if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
      brlist.append(params[constants.NIC_LINK])
  if brlist:
    result = lu.rpc.call_bridges_exist(node_uuid, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 lu.cfg.GetNodeName(node_uuid), prereq=True,
                 ecode=errors.ECODE_ENVIRON)
546

    
547

    
548
def CheckNodeHasOS(lu, node_uuid, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node is not supporting the OS

  """
  result = lu.rpc.call_os_get(node_uuid, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, lu.cfg.GetNodeName(node_uuid)),
               prereq=True, ecode=errors.ECODE_INVAL)
  if force_variant:
    return
  _CheckOSVariant(result.payload, os_name)
564

    
565

    
566
def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity
  @raise errors.OpPrereqError: if the name/variant combination is invalid

  """
  variant = objects.OS.GetVariant(name)
  supported = os_obj.supported_variants

  if supported:
    # OS declares variants: one must be given and it must be known
    if not variant:
      raise errors.OpPrereqError("OS name must include a variant",
                                 errors.ECODE_INVAL)
    if variant not in supported:
      raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
  elif variant:
    # OS without variants: a variant suffix in the name is an error
    raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                               " passed)" % (os_obj.name, variant),
                               errors.ECODE_INVAL)
588

    
589

    
590
def BuildDiskLogicalIDEnv(template_name, idx, disk):
  """Build hook environment entries describing a disk's logical id.

  @type template_name: string
  @param template_name: the disk template of the instance
  @type idx: integer
  @param idx: the index of the disk within the instance
  @type disk: L{objects.Disk}
  @param disk: the disk whose logical id is exported
  @rtype: dict
  @return: environment entries for the disk's backend identifiers; always
      contains INSTANCE_DISK<idx>_TEMPLATE_NAME

  """
  # Default to no id-specific entries; this also fixes an UnboundLocalError
  # the original code raised for any template not handled below (it only
  # assigned "ret" inside the if/elif chain).
  ret = {}
  if template_name == constants.DT_PLAIN:
    vg, name = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_VG" % idx: vg,
      "INSTANCE_DISK%d_ID" % idx: name
      }
  elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
    file_driver, name = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_DRIVER" % idx: file_driver,
      "INSTANCE_DISK%d_ID" % idx: name
      }
  elif template_name == constants.DT_BLOCK:
    block_driver, adopt = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_DRIVER" % idx: block_driver,
      "INSTANCE_DISK%d_ID" % idx: adopt
      }
  elif template_name == constants.DT_RBD:
    rbd, name = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_DRIVER" % idx: rbd,
      "INSTANCE_DISK%d_ID" % idx: name
      }
  elif template_name == constants.DT_EXT:
    provider, name = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_PROVIDER" % idx: provider,
      "INSTANCE_DISK%d_ID" % idx: name
      }
  elif template_name == constants.DT_DRBD8:
    pnode, snode, port, pmin, smin, _ = disk.logical_id
    # DRBD disks carry their data and metadata LVs as children
    data, meta = disk.children
    data_vg, data_name = data.logical_id
    meta_vg, meta_name = meta.logical_id
    ret = {
      "INSTANCE_DISK%d_PNODE" % idx: pnode,
      "INSTANCE_DISK%d_SNODE" % idx: snode,
      "INSTANCE_DISK%d_PORT" % idx: port,
      "INSTANCE_DISK%d_PMINOR" % idx: pmin,
      "INSTANCE_DISK%d_SMINOR" % idx: smin,
      "INSTANCE_DISK%d_DATA_VG" % idx: data_vg,
      "INSTANCE_DISK%d_DATA_ID" % idx: data_name,
      "INSTANCE_DISK%d_META_VG" % idx: meta_vg,
      "INSTANCE_DISK%d_META_ID" % idx: meta_name,
      }
  # DT_DISKLESS and any unknown template intentionally fall through with
  # only the template-name entry below

  ret["INSTANCE_DISK%d_TEMPLATE_NAME" % idx] = template_name

  return ret