Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib / instance_utils.py @ 731624e6

History | View | Annotate | Download (21.1 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Utility function mainly, but not only used by instance LU's."""
23

    
24
import logging
25
import os
26

    
27
from ganeti import constants
28
from ganeti import errors
29
from ganeti import locking
30
from ganeti import network
31
from ganeti import objects
32
from ganeti import pathutils
33
from ganeti import utils
34
from ganeti.cmdlib.common import AnnotateDiskParams, \
35
  ComputeIPolicyInstanceViolation, CheckDiskTemplateEnabled
36

    
37

    
38
def BuildInstanceHookEnv(name, primary_node_name, secondary_node_names, os_type,
                         status, minmem, maxmem, vcpus, nics, disk_template,
                         disks, bep, hvp, hypervisor_name, tags):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node_name: string
  @param primary_node_name: the name of the instance's primary node
  @type secondary_node_names: list
  @param secondary_node_names: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: string
  @param status: the desired status of the instance
  @type minmem: string
  @param minmem: the minimum memory size of the instance
  @type maxmem: string
  @param maxmem: the maximum memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (name, uuid, ip, mac, mode, link, vlan, net,
      netinfo) representing the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: list of tuples (name, uuid, size, mode, info), where info is
      a dict of extra "INSTANCE_DISKn_*" variables (see
      L{BuildDiskLogicalIDEnv}) merged verbatim into the environment
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @type tags: list
  @param tags: list of instance tags as strings
  @rtype: dict
  @return: the hook environment for this instance

  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node_name,
    "INSTANCE_SECONDARIES": " ".join(secondary_node_names),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MINMEM": minmem,
    "INSTANCE_MAXMEM": maxmem,
    # TODO(2.9) remove deprecated "memory" value
    "INSTANCE_MEMORY": maxmem,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
    }
  if nics:
    nic_count = len(nics)
    # "nic_name" instead of "name" so the instance name parameter is not
    # shadowed inside the loop
    for idx, (nic_name, uuid, ip, mac, mode, link, vlan, net, netinfo) \
        in enumerate(nics):
      if ip is None:
        ip = ""
      if nic_name:
        env["INSTANCE_NIC%d_NAME" % idx] = nic_name
      env["INSTANCE_NIC%d_UUID" % idx] = uuid
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      env["INSTANCE_NIC%d_VLAN" % idx] = vlan
      if netinfo:
        nobj = objects.Network.FromDict(netinfo)
        env.update(nobj.HooksDict("INSTANCE_NIC%d_" % idx))
      elif net:
        # FIXME: broken network reference: the instance NIC specifies a
        # network, but the relevant network entry was not in the config. This
        # should be made impossible.
        # (Bug fix: this used to test the imported "network" *module*, which
        # is always true, so the variable was set even for NICs without a
        # network; test the NIC's network value instead.)
        env["INSTANCE_NIC%d_NETWORK_NAME" % idx] = net
      if mode == constants.NIC_MODE_BRIDGED or \
         mode == constants.NIC_MODE_OVS:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (disk_name, uuid, size, mode, info) in enumerate(disks):
      if disk_name:
        env["INSTANCE_DISK%d_NAME" % idx] = disk_name
      env["INSTANCE_DISK%d_UUID" % idx] = uuid
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
      # logical-id details (VG, port, provider, ...) computed by the caller
      env.update(info)
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  if not tags:
    tags = []

  env["INSTANCE_TAGS"] = " ".join(tags)

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env
149

    
150

    
151
def BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  # fully evaluated backend/hypervisor parameters for this instance
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  # one tuple per disk; the trailing dict carries logical-id details
  disk_tuples = [(d.name, d.uuid, d.size, d.mode,
                  BuildDiskLogicalIDEnv(instance.disk_template, idx, d))
                 for idx, d in enumerate(instance.disks)]
  args = {
    "name": instance.name,
    "primary_node_name": lu.cfg.GetNodeName(instance.primary_node),
    "secondary_node_names": lu.cfg.GetNodeNames(instance.secondary_nodes),
    "os_type": instance.os,
    "status": instance.admin_state,
    "maxmem": bep[constants.BE_MAXMEM],
    "minmem": bep[constants.BE_MINMEM],
    "vcpus": bep[constants.BE_VCPUS],
    "nics": NICListToTuple(lu, instance.nics),
    "disk_template": instance.disk_template,
    "disks": disk_tuples,
    "bep": bep,
    "hvp": hvp,
    "hypervisor_name": instance.hypervisor,
    "tags": instance.tags,
  }
  if override:
    args.update(override)
  return BuildInstanceHookEnv(**args) # pylint: disable=W0142
191

    
192

    
193
def GetClusterDomainSecret():
  """Reads the cluster domain secret.

  @rtype: string
  @return: the one-line contents of the cluster domain secret file

  """
  secret_file = pathutils.CLUSTER_DOMAIN_SECRET_FILE
  # strict=True: fail rather than silently accept a malformed secret file
  return utils.ReadOneLineFile(secret_file, strict=True)
199

    
200

    
201
def CheckNodeNotDrained(lu, node_uuid):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  node_info = lu.cfg.GetNodeInfo(node_uuid)
  if not node_info.drained:
    return
  raise errors.OpPrereqError("Can't use drained node %s" % node_info.name,
                             errors.ECODE_STATE)
213

    
214

    
215
def CheckNodeVmCapable(lu, node_uuid):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  node_info = lu.cfg.GetNodeInfo(node_uuid)
  if node_info.vm_capable:
    return
  raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node_uuid,
                             errors.ECODE_STATE)
226

    
227

    
228
def RemoveInstance(lu, feedback_fn, instance, ignore_failures):
  """Utility function to remove an instance.

  Removes the instance's disks, then drops it from the cluster
  configuration and schedules the release of its instance lock.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @param feedback_fn: function used to report warnings to the user
  @type instance: L{objects.Instance}
  @param instance: the instance to remove
  @type ignore_failures: boolean
  @param ignore_failures: whether to proceed despite disk removal errors
  @raise errors.OpExecError: if disks can't be removed and failures are
      not ignored

  """
  logging.info("Removing block devices for instance %s", instance.name)

  disks_removed = RemoveDisks(lu, instance, ignore_failures=ignore_failures)
  if not disks_removed:
    if not ignore_failures:
      raise errors.OpExecError("Can't remove instance's disks")
    feedback_fn("Warning: can't remove instance's disks")

  logging.info("Removing instance %s out of cluster config", instance.name)

  lu.cfg.RemoveInstance(instance.uuid)

  assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
    "Instance lock removal conflict"

  # Remove lock for the instance
  lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
248

    
249

    
250
def RemoveDisks(lu, instance, target_node_uuid=None, ignore_failures=False):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node_uuid: string
  @param target_node_uuid: used to override the node on which to remove the
          disks
  @type ignore_failures: boolean
  @param ignore_failures: if True, DRBD ports are returned to the pool even
          when some block device removals failed
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  ports_to_release = set()
  anno_disks = AnnotateDiskParams(instance, instance.disks, lu.cfg)
  for (idx, device) in enumerate(anno_disks):
    if target_node_uuid:
      # explicit target: remove the disk only there
      edata = [(target_node_uuid, device)]
    else:
      # otherwise remove it from every node in the device's node tree
      edata = device.ComputeNodeTree(instance.primary_node)
    for node_uuid, disk in edata:
      result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
      if result.fail_msg:
        lu.LogWarning("Could not remove disk %s on node %s,"
                      " continuing anyway: %s", idx,
                      lu.cfg.GetNodeName(node_uuid), result.fail_msg)
        # a failure on an offline secondary doesn't count as overall failure
        if not (result.offline and node_uuid != instance.primary_node):
          all_result = False

    # if this is a DRBD disk, return its port to the pool
    if device.dev_type in constants.DTS_DRBD:
      ports_to_release.add(device.logical_id[2])

  # only recycle the ports once the devices are (believed to be) gone
  if all_result or ignore_failures:
    for port in ports_to_release:
      lu.cfg.AddTcpUdpPort(port)

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)

  if instance.disk_template in constants.DTS_FILEBASED:
    if len(instance.disks) > 0:
      # derive the storage directory from the first disk's path
      file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    else:
      # no disks left: reconstruct the per-instance directory path
      if instance.disk_template == constants.DT_SHARED_FILE:
        file_storage_dir = utils.PathJoin(lu.cfg.GetSharedFileStorageDir(),
                                          instance.name)
      else:
        file_storage_dir = utils.PathJoin(lu.cfg.GetFileStorageDir(),
                                          instance.name)
    if target_node_uuid:
      tgt = target_node_uuid
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, lu.cfg.GetNodeName(tgt), result.fail_msg)
      all_result = False

  return all_result
318

    
319

    
320
def NICToTuple(lu, nic):
  """Build a tuple of nic information.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nic: L{objects.NIC}
  @param nic: nic to convert to hooks tuple

  """
  # fill in cluster-level defaults for this NIC's parameters
  filled_params = lu.cfg.GetClusterInfo().SimpleFillNIC(nic.nicparams)
  mode = filled_params[constants.NIC_MODE]
  link = filled_params[constants.NIC_LINK]
  vlan = filled_params[constants.NIC_VLAN]
  if nic.network:
    nobj = lu.cfg.GetNetwork(nic.network)
    netinfo = objects.Network.ToDict(nobj)
  else:
    netinfo = None
  return (nic.name, nic.uuid, nic.ip, nic.mac, mode, link, vlan,
          nic.network, netinfo)
340

    
341

    
342
def NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  return [NICToTuple(lu, nic) for nic in nics]
358

    
359

    
360
def CopyLockList(names):
  """Makes a copy of a list of lock names.

  Handles L{locking.ALL_SET} correctly.

  @param names: list of lock names, or L{locking.ALL_SET}
  @return: a fresh copy of the list, or L{locking.ALL_SET} unchanged

  """
  # ALL_SET is a sentinel and must be passed through, not sliced
  if names == locking.ALL_SET:
    return locking.ALL_SET
  return list(names)
370

    
371

    
372
def ReleaseLocks(lu, level, names=None, keep=None):
  """Releases locks owned by an LU.

  @type lu: L{LogicalUnit}
  @param level: Lock level
  @type names: list or None
  @param names: Names of locks to release
  @type keep: list or None
  @param keep: Names of locks to retain

  """
  assert not (keep is not None and names is not None), \
         "Only one of the 'names' and the 'keep' parameters can be given"

  # Build a predicate deciding, per lock name, whether it gets released
  if names is not None:
    should_release = names.__contains__
  elif keep:
    should_release = lambda name: name not in keep
  else:
    should_release = None

  owned = lu.owned_locks(level)
  if not owned:
    # Not owning any lock at this level, do nothing
    return

  if should_release:
    # Partition the owned locks into those to release and those to retain
    release = []
    retain = []
    for lock_name in owned:
      if should_release(lock_name):
        release.append(lock_name)
      else:
        retain.append(lock_name)

    assert len(lu.owned_locks(level)) == (len(retain) + len(release))

    # Release just some locks
    lu.glm.release(level, names=release)

    assert frozenset(lu.owned_locks(level)) == frozenset(retain)
  else:
    # Release everything
    lu.glm.release(level)

    assert not lu.glm.is_owned(level), "No locks should be owned"
420

    
421

    
422
def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
                                 target_group, cfg,
                                 _compute_fn=ComputeIPolicyInstanceViolation):
  """Compute if instance meets the specs of the new target group.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param current_group: The current group of the instance
  @param target_group: The new group of the instance
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  # Staying in the same group can never violate the group's policy
  if current_group != target_group:
    return _compute_fn(ipolicy, instance, cfg)
  return []
441

    
442

    
443
def CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
                           _compute_fn=_ComputeIPolicyNodeViolation):
  """Checks that the target node is correct in terms of instance policy.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param node: The new node to relocate
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param ignore: Ignore violations of the ipolicy
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
  res = _compute_fn(ipolicy, instance, primary_node.group, node.group, cfg)

  if not res:
    return

  msg = ("Instance does not meet target node group's (%s) instance"
         " policy: %s") % (node.group, utils.CommaJoin(res))
  if ignore:
    # caller asked to proceed anyway; only warn
    lu.LogWarning(msg)
  else:
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
467

    
468

    
469
def GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  @type instance: L{objects.Instance}
  @param instance: the instance the disks belong to
  @rtype: string
  @return: the "originstname+<name>" metadata tag

  """
  return "originstname+%s" % (instance.name,)
474

    
475

    
476
def CheckNodeFreeMemory(lu, node_uuid, reason, requested, hvname, hvparams):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuid: C{str}
  @param node_uuid: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hvname: string
  @param hvname: the hypervisor's name
  @type hvparams: dict of strings
  @param hvparams: the hypervisor's parameters
  @rtype: integer
  @return: node current free memory
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  node_name = lu.cfg.GetNodeName(node_uuid)
  nodeinfo = lu.rpc.call_node_info([node_uuid], None, [(hvname, hvparams)])
  node_result = nodeinfo[node_uuid]
  node_result.Raise("Can't get data from node %s" % node_name,
                    prereq=True, ecode=errors.ECODE_ENVIRON)
  # payload is (bootid, storage info, hypervisor info list); we asked for
  # exactly one hypervisor, so unpack a single entry
  (_, _, (hv_info, )) = node_result.payload

  free_mem = hv_info.get("memory_free", None)
  # a missing or non-integer value means we can't make a safe decision
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node_name, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node_name, reason, requested, free_mem),
                               errors.ECODE_NORES)
  return free_mem
519

    
520

    
521
def CheckInstanceBridgesExist(lu, instance, node_uuid=None):
  """Check that the bridges needed by an instance exist.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose NIC bridges are checked
  @param node_uuid: node to check on; defaults to the instance's primary node

  """
  check_node = node_uuid if node_uuid is not None else instance.primary_node
  CheckNicsBridgesExist(lu, instance.nics, check_node)
528

    
529

    
530
def CheckNicsBridgesExist(lu, nics, node_uuid):
  """Check that the bridges needed by a list of nics exist.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @param nics: list of L{objects.NIC} to check
  @param node_uuid: node on which the bridges must exist
  @raise errors.OpPrereqError: if a required bridge is missing

  """
  cluster = lu.cfg.GetClusterInfo()
  # collect the link (bridge name) of every bridged NIC
  brlist = []
  for nic in nics:
    params = cluster.SimpleFillNIC(nic.nicparams)
    if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
      brlist.append(params[constants.NIC_LINK])
  if not brlist:
    return
  result = lu.rpc.call_bridges_exist(node_uuid, brlist)
  result.Raise("Error checking bridges on destination node '%s'" %
               lu.cfg.GetNodeName(node_uuid), prereq=True,
               ecode=errors.ECODE_ENVIRON)
543

    
544

    
545
def CheckNodeHasOS(lu, node_uuid, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node is not supporting the OS

  """
  result = lu.rpc.call_os_get(node_uuid, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, lu.cfg.GetNodeName(node_uuid)),
               prereq=True, ecode=errors.ECODE_INVAL)
  if force_variant:
    # caller explicitly asked to skip variant validation
    return
  _CheckOSVariant(result.payload, os_name)
561

    
562

    
563
def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity
  @raise errors.OpPrereqError: if the variant is missing, unsupported, or
      given for an OS without variant support

  """
  variant = objects.OS.GetVariant(name)
  supported = os_obj.supported_variants
  if supported:
    # this OS declares variants: one must be given and it must be known
    if not variant:
      raise errors.OpPrereqError("OS name must include a variant",
                                 errors.ECODE_INVAL)
    if variant not in supported:
      raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
  elif variant:
    # no variants supported, so specifying one is an error
    raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                               " passed)" % (os_obj.name, variant),
                               errors.ECODE_INVAL)
585

    
586

    
587
def BuildDiskLogicalIDEnv(template_name, idx, disk):
  """Build hook environment variables describing a disk's logical id.

  @type template_name: string
  @param template_name: disk template of the instance the disk belongs to
  @type idx: integer
  @param idx: index of the disk within the instance
  @type disk: L{objects.Disk}
  @param disk: disk object whose logical_id is translated
  @rtype: dict
  @return: "INSTANCE_DISK<idx>_*" environment variables for this disk

  """
  if template_name == constants.DT_PLAIN:
    vg, name = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_VG" % idx: vg,
      "INSTANCE_DISK%d_ID" % idx: name
      }
  elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
    file_driver, name = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_DRIVER" % idx: file_driver,
      "INSTANCE_DISK%d_ID" % idx: name
      }
  elif template_name == constants.DT_BLOCK:
    block_driver, adopt = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_DRIVER" % idx: block_driver,
      "INSTANCE_DISK%d_ID" % idx: adopt
      }
  elif template_name == constants.DT_RBD:
    rbd, name = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_DRIVER" % idx: rbd,
      "INSTANCE_DISK%d_ID" % idx: name
      }
  elif template_name == constants.DT_EXT:
    provider, name = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_PROVIDER" % idx: provider,
      "INSTANCE_DISK%d_ID" % idx: name
      }
  elif template_name == constants.DT_DRBD8:
    pnode, snode, port, pmin, smin, _ = disk.logical_id
    # DRBD disks carry their data and metadata LVs as children
    data, meta = disk.children
    data_vg, data_name = data.logical_id
    meta_vg, meta_name = meta.logical_id
    ret = {
      "INSTANCE_DISK%d_PNODE" % idx: pnode,
      "INSTANCE_DISK%d_SNODE" % idx: snode,
      "INSTANCE_DISK%d_PORT" % idx: port,
      "INSTANCE_DISK%d_PMINOR" % idx: pmin,
      "INSTANCE_DISK%d_SMINOR" % idx: smin,
      "INSTANCE_DISK%d_DATA_VG" % idx: data_vg,
      "INSTANCE_DISK%d_DATA_ID" % idx: data_name,
      "INSTANCE_DISK%d_META_VG" % idx: meta_vg,
      "INSTANCE_DISK%d_META_ID" % idx: meta_name,
      }
  elif template_name == constants.DT_DISKLESS:
    ret = {}
  else:
    # Unknown/new disk template: export no logical-id details. Previously
    # this case fell through and raised UnboundLocalError on "ret".
    ret = {}

  ret.update({
    "INSTANCE_DISK%d_TEMPLATE_NAME" % idx: template_name
    })

  return ret