#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Utility functions, mainly (but not only) used by instance LUs."""

import logging
import os

from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import network
from ganeti import objects
from ganeti import pathutils
from ganeti import utils
from ganeti.cmdlib.common import AnnotateDiskParams, \
  ComputeIPolicyInstanceViolation


def BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                         minmem, maxmem, vcpus, nics, disk_template, disks,
                         bep, hvp, hypervisor_name, tags):
  """Builds instance related env variables for hooks.

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: string
  @param status: the desired status of the instance
  @type minmem: string
  @param minmem: the minimum memory size of the instance
  @type maxmem: string
  @param maxmem: the maximum memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (name, uuid, ip, mac, mode, link, net, netinfo)
      representing the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: list of tuples (name, uuid, size, mode, info)
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @type tags: list
  @param tags: list of instance tags as strings
  @rtype: dict
  @return: the hook environment for this instance

  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MINMEM": minmem,
    "INSTANCE_MAXMEM": maxmem,
    # TODO(2.9) remove deprecated "memory" value
    "INSTANCE_MEMORY": maxmem,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
    }
  if nics:
    nic_count = len(nics)
    for idx, (name, uuid, ip, mac, mode, link, net, netinfo) in enumerate(nics):
      if ip is None:
        ip = ""
      if name:
        env["INSTANCE_NIC%d_NAME" % idx] = name
      env["INSTANCE_NIC%d_UUID" % idx] = uuid
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if netinfo:
        nobj = objects.Network.FromDict(netinfo)
        env.update(nobj.HooksDict("INSTANCE_NIC%d_" % idx))
      elif net:
        # FIXME: broken network reference: the instance NIC specifies a
        # network, but the relevant network entry was not in the config. This
        # should be made impossible.
        env["INSTANCE_NIC%d_NETWORK_NAME" % idx] = net
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (name, uuid, size, mode, info) in enumerate(disks):
      if name:
        env["INSTANCE_DISK%d_NAME" % idx] = name
      env["INSTANCE_DISK%d_UUID" % idx] = uuid
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
      env.update(info)
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  if not tags:
    tags = []

  env["INSTANCE_TAGS"] = " ".join(tags)

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env
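
# Illustrative sketch, not called anywhere in this module: for a NIC-less,
# diskless instance (all values below are made up), BuildInstanceHookEnv
# returns a flat string-keyed dict such as:
#
#   env = BuildInstanceHookEnv(
#     name="inst1.example.com", primary_node="node1", secondary_nodes=[],
#     os_type="debootstrap", status="up", minmem="512", maxmem="1024",
#     vcpus="2", nics=[], disk_template="diskless", disks=[], bep={}, hvp={},
#     hypervisor_name="kvm", tags=[])
#   # env["INSTANCE_NIC_COUNT"] == 0, env["INSTANCE_DISK_COUNT"] == 0,
#   # env["INSTANCE_TAGS"] == "", env["OP_TARGET"] == "inst1.example.com"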


def BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    "name": instance.name,
    "primary_node": instance.primary_node,
    "secondary_nodes": instance.secondary_nodes,
    "os_type": instance.os,
    "status": instance.admin_state,
    "maxmem": bep[constants.BE_MAXMEM],
    "minmem": bep[constants.BE_MINMEM],
    "vcpus": bep[constants.BE_VCPUS],
    "nics": NICListToTuple(lu, instance.nics),
    "disk_template": instance.disk_template,
    "disks": [(disk.name, disk.uuid, disk.size, disk.mode,
               BuildDiskLogicalIDEnv(instance.disk_template, idx, disk))
              for idx, disk in enumerate(instance.disks)],
    "bep": bep,
    "hvp": hvp,
    "hypervisor_name": instance.hypervisor,
  }
  if override:
    args.update(override)
  return BuildInstanceHookEnv(**args) # pylint: disable=W0142
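
# Illustrative only: entries in "override" replace the computed arguments
# before the call above, e.g. to describe the state an instance will be in
# rather than its current one (hypothetical value shown):
#
#   env = BuildInstanceHookEnvByObject(lu, instance,
#                                      override={"status": "down"})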


def GetClusterDomainSecret():
  """Reads the cluster domain secret.

  """
  return utils.ReadOneLineFile(pathutils.CLUSTER_DOMAIN_SECRET_FILE,
                               strict=True)


def CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_STATE)


def CheckNodeVmCapable(lu, node):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  if not lu.cfg.GetNodeInfo(node).vm_capable:
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
                               errors.ECODE_STATE)


def RemoveInstance(lu, feedback_fn, instance, ignore_failures):
  """Utility function to remove an instance.

  """
  logging.info("Removing block devices for instance %s", instance.name)

  if not RemoveDisks(lu, instance, ignore_failures=ignore_failures):
    if not ignore_failures:
      raise errors.OpExecError("Can't remove instance's disks")
    feedback_fn("Warning: can't remove instance's disks")

  logging.info("Removing instance %s out of cluster config", instance.name)

  lu.cfg.RemoveInstance(instance.name)

  assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
    "Instance lock removal conflict"

  # Remove lock for the instance
  lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name


def RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node: string
  @param target_node: used to override the node on which to remove the disks
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  ports_to_release = set()
  anno_disks = AnnotateDiskParams(instance, instance.disks, lu.cfg)
  for (idx, device) in enumerate(anno_disks):
    if target_node:
      edata = [(target_node, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node, disk in edata:
      if lu.op.keep_disks and disk.dev_type == constants.DT_EXT:
        continue
      lu.cfg.SetDiskID(disk, node)
      result = lu.rpc.call_blockdev_remove(node, disk)
      if result.fail_msg:
        lu.LogWarning("Could not remove disk %s on node %s,"
                      " continuing anyway: %s", idx, node, result.fail_msg)
        if not (result.offline and node != instance.primary_node):
          all_result = False

    # if this is a DRBD disk, return its port to the pool
    if device.dev_type in constants.LDS_DRBD:
      ports_to_release.add(device.logical_id[2])

  if all_result or ignore_failures:
    for port in ports_to_release:
      lu.cfg.AddTcpUdpPort(port)

  if instance.disk_template in constants.DTS_FILEBASED:
    if len(instance.disks) > 0:
      file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    else:
      if instance.disk_template == constants.DT_SHARED_FILE:
        file_storage_dir = utils.PathJoin(lu.cfg.GetSharedFileStorageDir(),
                                          instance.name)
      else:
        file_storage_dir = utils.PathJoin(lu.cfg.GetFileStorageDir(),
                                          instance.name)
    if target_node:
      tgt = target_node
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, tgt, result.fail_msg)
      all_result = False

  return all_result
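
# Usage sketch with placeholder names: during cleanup after a failed move, a
# caller can target a specific node and tolerate per-disk failures:
#
#   if not RemoveDisks(lu, instance, target_node=source_node,
#                      ignore_failures=True):
#     lu.LogWarning("Some disks of instance %s could not be removed",
#                   instance.name)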


def NICToTuple(lu, nic):
  """Build a tuple of NIC information.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nic: L{objects.NIC}
  @param nic: nic to convert to hooks tuple

  """
  cluster = lu.cfg.GetClusterInfo()
  filled_params = cluster.SimpleFillNIC(nic.nicparams)
  mode = filled_params[constants.NIC_MODE]
  link = filled_params[constants.NIC_LINK]
  netinfo = None
  if nic.network:
    nobj = lu.cfg.GetNetwork(nic.network)
    netinfo = objects.Network.ToDict(nobj)
  return (nic.name, nic.uuid, nic.ip, nic.mac, mode, link, nic.network, netinfo)
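
# Shape of the returned tuple, with made-up values (the last element is the
# serialized network dict, or None when the NIC is not on a network):
#
#   ("nic0", "6029b7e8-a3c5-4a1c-8c3e-2c9d8f6b1a2b", "198.51.100.10",
#    "aa:00:00:35:4e:01", "bridged", "xen-br0", "net1", {...})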


def NICListToTuple(lu, nics):
  """Build a list of NIC information tuples.

  This list is suitable to be passed to BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  for nic in nics:
    hooks_nics.append(NICToTuple(lu, nic))
  return hooks_nics


def CopyLockList(names):
  """Makes a copy of a list of lock names.

  Handles L{locking.ALL_SET} correctly.

  """
  if names == locking.ALL_SET:
    return locking.ALL_SET
  else:
    return names[:]
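
# Why the special case matters: L{locking.ALL_SET} is a sentinel value rather
# than a real list, so "names[:]" would fail on it; this helper lets callers
# copy lock name lists without special-casing it, e.g. (illustrative):
#
#   node_locks = CopyLockList(self.needed_locks[locking.LEVEL_NODE])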


def ReleaseLocks(lu, level, names=None, keep=None):
  """Releases locks owned by an LU.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @param level: Lock level
  @type names: list or None
  @param names: Names of locks to release
  @type keep: list or None
  @param keep: Names of locks to retain

  """
  assert not (keep is not None and names is not None), \
         "Only one of the 'names' and the 'keep' parameters can be given"

  if names is not None:
    should_release = names.__contains__
  elif keep:
    should_release = lambda name: name not in keep
  else:
    should_release = None

  owned = lu.owned_locks(level)
  if not owned:
    # Not owning any lock at this level, do nothing
    pass

  elif should_release:
    retain = []
    release = []

    # Determine which locks to release
    for name in owned:
      if should_release(name):
        release.append(name)
      else:
        retain.append(name)

    assert len(lu.owned_locks(level)) == (len(retain) + len(release))

    # Release just some locks
    lu.glm.release(level, names=release)

    assert frozenset(lu.owned_locks(level)) == frozenset(retain)
  else:
    # Release everything
    lu.glm.release(level)

    assert not lu.glm.is_owned(level), "No locks should be owned"
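
# Illustrative calls ("node1"/"node2" are placeholder lock names); at most one
# of "names" and "keep" may be given:
#
#   ReleaseLocks(lu, locking.LEVEL_NODE)                   # release all
#   ReleaseLocks(lu, locking.LEVEL_NODE, keep=["node1"])   # keep only node1
#   ReleaseLocks(lu, locking.LEVEL_NODE, names=["node2"])  # release only node2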


def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
                                 target_group, cfg,
                                 _compute_fn=ComputeIPolicyInstanceViolation):
  """Compute if instance meets the specs of the new target group.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param current_group: The current group of the instance
  @param target_group: The new group of the instance
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  if current_group == target_group:
    return []
  else:
    return _compute_fn(ipolicy, instance, cfg)


def CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
                           _compute_fn=_ComputeIPolicyNodeViolation):
  """Checks that the target node is correct in terms of instance policy.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param node: The new node to relocate the instance to
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param ignore: Ignore violations of the ipolicy
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
  res = _compute_fn(ipolicy, instance, primary_node.group, node.group, cfg)

  if res:
    msg = ("Instance does not meet target node group's (%s) instance"
           " policy: %s") % (node.group, utils.CommaJoin(res))
    if ignore:
      lu.LogWarning(msg)
    else:
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
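
# Sketch of the call pattern with placeholder variables: before relocating an
# instance, check it against the target group's policy, downgrading the error
# to a warning when the operation requests it:
#
#   CheckTargetNodeIPolicy(self, target_group_ipolicy, self.instance,
#                          target_node_obj, self.cfg,
#                          ignore=self.op.ignore_ipolicy)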


def GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name


def CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @rtype: integer
  @return: node current free memory
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name], False)
  nodeinfo[node].Raise("Can't get data from node %s" % node,
                       prereq=True, ecode=errors.ECODE_ENVIRON)
  (_, _, (hv_info, )) = nodeinfo[node].payload

  free_mem = hv_info.get("memory_free", None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem),
                               errors.ECODE_NORES)
  return free_mem
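
# Illustrative call (placeholder node name): check for 4096 MiB before
# starting an instance; the helper raises OpPrereqError on shortage, so the
# caller only sees the success path:
#
#   free = CheckNodeFreeMemory(self, "node1.example.com",
#                              "starting instance %s" % instance.name,
#                              4096, instance.hypervisor)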


def CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  CheckNicsBridgesExist(lu, instance.nics, node)


def CheckNicsBridgesExist(lu, target_nics, target_node):
  """Check that the bridges needed by a list of nics exist.

  """
  cluster = lu.cfg.GetClusterInfo()
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node does not support the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


def BuildDiskLogicalIDEnv(template_name, idx, disk):
  """Build hook environment entries describing a disk's logical ID.

  """
  if template_name == constants.DT_PLAIN:
    vg, name = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_VG" % idx: vg,
      "INSTANCE_DISK%d_ID" % idx: name,
      }
  elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
    file_driver, name = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_DRIVER" % idx: file_driver,
      "INSTANCE_DISK%d_ID" % idx: name,
      }
  elif template_name == constants.DT_BLOCK:
    block_driver, adopt = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_DRIVER" % idx: block_driver,
      "INSTANCE_DISK%d_ID" % idx: adopt,
      }
  elif template_name == constants.DT_RBD:
    rbd, name = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_DRIVER" % idx: rbd,
      "INSTANCE_DISK%d_ID" % idx: name,
      }
  elif template_name == constants.DT_EXT:
    provider, name = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_PROVIDER" % idx: provider,
      "INSTANCE_DISK%d_ID" % idx: name,
      }
  elif template_name == constants.DT_DRBD8:
    pnode, snode, port, pmin, smin, _ = disk.logical_id
    data, meta = disk.children
    data_vg, data_name = data.logical_id
    meta_vg, meta_name = meta.logical_id
    ret = {
      "INSTANCE_DISK%d_PNODE" % idx: pnode,
      "INSTANCE_DISK%d_SNODE" % idx: snode,
      "INSTANCE_DISK%d_PORT" % idx: port,
      "INSTANCE_DISK%d_PMINOR" % idx: pmin,
      "INSTANCE_DISK%d_SMINOR" % idx: smin,
      "INSTANCE_DISK%d_DATA_VG" % idx: data_vg,
      "INSTANCE_DISK%d_DATA_ID" % idx: data_name,
      "INSTANCE_DISK%d_META_VG" % idx: meta_vg,
      "INSTANCE_DISK%d_META_ID" % idx: meta_name,
      }
  else:
    # DT_DISKLESS and any template without per-disk logical ID details;
    # previously only DT_DISKLESS was handled here, which left "ret"
    # unbound for unknown templates
    ret = {}

  ret.update({
    "INSTANCE_DISK%d_TEMPLATE_NAME" % idx: template_name,
    })

  return ret
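
# Example result for a DT_PLAIN disk with idx=0 (made-up volume names, where
# logical_id is the (vg, lv) pair):
#
#   {"INSTANCE_DISK0_VG": "xenvg",
#    "INSTANCE_DISK0_ID": "0c3f6a2e.disk0",
#    "INSTANCE_DISK0_TEMPLATE_NAME": "plain"}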