Revision 60975797

b/doc/examples/bash_completion.in
@@ -90,7 +90,7 @@
   if [[ -e "@LOCALSTATEDIR@/lib/ganeti/ssconf_cluster_name" ]]; then
     cmds="add-tags command copyfile destroy getmaster info list-tags \
           masterfailover modify queue redist-conf remove-tags rename \
-          search-tags verify verify-disks version"
+          repair-disk-sizes search-tags verify verify-disks version"
   else
     cmds="init"
   fi
b/lib/cmdlib.py
@@ -1329,6 +1329,100 @@
     return result
 
 
+class LURepairDiskSizes(NoHooksLU):
+  """Verifies the cluster disks sizes.
+
+  """
+  _OP_REQP = ["instances"]
+  REQ_BGL = False
+
+  def ExpandNames(self):
+
+    if not isinstance(self.op.instances, list):
+      raise errors.OpPrereqError("Invalid argument type 'instances'")
+
+    if self.op.instances:
+      self.wanted_names = []
+      for name in self.op.instances:
+        full_name = self.cfg.ExpandInstanceName(name)
+        if full_name is None:
+          raise errors.OpPrereqError("Instance '%s' not known" % name)
+        self.wanted_names.append(full_name)
+      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
+      self.needed_locks = {
+        locking.LEVEL_NODE: [],
+        locking.LEVEL_INSTANCE: self.wanted_names,
+        }
+      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+    else:
+      self.wanted_names = None
+      self.needed_locks = {
+        locking.LEVEL_NODE: locking.ALL_SET,
+        locking.LEVEL_INSTANCE: locking.ALL_SET,
+        }
+    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
+
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_NODE and self.wanted_names is not None:
+      self._LockInstancesNodes(primary_only=True)
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This only checks the optional instance list against the existing names.
+
+    """
+    if self.wanted_names is None:
+      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
+
+    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
+                             in self.wanted_names]
+
+  def Exec(self, feedback_fn):
+    """Verify the size of cluster disks.
+
+    """
+    # TODO: check child disks too
+    # TODO: check differences in size between primary/secondary nodes
+    per_node_disks = {}
+    for instance in self.wanted_instances:
+      pnode = instance.primary_node
+      if pnode not in per_node_disks:
+        per_node_disks[pnode] = []
+      for idx, disk in enumerate(instance.disks):
+        per_node_disks[pnode].append((instance, idx, disk))
+
+    changed = []
+    for node, dskl in per_node_disks.items():
+      result = self.rpc.call_blockdev_getsizes(node, [v[2] for v in dskl])
+      if result.failed:
+        self.LogWarning("Failure in blockdev_getsizes call to node"
+                        " %s, ignoring", node)
+        continue
+      if len(result.data) != len(dskl):
+        self.LogWarning("Invalid result from node %s, ignoring node results",
+                        node)
+        continue
+      for ((instance, idx, disk), size) in zip(dskl, result.data):
+        if size is None:
+          self.LogWarning("Disk %d of instance %s did not return size"
+                          " information, ignoring", idx, instance.name)
+          continue
+        if not isinstance(size, (int, long)):
+          self.LogWarning("Disk %d of instance %s did not return valid"
+                          " size information, ignoring", idx, instance.name)
+          continue
+        size = size >> 20
+        if size != disk.size:
+          self.LogInfo("Disk %d of instance %s has mismatched size,"
+                       " correcting: recorded %d, actual %d", idx,
+                       instance.name, disk.size, size)
+          disk.size = size
+          self.cfg.Update(instance)
+          changed.append((instance.name, idx, size))
+    return changed
+
+
 class LURenameCluster(LogicalUnit):
   """Rename the cluster.
 
b/lib/mcpu.py
@@ -50,6 +50,7 @@
     opcodes.OpVerifyDisks: cmdlib.LUVerifyDisks,
     opcodes.OpSetClusterParams: cmdlib.LUSetClusterParams,
     opcodes.OpRedistributeConfig: cmdlib.LURedistributeConfig,
+    opcodes.OpRepairDiskSizes: cmdlib.LURepairDiskSizes,
     # node lu
     opcodes.OpAddNode: cmdlib.LUAddNode,
     opcodes.OpQueryNodes: cmdlib.LUQueryNodes,
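
As a reading aid for the Exec method added in cmdlib.py above: the node RPC
apparently reports sizes in bytes while the configuration stores MiB, hence
the >> 20 shift before the value is compared against disk.size. A minimal
sketch of that per-disk check follows (Python 2, matching the patch); the
helper name is hypothetical and not part of the patch.

    def _compare_disk_size(configured_mib, reported_size):
      """Hypothetical helper mirroring the check in LURepairDiskSizes.Exec.

      Returns the corrected size in MiB when the recorded value is stale,
      or None when the node result is missing/invalid or already consistent
      (the LU logs a warning and skips such disks).
      """
      if reported_size is None or not isinstance(reported_size, (int, long)):
        return None
      actual_mib = reported_size >> 20  # node reports bytes, config stores MiB
      if actual_mib != configured_mib:
        return actual_mib
      return None

In the LU itself a corrected value is additionally written back through
self.cfg.Update(instance) and collected into the returned "changed" list.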
b/lib/opcodes.py
@@ -229,6 +229,26 @@
   __slots__ = []
 
 
+class OpRepairDiskSizes(OpCode):
+  """Verify the disk sizes of the instances and fix configuration
+  mismatches.
+
+  Parameters: optional instances list, in case we want to restrict the
+  checks to only a subset of the instances.
+
+  Result: a list of tuples, (instance, disk, new-size) for changed
+  configurations.
+
+  In normal operation, the list should be empty.
+
+  @type instances: list
+  @ivar instances: the list of instances to check, or empty for all instances
+
+  """
+  OP_ID = "OP_CLUSTER_REPAIR_DISK_SIZES"
+  __slots__ = ["instances"]
+
+
 class OpQueryConfigValues(OpCode):
   """Query cluster configuration values."""
   OP_ID = "OP_CLUSTER_CONFIG_QUERY"
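
A hedged sketch of how a client might submit the opcode defined above and
consume the documented (instance, disk, new-size) result tuples. The opcode
constructor and the SubmitOpCode helper appear in the gnt-cluster change
below; the wrapper function and its output format are purely illustrative.

    from ganeti import opcodes
    from ganeti.cli import SubmitOpCode

    def repair_and_report(instance_names):
      """Run OP_CLUSTER_REPAIR_DISK_SIZES and report corrected disks.

      An empty instance_names list means "check all instances".
      """
      op = opcodes.OpRepairDiskSizes(instances=instance_names)
      changed = SubmitOpCode(op)
      for name, idx, new_size in changed:
        # new_size is the value now recorded in the configuration (in MiB)
        print "%s: disk %d corrected to %d MiB" % (name, idx, new_size)
      return 0

In normal operation the returned list is empty and the loop prints nothing.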
b/man/gnt-cluster.sgml
@@ -611,6 +611,38 @@
     </refsect2>
 
     <refsect2>
+      <title>REPAIR-DISK-SIZES</title>
+
+      <cmdsynopsis>
+        <command>repair-disk-sizes</command>
+        <arg rep="repeat">instance</arg>
+      </cmdsynopsis>
+
+      <para>
+        This command checks that the recorded size of the given
+        instance's disks matches the actual size and updates any
+        mismatches found. This is needed if the Ganeti configuration
+        is no longer consistent with reality, as it will impact some
+        disk operations. If no arguments are given, all instances will
+        be checked.
+      </para>
+
+      <para>
+        Note that only active disks can be checked by this command; in
+        case a disk cannot be activated it's advised to use
+        <command>gnt-instance activate-disks --ignore-size
+        ...</command> to force activation without regard to the
+        current size.
+      </para>
+
+      <para>
+        When all disk sizes are consistent, the command will
+        return no output. Otherwise it will log details about the
+        inconsistencies in the configuration.
+      </para>
+    </refsect2>
+
+    <refsect2>
       <title>SEARCH-TAGS</title>
 
       <cmdsynopsis>
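
The paragraphs above describe the intended operator workflow: activate any
offline disks while ignoring their recorded size, then let repair-disk-sizes
reconcile the configuration. A rough sketch of that sequence at the opcode
level follows; OpActivateInstanceDisks is the existing activate-disks opcode,
but its ignore_size parameter is an assumption here, mirroring the
--ignore-size option referenced in the man page.

    from ganeti import opcodes
    from ganeti.cli import SubmitOpCode

    def activate_then_repair(instance_name):
      """Sketch of the documented workflow for inactive disks."""
      # Assumption: ignore_size mirrors 'gnt-instance activate-disks
      # --ignore-size'; the parameter name is illustrative only.
      op = opcodes.OpActivateInstanceDisks(
        instance_name=instance_name, ignore_size=True)
      SubmitOpCode(op)
      # With the disks active, their real sizes can be read and reconciled
      return SubmitOpCode(opcodes.OpRepairDiskSizes(instances=[instance_name]))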
b/scripts/gnt-cluster
@@ -409,6 +409,20 @@
   return retcode
 
 
+def RepairDiskSizes(opts, args):
+  """Verify sizes of cluster disks.
+
+  @param opts: the command line options selected by the user
+  @type args: list
+  @param args: optional list of instances to restrict check to
+  @rtype: int
+  @return: the desired exit code
+
+  """
+  op = opcodes.OpRepairDiskSizes(instances=args)
+  SubmitOpCode(op)
+
+
 @UsesRPC
 def MasterFailover(opts, args):
   """Failover the master node.
@@ -621,6 +635,8 @@
              "", "Does a check on the cluster configuration"),
   'verify-disks': (VerifyDisks, ARGS_NONE, [DEBUG_OPT],
                    "", "Does a check on the cluster disk status"),
+  'repair-disk-sizes': (RepairDiskSizes, ARGS_ANY, [DEBUG_OPT],
+                   "", "Updates mismatches in recorded disk sizes"),
   'masterfailover': (MasterFailover, ARGS_NONE, [DEBUG_OPT,
                      make_option("--no-voting", dest="no_voting",
                                  help="Skip node agreement check (dangerous)",