gnt-group list: Query filter support
diff --git a/lib/client/gnt_instance.py b/lib/client/gnt_instance.py
index 0029a53..e26a8cd 100644
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -57,8 +57,6 @@ _SHUTDOWN_NODES_TAGS_MODES = (
     _SHUTDOWN_NODES_SEC_BY_TAGS)
 
 
-_VALUE_TRUE = "true"
-
 #: default list of options for L{ListInstances}
 _LIST_DEF_FIELDS = [
   "name", "hypervisor", "os", "pnode", "status", "oper_ram",
@@ -144,44 +142,6 @@ def _ExpandMultiNames(mode, names, client=None):
   return inames
 
 
-def _ConfirmOperation(inames, text, extra=""):
-  """Ask the user to confirm an operation on a list of instances.
-
-  This function is used to request confirmation for doing an operation
-  on a given list of instances.
-
-  @type inames: list
-  @param inames: the list of names that we display when
-      we ask for confirmation
-  @type text: str
-  @param text: the operation that the user should confirm
-      (e.g. I{shutdown} or I{startup})
-  @rtype: boolean
-  @return: True or False depending on user's confirmation.
-
-  """
-  count = len(inames)
-  msg = ("The %s will operate on %d instances.\n%s"
-         "Do you want to continue?" % (text, count, extra))
-  affected = ("\nAffected instances:\n" +
-              "\n".join(["  %s" % name for name in inames]))
-
-  choices = [('y', True, 'Yes, execute the %s' % text),
-             ('n', False, 'No, abort the %s' % text)]
-
-  if count > 20:
-    choices.insert(1, ('v', 'v', 'View the list of affected instances'))
-    ask = msg
-  else:
-    ask = msg + affected
-
-  choice = AskUser(ask, choices)
-  if choice == 'v':
-    choices.pop(1)
-    choice = AskUser(msg + affected, choices)
-  return choice
-
-
 def _EnsureInstancesExist(client, names):
   """Check for and ensure the given instance names exist.
 
@@ -195,7 +155,7 @@ def _EnsureInstancesExist(client, names):
   @raise errors.OpPrereqError: in case any instance is missing
 
   """
-  # TODO: change LUQueryInstances to that it actually returns None
+  # TODO: change LUInstanceQuery to that it actually returns None
   # instead of raising an exception, or devise a better mechanism
   result = client.QueryInstances(names, ["name"], False)
   for orig_name, row in zip(names, result):
@@ -219,11 +179,14 @@ def GenericManyOps(operation, fn):
     cl = GetClient()
     inames = _ExpandMultiNames(opts.multi_mode, args, client=cl)
     if not inames:
+      if opts.multi_mode == _SHUTDOWN_CLUSTER:
+        ToStdout("Cluster is empty, no instances to shutdown")
+        return 0
       raise errors.OpPrereqError("Selection filter does not match"
                                  " any instances", errors.ECODE_INVAL)
     multi_on = opts.multi_mode != _SHUTDOWN_INSTANCES or len(inames) > 1
     if not (opts.force_multi or not multi_on
-            or _ConfirmOperation(inames, operation)):
+            or ConfirmOperation(inames, "instances", operation)):
       return 1
     jex = JobExecutor(verbose=multi_on, cl=cl, opts=opts)
     for name in inames:
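
The module-private _ConfirmOperation() removed above is replaced by a shared ConfirmOperation() helper from ganeti.cli, which additionally takes the kind of object being operated on ("instances" here) so the same prompt can serve nodes, groups, etc. The following is only a sketch of such a helper, reconstructed from the removed body and the new call sites; the real implementation lives in lib/cli.py and may differ in detail.

```python
# Sketch only: a generalised confirmation helper along the lines of the
# removed _ConfirmOperation(), extended with an object kind ("instances",
# "nodes", ...).  AskUser() is the existing prompt helper from ganeti.cli.
from ganeti.cli import AskUser

def ConfirmOperation(names, keyword, text, extra=""):
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, keyword, extra))
  affected = ("\nAffected %s:\n%s" %
              (keyword, "\n".join("  %s" % name for name in names)))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    # For long lists, offer a "view" choice instead of printing everything
    choices.insert(1, ("v", "v", "View the list of affected %s" % keyword))
    ask = msg
  else:
    ask = msg + affected

  choice = AskUser(ask, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
```
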
@@ -256,7 +219,8 @@ def ListInstances(opts, args):
 
   return GenericList(constants.QR_INSTANCE, selected_fields, args, opts.units,
                      opts.separator, not opts.no_headers,
-                     format_override=fmtoverride)
+                     format_override=fmtoverride, verbose=opts.verbose,
+                     force_filter=opts.force_filter)
 
 
 def ListInstanceFields(opts, args):
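
In line with the query filter support this change introduces, ListInstances() now forwards two new options to GenericList(): verbose output and force_filter, the latter making the positional arguments be parsed as a query filter instead of instance names. Below is an illustrative call with the same argument shape as the one above; the field names, the filter expression, and the option values are assumptions, not taken from the patch.

```python
# Illustrative only: list name/status of instances whose primary node is
# node1.example.com, forcing the single argument to be treated as a filter.
# Argument order mirrors the ListInstances() call above; values are made up.
from ganeti import constants
from ganeti.cli import GenericList

GenericList(constants.QR_INSTANCE,
            ["name", "status"],                  # selected fields
            ['pnode == "node1.example.com"'],    # args, interpreted as a filter
            None, None, True,                    # units, separator, headers
            format_override=None, verbose=False, force_filter=True)
```
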
@@ -340,7 +304,7 @@ def BatchCreate(opts, args):
                                    required_field, errors.ECODE_INVAL)
     # Validate special fields
     if spec['primary_node'] is not None:
-      if (spec['template'] in constants.DTS_NET_MIRROR and
+      if (spec['template'] in constants.DTS_INT_MIRROR and
           spec['secondary_node'] is None):
         raise errors.OpPrereqError('Template requires secondary node, but'
                                    ' there was no secondary provided.',
@@ -394,7 +358,7 @@ def BatchCreate(opts, args):
     utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
 
     tmp_nics = []
-    for field in ('ip', 'mac', 'mode', 'link', 'bridge'):
+    for field in constants.INIC_PARAMS:
       if field in specs:
         if not tmp_nics:
           tmp_nics.append({})
@@ -409,7 +373,7 @@ def BatchCreate(opts, args):
     elif not tmp_nics:
       tmp_nics = [{}]
 
-    op = opcodes.OpCreateInstance(instance_name=name,
+    op = opcodes.OpInstanceCreate(instance_name=name,
                                   disks=disks,
                                   disk_template=specs['template'],
                                   mode=constants.INSTANCE_CREATE,
@@ -458,7 +422,7 @@ def ReinstallInstance(opts, args):
 
   # second, if requested, ask for an OS
   if opts.select_os is True:
-    op = opcodes.OpDiagnoseOS(output_fields=["name", "variants"], names=[])
+    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
     result = SubmitOpCode(op, opts=opts)
 
     if not result:
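
The OS diagnose opcode is renamed from OpDiagnoseOS to OpOsDiagnose; the query itself is unchanged and returns one row per OS containing the requested output fields. A minimal sketch of the same query outside the interactive selection loop (assumed to run on the master node):

```python
# Minimal sketch: ask for OS names and their variants, mirroring the
# selection query used in ReinstallInstance() above.
from ganeti import opcodes
from ganeti.cli import SubmitOpCode, ToStdout

op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
for name, variants in SubmitOpCode(op):
  ToStdout("%s (variants: %s)", name, ", ".join(variants))
```
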
@@ -483,28 +447,34 @@ def ReinstallInstance(opts, args):
       return 1
 
     os_name = selected
+    os_msg = "change the OS to '%s'" % selected
   else:
     os_name = opts.os
+    if opts.os is not None:
+      os_msg = "change the OS to '%s'" % os_name
+    else:
+      os_msg = "keep the same OS"
 
   # third, get confirmation: multi-reinstall requires --force-multi,
   # single-reinstall either --force or --force-multi (--force-multi is
   # a stronger --force)
   multi_on = opts.multi_mode != _SHUTDOWN_INSTANCES or len(inames) > 1
   if multi_on:
-    warn_msg = "Note: this will remove *all* data for the below instances!\n"
+    warn_msg = ("Note: this will remove *all* data for the"
+                " below instances! It will %s.\n" % os_msg)
     if not (opts.force_multi or
-            _ConfirmOperation(inames, "reinstall", extra=warn_msg)):
+            ConfirmOperation(inames, "instances", "reinstall", extra=warn_msg)):
       return 1
   else:
     if not (opts.force or opts.force_multi):
-      usertext = ("This will reinstall the instance %s and remove"
-                  " all data. Continue?") % inames[0]
+      usertext = ("This will reinstall the instance '%s' (and %s) which"
+                  " removes all data. Continue?") % (inames[0], os_msg)
       if not AskUser(usertext):
         return 1
 
   jex = JobExecutor(verbose=multi_on, opts=opts)
   for instance_name in inames:
-    op = opcodes.OpReinstallInstance(instance_name=instance_name,
+    op = opcodes.OpInstanceReinstall(instance_name=instance_name,
                                      os_type=os_name,
                                      force_variant=opts.force_variant,
                                      osparams=opts.osparams)
@@ -538,7 +508,7 @@ def RemoveInstance(opts, args):
     if not AskUser(usertext):
       return 1
 
-  op = opcodes.OpRemoveInstance(instance_name=instance_name,
+  op = opcodes.OpInstanceRemove(instance_name=instance_name,
                                 ignore_failures=opts.ignore_failures,
                                 shutdown_timeout=opts.shutdown_timeout)
   SubmitOrSend(op, opts, cl=cl)
@@ -561,7 +531,7 @@ def RenameInstance(opts, args):
                    " that '%s' is a FQDN. Continue?" % args[1]):
       return 1
 
-  op = opcodes.OpRenameInstance(instance_name=args[0],
+  op = opcodes.OpInstanceRename(instance_name=args[0],
                                 new_name=args[1],
                                 ip_check=opts.ip_check,
                                 name_check=opts.name_check)
@@ -611,7 +581,8 @@ def DeactivateDisks(opts, args):
 
   """
   instance_name = args[0]
-  op = opcodes.OpDeactivateInstanceDisks(instance_name=instance_name)
+  op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name,
+                                         force=opts.force)
   SubmitOrSend(op, opts)
   return 0
 
@@ -636,7 +607,7 @@ def RecreateDisks(opts, args):
   else:
     opts.disks = []
 
-  op = opcodes.OpRecreateInstanceDisks(instance_name=instance_name,
+  op = opcodes.OpInstanceRecreateDisks(instance_name=instance_name,
                                        disks=opts.disks)
   SubmitOrSend(op, opts)
   return 0
@@ -661,8 +632,9 @@ def GrowDisk(opts, args):
     raise errors.OpPrereqError("Invalid disk index: %s" % str(err),
                                errors.ECODE_INVAL)
   amount = utils.ParseUnit(args[2])
-  op = opcodes.OpGrowDisk(instance_name=instance, disk=disk, amount=amount,
-                          wait_for_sync=opts.wait_for_sync)
+  op = opcodes.OpInstanceGrowDisk(instance_name=instance,
+                                  disk=disk, amount=amount,
+                                  wait_for_sync=opts.wait_for_sync)
   SubmitOrSend(op, opts)
   return 0
 
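GrowDisk() still validates the disk index and runs the size argument through utils.ParseUnit(), which normalises human-readable sizes to mebibytes; only the opcode name changes to OpInstanceGrowDisk. A small hedged example of the opcode it ends up building (the instance name and values are made up):

```python
# Hedged example only: grow disk 0 of a hypothetical instance by 2 GiB and
# wait for the resync to finish.  utils.ParseUnit("2g") yields the amount in MiB.
from ganeti import opcodes, utils

amount = utils.ParseUnit("2g")        # -> 2048
op = opcodes.OpInstanceGrowDisk(instance_name="instance1.example.com",
                                disk=0, amount=amount,
                                wait_for_sync=True)
```
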
@@ -678,7 +650,7 @@ def _StartupInstance(name, opts):
   @return: the opcode needed for the operation
 
   """
-  op = opcodes.OpStartupInstance(instance_name=name,
+  op = opcodes.OpInstanceStartup(instance_name=name,
                                  force=opts.force,
                                  ignore_offline_nodes=opts.ignore_offline)
   # do not add these parameters to the opcode unless they're defined
@@ -700,7 +672,7 @@ def _RebootInstance(name, opts):
   @return: the opcode needed for the operation
 
   """
-  return opcodes.OpRebootInstance(instance_name=name,
+  return opcodes.OpInstanceReboot(instance_name=name,
                                   reboot_type=opts.reboot_type,
                                   ignore_secondaries=opts.ignore_secondaries,
                                   shutdown_timeout=opts.shutdown_timeout)
@@ -717,7 +689,7 @@ def _ShutdownInstance(name, opts):
   @return: the opcode needed for the operation
 
   """
-  return opcodes.OpShutdownInstance(instance_name=name,
+  return opcodes.OpInstanceShutdown(instance_name=name,
                                     timeout=opts.timeout,
                                     ignore_offline_nodes=opts.ignore_offline)
 
@@ -760,10 +732,10 @@ def ReplaceDisks(opts, args):
     # replace secondary
     mode = constants.REPLACE_DISK_CHG
 
-  op = opcodes.OpReplaceDisks(instance_name=args[0], disks=disks,
-                              remote_node=new_2ndary, mode=mode,
-                              iallocator=iallocator,
-                              early_release=opts.early_release)
+  op = opcodes.OpInstanceReplaceDisks(instance_name=args[0], disks=disks,
+                                      remote_node=new_2ndary, mode=mode,
+                                      iallocator=iallocator,
+                                      early_release=opts.early_release)
   SubmitOrSend(op, opts)
   return 0
 
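OpReplaceDisks becomes OpInstanceReplaceDisks with the same parameters. As a hedged illustration of the secondary-replacement path selected above (mode REPLACE_DISK_CHG), this is roughly the opcode one would build to let an iallocator pick the new secondary node; the values are made up and the empty disks list is assumed to mean "all disks".

```python
# Illustration only, values made up: change the secondary node of a DRBD
# instance, with the "hail" iallocator choosing the replacement node.
from ganeti import constants, opcodes

op = opcodes.OpInstanceReplaceDisks(instance_name="instance1.example.com",
                                    disks=[],            # assumed: all disks
                                    mode=constants.REPLACE_DISK_CHG,
                                    remote_node=None,
                                    iallocator="hail",
                                    early_release=True)
```
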
@@ -784,6 +756,12 @@ def FailoverInstance(opts, args):
   cl = GetClient()
   instance_name = args[0]
   force = opts.force
+  iallocator = opts.iallocator
+  target_node = opts.dst_node
+
+  if iallocator and target_node:
+    raise errors.OpPrereqError("Specify either an iallocator (-I), or a target"
+                               " node (-n) but not both", errors.ECODE_INVAL)
 
   if not force:
     _EnsureInstancesExist(cl, [instance_name])
@@ -794,9 +772,11 @@ def FailoverInstance(opts, args):
     if not AskUser(usertext):
       return 1
 
-  op = opcodes.OpFailoverInstance(instance_name=instance_name,
+  op = opcodes.OpInstanceFailover(instance_name=instance_name,
                                   ignore_consistency=opts.ignore_consistency,
-                                  shutdown_timeout=opts.shutdown_timeout)
+                                  shutdown_timeout=opts.shutdown_timeout,
+                                  iallocator=iallocator,
+                                  target_node=target_node)
   SubmitOrSend(op, opts, cl=cl)
   return 0
 
@@ -816,6 +796,12 @@ def MigrateInstance(opts, args):
   cl = GetClient()
   instance_name = args[0]
   force = opts.force
+  iallocator = opts.iallocator
+  target_node = opts.dst_node
+
+  if iallocator and target_node:
+    raise errors.OpPrereqError("Specify either an iallocator (-I), or a target"
+                               " node (-n) but not both", errors.ECODE_INVAL)
 
   if not force:
     _EnsureInstancesExist(cl, [instance_name])
@@ -842,8 +828,10 @@ def MigrateInstance(opts, args):
   else:
     mode = opts.migration_mode
 
-  op = opcodes.OpMigrateInstance(instance_name=instance_name, mode=mode,
-                                 cleanup=opts.cleanup)
+  op = opcodes.OpInstanceMigrate(instance_name=instance_name, mode=mode,
+                                 cleanup=opts.cleanup, iallocator=iallocator,
+                                 target_node=target_node,
+                                 allow_failover=opts.allow_failover)
   SubmitOpCode(op, cl=cl, opts=opts)
   return 0
 
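Both FailoverInstance() and MigrateInstance() now accept either an iallocator or an explicit target node (mutually exclusive, as the new check enforces), and migration additionally gains allow_failover so an instance that cannot be live-migrated may fall back to failover. A hedged sketch of driving the new migrate parameters programmatically follows; the instance and iallocator names are made up, and on the command line this corresponds to the -I and -n options named in the error message above plus the new allow-failover flag.

```python
# Sketch only: migrate an instance, letting the "hail" iallocator choose the
# target node and permitting a failover if live migration is not possible.
from ganeti import opcodes
from ganeti.cli import GetClient, SubmitOpCode

op = opcodes.OpInstanceMigrate(instance_name="instance1.example.com",
                               mode=None,       # assumed: use the cluster default
                               cleanup=False,
                               iallocator="hail",
                               target_node=None,
                               allow_failover=True)
SubmitOpCode(op, cl=GetClient())
```
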
@@ -869,7 +857,7 @@ def MoveInstance(opts, args):
     if not AskUser(usertext):
       return 1
 
-  op = opcodes.OpMoveInstance(instance_name=instance_name,
+  op = opcodes.OpInstanceMove(instance_name=instance_name,
                               target_node=opts.node,
                               shutdown_timeout=opts.shutdown_timeout)
   SubmitOrSend(op, opts, cl=cl)
@@ -888,7 +876,7 @@ def ConnectToInstanceConsole(opts, args):
   """
   instance_name = args[0]
 
-  op = opcodes.OpConnectConsole(instance_name=instance_name)
+  op = opcodes.OpInstanceConsole(instance_name=instance_name)
 
   cl = GetClient()
   try:
@@ -906,7 +894,7 @@ def ConnectToInstanceConsole(opts, args):
 
 def _DoConsole(console, show_command, cluster_name, feedback_fn=ToStdout,
                _runcmd_fn=utils.RunCmd):
-  """Acts based on the result of L{opcodes.OpConnectConsole}.
+  """Acts based on the result of L{opcodes.OpInstanceConsole}.
 
   @type console: L{objects.InstanceConsole}
   @param console: Console object
@@ -1141,7 +1129,7 @@ def ShowInstanceConfig(opts, args):
     return 1
 
   retcode = 0
-  op = opcodes.OpQueryInstanceData(instances=args, static=opts.static)
+  op = opcodes.OpInstanceQueryData(instances=args, static=opts.static)
   result = SubmitOpCode(op, opts=opts)
   if not result:
     ToStdout("No instances.")
@@ -1273,13 +1261,13 @@ def SetInstanceParams(opts, args):
       disk_dict['size'] = utils.ParseUnit(disk_dict['size'])
 
   if (opts.disk_template and
-      opts.disk_template in constants.DTS_NET_MIRROR and
+      opts.disk_template in constants.DTS_INT_MIRROR and
       not opts.node):
     ToStderr("Changing the disk template to a mirrored one requires"
              " specifying a secondary node")
     return 1
 
-  op = opcodes.OpSetInstanceParams(instance_name=args[0],
+  op = opcodes.OpInstanceSetParams(instance_name=args[0],
                                    nics=opts.nics,
                                    disks=opts.disks,
                                    disk_template=opts.disk_template,
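
The mirrored-template check now uses DTS_INT_MIRROR (internally mirrored templates, i.e. DRBD) instead of the old DTS_NET_MIRROR name, and the opcode becomes OpInstanceSetParams. A hedged example of the conversion that check guards, switching an instance to DRBD with a named secondary node; the remote_node parameter and all values are assumptions, not shown in this hunk.

```python
# Hedged example, values made up: convert an instance to the DRBD template,
# naming the new secondary node (remote_node is assumed to be the parameter
# that opts.node maps to).
from ganeti import constants, opcodes

op = opcodes.OpInstanceSetParams(instance_name="instance1.example.com",
                                 disk_template=constants.DT_DRBD8,
                                 remote_node="node2.example.com")
```
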
@@ -1373,15 +1361,15 @@ commands = {
   'failover': (
     FailoverInstance, ARGS_ONE_INSTANCE,
     [FORCE_OPT, IGNORE_CONSIST_OPT, SUBMIT_OPT, SHUTDOWN_TIMEOUT_OPT,
-     DRY_RUN_OPT, PRIORITY_OPT],
+     DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT],
     "[-f] <instance>", "Stops the instance and starts it on the backup node,"
-    " using the remote mirror (only for instances of type drbd)"),
+    " using the remote mirror (only for mirrored instances)"),
   'migrate': (
     MigrateInstance, ARGS_ONE_INSTANCE,
     [FORCE_OPT, NONLIVE_OPT, MIGRATION_MODE_OPT, CLEANUP_OPT, DRY_RUN_OPT,
-     PRIORITY_OPT],
+     PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT, ALLOW_FAILOVER_OPT],
     "[-f] <instance>", "Migrate instance to its secondary node"
-    " (only for instances of type drbd)"),
+    " (only for mirrored instances)"),
   'move': (
     MoveInstance, ARGS_ONE_INSTANCE,
     [FORCE_OPT, SUBMIT_OPT, SINGLE_NODE_OPT, SHUTDOWN_TIMEOUT_OPT,
@@ -1395,7 +1383,8 @@ commands = {
     "Show information on the specified instance(s)"),
   'list': (
     ListInstances, ARGS_MANY_INSTANCES,
-    [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT],
+    [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, VERBOSE_OPT,
+     FORCE_FILTER_OPT],
     "[<instance>...]",
     "Lists the instances and their status. The available fields can be shown"
     " using the \"list-fields\" command (see the man page for details)."
@@ -1464,8 +1453,8 @@ commands = {
     "<instance>", "Activate an instance's disks"),
   'deactivate-disks': (
     DeactivateDisks, ARGS_ONE_INSTANCE,
-    [SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
-    "<instance>", "Deactivate an instance's disks"),
+    [FORCE_OPT, SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
+    "[-f] <instance>", "Deactivate an instance's disks"),
   'recreate-disks': (
     RecreateDisks, ARGS_ONE_INSTANCE,
     [SUBMIT_OPT, DISKIDX_OPT, DRY_RUN_OPT, PRIORITY_OPT],