- Ignore failures while shutting down instances during failover from
offline node
- Exit daemon's bootstrap process only once daemon is ready
-- Export more information via ``LUQueryInstances``/remote API
+- Export more information via ``LUInstanceQuery``/remote API
- Improved documentation, QA and unittests
- RAPI daemon now watches ``rapi_users`` all the time and doesn't need a
restart if the file was created or changed
- ``OpInstanceCreate``, where the new hv and be parameters will be sent
as dictionaries; note that all hv and be parameters are now optional,
as the values can be instead taken from the cluster
-- ``OpQueryInstances``, where we have to be able to query these new
+- ``OpInstanceQuery``, where we have to be able to query these new
parameters; the syntax for names will be ``hvparam/$NAME`` and
``beparam/$NAME`` for querying an individual parameter out of one
dictionary, and ``hvparams``, respectively ``beparams``, for the whole
@raise errors.OpPrereqError: in case any instance is missing
"""
- # TODO: change LUQueryInstances to that it actually returns None
+ # TODO: change LUInstanceQuery so that it actually returns None
# instead of raising an exception, or devise a better mechanism
result = client.QueryInstances(names, ["name"], False)
for orig_name, row in zip(names, result):
lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
-class LUQueryInstances(NoHooksLU):
+class LUInstanceQuery(NoHooksLU):
"""Logical unit for querying instances.
"""
]
-class OpQueryInstances(OpCode):
+class OpInstanceQuery(OpCode):
"""Compute the list of instances."""
OP_ID = "OP_INSTANCE_QUERY"
OP_PARAMS = [
#
#
-# Copyright (C) 2006, 2007, 2010 Google Inc.
+# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
if use_locking:
raise errors.OpPrereqError("Sync queries are not allowed",
errors.ECODE_INVAL)
- op = opcodes.OpQueryInstances(names=names, output_fields=fields,
- use_locking=use_locking)
+ op = opcodes.OpInstanceQuery(names=names, output_fields=fields,
+ use_locking=use_locking)
return self._Query(op)
elif method == luxi.REQ_QUERY_NODES:
#
#
-# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
"""
op1_fields = ["name", "status", "admin_state", "snodes"]
- op1 = opcodes.OpQueryInstances(output_fields=op1_fields, names=[],
- use_locking=True)
+ op1 = opcodes.OpInstanceQuery(output_fields=op1_fields, names=[],
+ use_locking=True)
op2_fields = ["name", "bootid", "offline"]
op2 = opcodes.OpQueryNodes(output_fields=op2_fields, names=[],
use_locking=True)
for pnode, snode, enode, instance in mytor:
Log("instance %s", instance, indent=1)
# read the full name of the instance
- nam_op = opcodes.OpQueryInstances(output_fields=["name"],
- names=[instance], use_locking=True)
+ nam_op = opcodes.OpInstanceQuery(output_fields=["name"],
+ names=[instance], use_locking=True)
full_name = self.ExecOp(False, nam_op)[0][0]
if self.opts.iallocator: