#
#
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
"""
-# pylint: disable-msg=C0103
+# pylint: disable=C0103
# C0103: Invalid name, since the R_* names are not conforming
from ganeti import opcodes
+from ganeti import objects
from ganeti import http
from ganeti import constants
from ganeti import cli
+# Fields returned by instance queries; per-NIC and per-disk "uuids"/"names"
+# entries are exposed in addition to the legacy per-NIC/per-disk lists.
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
-            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
+            "nic.ips", "nic.macs", "nic.modes", "nic.uuids", "nic.names",
+            "nic.links", "nic.networks", "nic.networks.names", "nic.bridges",
            "network_port",
-            "disk.sizes", "disk_usage",
+            "disk.sizes", "disk.spindles", "disk_usage", "disk.uuids",
+            "disk.names",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
            ] + _COMMON_FIELDS
+# Fields returned by node queries; spindle totals ("sptotal"/"spfree"),
+# "cnos" and "ndparams" are newly exposed.
N_FIELDS = ["name", "offline", "master_candidate", "drained",
-            "dtotal", "dfree",
+            "dtotal", "dfree", "sptotal", "spfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
-            "ctotal", "cnodes", "csockets",
+            "ctotal", "cnos", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",
+            "ndparams",
            "group.uuid",
            ] + _COMMON_FIELDS
+# Fields returned by network queries (used by the /2/networks resources).
+NET_FIELDS = ["name", "network", "gateway",
+              "network6", "gateway6",
+              "mac_prefix",
+              "free_count", "reserved_count",
+              "map", "group_list", "inst_list",
+              "external_reservations",
+              ] + _COMMON_FIELDS
+
+# Fields returned by node group queries; policies and node parameters
+# (plus their custom_* counterparts) are newly exposed.
G_FIELDS = [
  "alloc_policy",
  "name",
  "node_cnt",
  "node_list",
+  "ipolicy",
+  "custom_ipolicy",
+  "diskparams",
+  "custom_diskparams",
+  "ndparams",
+  "custom_ndparams",
  ] + _COMMON_FIELDS
J_FIELDS_BULK = [
# Feature string for node evacuation with LU-generated jobs
_NODE_EVAC_RES1 = "node-evac-res1"
-ALL_FEATURES = frozenset([
+ALL_FEATURES = compat.UniqueFrozenset([
_INST_CREATE_REQV1,
_INST_REINSTALL_REQV1,
_NODE_MIGRATE_REQV1,
_WFJC_TIMEOUT = 10
+# FIXME: For compatibility we update the beparams/memory field. Needs to be
+# removed in Ganeti 2.8
+def _UpdateBeparams(inst):
+  """Updates the beparams dict of inst to support the memory field.
+
+  Copies the C{BE_MAXMEM} value into a C{BE_MEMORY} entry, as noted in
+  the compatibility FIXME above.
+
+  @param inst: Inst dict
+  @return: Updated inst dict
+
+  @note: C{inst} is modified in place (its nested beparams dict gains
+    the extra key); the returned object is the same dict
+
+  """
+  beparams = inst["beparams"]
+  beparams[constants.BE_MEMORY] = beparams[constants.BE_MAXMEM]
+
+  return inst
+
+
class R_root(baserlib.ResourceBase):
"""/ resource.
"""Returns cluster information.
"""
- client = self.GetClient()
+ client = self.GetClient(query=True)
return client.QueryClusterInfo()
@return: a dictionary with jobs id and uri.
"""
- client = self.GetClient()
+ client = self.GetClient(query=True)
if self.useBulk():
bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
"""
job_id = self.items[0]
- result = self.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
+ result = self.GetClient(query=True).QueryJobs([job_id, ], J_FIELDS)[0]
if result is None:
raise http.HttpNotFound()
return baserlib.MapFields(J_FIELDS, result)
"""Returns a list of all nodes.
"""
- client = self.GetClient()
+ client = self.GetClient(query=True)
if self.useBulk():
bulkdata = client.QueryNodes([], N_FIELDS, False)
"""
node_name = self.items[0]
- client = self.GetClient()
+ client = self.GetClient(query=True)
result = baserlib.HandleItemQueryErrors(client.QueryNodes,
names=[node_name], fields=N_FIELDS,
return baserlib.MapFields(N_FIELDS, result[0])
+class R_2_nodes_name_powercycle(baserlib.OpcodeResource):
+  """/2/nodes/[node_name]/powercycle resource.
+
+  """
+  POST_OPCODE = opcodes.OpNodePowercycle
+
+  def GetPostOpInput(self):
+    """Tries to powercycle a node.
+
+    @return: (body, static overrides) for L{opcodes.OpNodePowercycle};
+      the node name comes from the URI, "force" from the query string
+
+    """
+    return (self.request_body, {
+      "node_name": self.items[0],
+      "force": self.useForce(),
+      })
+
+
class R_2_nodes_name_role(baserlib.OpcodeResource):
"""/2/nodes/[node_name]/role resource.
"""
node_name = self.items[0]
- client = self.GetClient()
+ client = self.GetClient(query=True)
result = client.QueryNodes(names=[node_name], fields=["role"],
use_locking=self.useLocking())
"offline": offline,
"drained": drained,
"force": self.useForce(),
+ "auto_promote": bool(self._checkIntVariable("auto-promote", default=0)),
})
})
+class R_2_nodes_name_modify(baserlib.OpcodeResource):
+  """/2/nodes/[node_name]/modify resource.
+
+  """
+  POST_OPCODE = opcodes.OpNodeSetParams
+
+  def GetPostOpInput(self):
+    """Changes parameters of a node.
+
+    All opcode parameters except the node name (taken from the URI) come
+    from the request body.
+
+    """
+    assert len(self.items) == 1
+
+    return (self.request_body, {
+      "node_name": self.items[0],
+      })
+
+
class R_2_nodes_name_storage(baserlib.OpcodeResource):
"""/2/nodes/[node_name]/storage resource.
})
+class R_2_networks(baserlib.OpcodeResource):
+  """/2/networks resource.
+
+  """
+  GET_OPCODE = opcodes.OpNetworkQuery
+  POST_OPCODE = opcodes.OpNetworkAdd
+  # The RAPI body field "name" maps to the opcode's "network_name"
+  POST_RENAME = {
+    "name": "network_name",
+    }
+
+  def GetPostOpInput(self):
+    """Create a network.
+
+    """
+    assert not self.items
+    return (self.request_body, {
+      "dry_run": self.dryRun(),
+      })
+
+  def GET(self):
+    """Returns a list of all networks.
+
+    @return: in bulk mode, one dict of NET_FIELDS values per network;
+      otherwise a list of name/URI pairs
+
+    """
+    client = self.GetClient(query=True)
+
+    if self.useBulk():
+      bulkdata = client.QueryNetworks([], NET_FIELDS, False)
+      return baserlib.MapBulkFields(bulkdata, NET_FIELDS)
+    else:
+      # Non-bulk: only query the names and build URIs from them
+      data = client.QueryNetworks([], ["name"], False)
+      networknames = [row[0] for row in data]
+      return baserlib.BuildUriList(networknames, "/2/networks/%s",
+                                   uri_fields=("name", "uri"))
+
+
+class R_2_networks_name(baserlib.OpcodeResource):
+  """/2/networks/[network_name] resource.
+
+  """
+  DELETE_OPCODE = opcodes.OpNetworkRemove
+
+  def GET(self):
+    """Send information about a network.
+
+    @return: a dict with the NET_FIELDS values of the named network
+
+    """
+    network_name = self.items[0]
+    client = self.GetClient(query=True)
+
+    # HandleItemQueryErrors wraps the query so errors for unknown items
+    # surface as HTTP errors (NOTE(review): exact mapping is in baserlib)
+    result = baserlib.HandleItemQueryErrors(client.QueryNetworks,
+                                            names=[network_name],
+                                            fields=NET_FIELDS,
+                                            use_locking=self.useLocking())
+
+    return baserlib.MapFields(NET_FIELDS, result[0])
+
+  def GetDeleteOpInput(self):
+    """Delete a network.
+
+    The network name comes from the URI, dry-run from the query string.
+
+    """
+    assert len(self.items) == 1
+    return (self.request_body, {
+      "network_name": self.items[0],
+      "dry_run": self.dryRun(),
+      })
+
+
+class R_2_networks_name_connect(baserlib.OpcodeResource):
+  """/2/networks/[network_name]/connect resource.
+
+  """
+  PUT_OPCODE = opcodes.OpNetworkConnect
+
+  def GetPutOpInput(self):
+    """Connects a network.
+
+    The network name comes from the URI; the remaining opcode parameters
+    are taken from the request body.
+
+    """
+    assert self.items
+    return (self.request_body, {
+      "network_name": self.items[0],
+      "dry_run": self.dryRun(),
+      })
+
+
+class R_2_networks_name_disconnect(baserlib.OpcodeResource):
+  """/2/networks/[network_name]/disconnect resource.
+
+  """
+  PUT_OPCODE = opcodes.OpNetworkDisconnect
+
+  def GetPutOpInput(self):
+    """Disconnects a network.
+
+    The network name comes from the URI; the remaining opcode parameters
+    are taken from the request body.
+
+    """
+    assert self.items
+    return (self.request_body, {
+      "network_name": self.items[0],
+      "dry_run": self.dryRun(),
+      })
+
+
+class R_2_networks_name_modify(baserlib.OpcodeResource):
+  """/2/networks/[network_name]/modify resource.
+
+  """
+  PUT_OPCODE = opcodes.OpNetworkSetParams
+
+  def GetPutOpInput(self):
+    """Changes some parameters of a network.
+
+    The network name comes from the URI; the parameters to change come
+    from the request body.
+
+    """
+    assert self.items
+    return (self.request_body, {
+      "network_name": self.items[0],
+      })
+
+
class R_2_groups(baserlib.OpcodeResource):
"""/2/groups resource.
def GetPostOpInput(self):
"""Create a node group.
+
"""
assert not self.items
return (self.request_body, {
"""Returns a list of all node groups.
"""
- client = self.GetClient()
+ client = self.GetClient(query=True)
if self.useBulk():
bulkdata = client.QueryGroups([], G_FIELDS, False)
"""
group_name = self.items[0]
- client = self.GetClient()
+ client = self.GetClient(query=True)
result = baserlib.HandleItemQueryErrors(client.QueryGroups,
names=[group_name], fields=G_FIELDS,
use_locking = self.useLocking()
if self.useBulk():
bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
- return baserlib.MapBulkFields(bulkdata, I_FIELDS)
+ return map(_UpdateBeparams, baserlib.MapBulkFields(bulkdata, I_FIELDS))
else:
instancesdata = client.QueryInstances([], ["name"], use_locking)
instanceslist = [row[0] for row in instancesdata]
})
+class R_2_instances_multi_alloc(baserlib.OpcodeResource):
+  """/2/instances-multi-alloc resource.
+
+  """
+  POST_OPCODE = opcodes.OpInstanceMultiAlloc
+
+  def GetPostOpInput(self):
+    """Try to allocate multiple instances.
+
+    @return: A dict with submitted jobs, allocatable instances and failed
+             allocations
+    @raise http.HttpBadRequest: if the body lacks an "instances" list
+
+    """
+    if "instances" not in self.request_body:
+      raise http.HttpBadRequest("Request is missing required 'instances' field"
+                                " in body")
+
+    # Stamp the opcode identifier onto every instance dict; presumably so
+    # each entry can be deserialized as an opcode — TODO confirm against
+    # OpInstanceMultiAlloc's expected input format
+    op_id = {
+      "OP_ID": self.POST_OPCODE.OP_ID, # pylint: disable=E1101
+      }
+    body = objects.FillDict(self.request_body, {
+      "instances": [objects.FillDict(inst, op_id)
+                    for inst in self.request_body["instances"]],
+      })
+
+    return (body, {
+      "dry_run": self.dryRun(),
+      })
+
+
class R_2_instances_name(baserlib.OpcodeResource):
"""/2/instances/[instance_name] resource.
fields=I_FIELDS,
use_locking=self.useLocking())
- return baserlib.MapFields(I_FIELDS, result[0])
+ return _UpdateBeparams(baserlib.MapFields(I_FIELDS, result[0]))
def GetDeleteOpInput(self):
"""Delete an instance.
"""Replaces disks on an instance.
"""
- data = self.request_body.copy()
static = {
"instance_name": self.items[0],
}
+ if self.request_body:
+ data = self.request_body
+ elif self.queryargs:
+ # Legacy interface, do not modify/extend
+ data = {
+ "remote_node": self._checkStringVariable("remote_node", default=None),
+ "mode": self._checkStringVariable("mode", default=None),
+ "disks": self._checkStringVariable("disks", default=None),
+ "iallocator": self._checkStringVariable("iallocator", default=None),
+ }
+ else:
+ data = {}
+
# Parse disks
try:
- raw_disks = data["disks"]
+ raw_disks = data.pop("disks")
except KeyError:
pass
else:
- if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102
- # Backwards compatibility for strings of the format "1, 2, 3"
- try:
- data["disks"] = [int(part) for part in raw_disks.split(",")]
- except (TypeError, ValueError), err:
- raise http.HttpBadRequest("Invalid disk index passed: %s" % err)
+ if raw_disks:
+ if ht.TListOf(ht.TInt)(raw_disks): # pylint: disable=E1102
+ data["disks"] = raw_disks
+ else:
+ # Backwards compatibility for strings of the format "1, 2, 3"
+ try:
+ data["disks"] = [int(part) for part in raw_disks.split(",")]
+ except (TypeError, ValueError), err:
+ raise http.HttpBadRequest("Invalid disk index passed: %s" % err)
return (data, static)
})
+class R_2_instances_name_recreate_disks(baserlib.OpcodeResource):
+  """/2/instances/[instance_name]/recreate-disks resource.
+
+  """
+  POST_OPCODE = opcodes.OpInstanceRecreateDisks
+
+  def GetPostOpInput(self):
+    """Recreate disks for an instance.
+
+    @note: the request body is not used; only the instance name from the
+      URI is passed to the opcode
+
+    """
+    return ({}, {
+      "instance_name": self.items[0],
+      })
+
+
class R_2_instances_name_prepare_export(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/prepare-export resource.
"""/2/instances/[instance_name]/console resource.
"""
- GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+ GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
GET_OPCODE = opcodes.OpInstanceConsole
def GET(self):
def _GetQueryFields(args):
- """
+ """Tries to extract C{fields} query parameter.
+
+ @type args: dictionary
+ @rtype: list of string
+ @raise http.HttpBadRequest: When parameter can't be found
"""
try:
def _SplitQueryFields(fields):
- """
+ """Splits fields as given for a query request.
+
+ @type fields: string
+ @rtype: list of string
"""
return [i.strip() for i in fields.split(",")]
"""
# Results might contain sensitive information
- GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+ GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
+ PUT_ACCESS = GET_ACCESS
GET_OPCODE = opcodes.OpQuery
PUT_OPCODE = opcodes.OpQuery
- def _Query(self, fields, filter_):
- return self.GetClient().Query(self.items[0], fields, filter_).ToDict()
+ def _Query(self, fields, qfilter):
+ return self.GetClient().Query(self.items[0], fields, qfilter).ToDict()
def GET(self):
"""Returns resource information.
except KeyError:
fields = _GetQueryFields(self.queryargs)
- return self._Query(fields, self.request_body.get("filter", None))
+ qfilter = body.get("qfilter", None)
+ # TODO: remove this after 2.7
+ if qfilter is None:
+ qfilter = body.get("filter", None)
+
+ return self._Query(fields, qfilter)
class R_2_query_fields(baserlib.ResourceBase):
class _R_Tags(baserlib.OpcodeResource):
- """ Quasiclass for tagging resources
+ """Quasiclass for tagging resources.
Manages tags. When inheriting this class you must define the
TAG_LEVEL for it.
if not self.name:
raise http.HttpBadRequest("Missing name on tag request")
- cl = self.GetClient()
- if kind == constants.TAG_INSTANCE:
- fn = cl.QueryInstances
- elif kind == constants.TAG_NODEGROUP:
- fn = cl.QueryGroups
- else:
- fn = cl.QueryNodes
- result = fn(names=[self.name], fields=["tags"], use_locking=False)
- if not result or not result[0]:
- raise http.HttpBadGateway("Invalid response from tag query")
- tags = result[0][0]
+ cl = self.GetClient(query=True)
+ tags = list(cl.QueryTags(kind, self.name))
elif kind == constants.TAG_CLUSTER:
assert not self.name
TAG_LEVEL = constants.TAG_NODEGROUP
+class R_2_networks_name_tags(_R_Tags):
+  """ /2/networks/[network_name]/tags resource.
+
+  Manages per-network tags.
+
+  """
+  # The actual tag handling is inherited from the _R_Tags base class
+  TAG_LEVEL = constants.TAG_NETWORK
+
+
class R_2_tags(_R_Tags):
""" /2/tags resource.