#
#
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# 02110-1301, USA.
-"""Remote API version 2 baserlib.library.
+"""Remote API resource implementations.
- PUT or POST?
- ============
+PUT or POST?
+============
- According to RFC2616 the main difference between PUT and POST is that
- POST can create new resources but PUT can only create the resource the
- URI was pointing to on the PUT request.
+According to RFC2616 the main difference between PUT and POST is that
+POST can create new resources but PUT can only create the resource the
+URI was pointing to on the PUT request.
- To be in context of this module for instance creation POST on
- /2/instances is legitim while PUT would be not, due to it does create a
- new entity and not just replace /2/instances with it.
+In the context of this module POST on ``/2/instances`` to change an existing
+entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
+new instance) with a name specified in the request.
- So when adding new methods, if they are operating on the URI entity itself,
- PUT should be prefered over POST.
+Quoting from RFC2616, section 9.6::
+
+ The fundamental difference between the POST and PUT requests is reflected in
+ the different meaning of the Request-URI. The URI in a POST request
+ identifies the resource that will handle the enclosed entity. That resource
+ might be a data-accepting process, a gateway to some other protocol, or a
+ separate entity that accepts annotations. In contrast, the URI in a PUT
+ request identifies the entity enclosed with the request -- the user agent
+ knows what URI is intended and the server MUST NOT attempt to apply the
+ request to some other resource. If the server desires that the request be
+ applied to a different URI, it MUST send a 301 (Moved Permanently) response;
+ the user agent MAY then make its own decision regarding whether or not to
+ redirect the request.
+
+So when adding new methods, if they are operating on the URI entity itself,
+PUT should be preferred over POST.
"""
-# pylint: disable-msg=C0103
+# pylint: disable=C0103
# C0103: Invalid name, since the R_* names are not conforming
from ganeti import opcodes
+from ganeti import objects
from ganeti import http
from ganeti import constants
from ganeti import cli
from ganeti import rapi
from ganeti import ht
+from ganeti import compat
+from ganeti import ssconf
from ganeti.rapi import baserlib
I_FIELDS = ["name", "admin_state", "os",
"pnode", "snodes",
"disk_template",
- "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
+ "nic.ips", "nic.macs", "nic.modes", "nic.uuids", "nic.names",
+ "nic.links", "nic.networks", "nic.networks.names", "nic.bridges",
"network_port",
- "disk.sizes", "disk_usage",
+ "disk.sizes", "disk.spindles", "disk_usage", "disk.uuids",
+ "disk.names",
"beparams", "hvparams",
"oper_state", "oper_ram", "oper_vcpus", "status",
"custom_hvparams", "custom_beparams", "custom_nicparams",
] + _COMMON_FIELDS
N_FIELDS = ["name", "offline", "master_candidate", "drained",
- "dtotal", "dfree",
+ "dtotal", "dfree", "sptotal", "spfree",
"mtotal", "mnode", "mfree",
"pinst_cnt", "sinst_cnt",
- "ctotal", "cnodes", "csockets",
+ "ctotal", "cnos", "cnodes", "csockets",
"pip", "sip", "role",
"pinst_list", "sinst_list",
"master_capable", "vm_capable",
+ "ndparams",
"group.uuid",
] + _COMMON_FIELDS
-G_FIELDS = ["name", "uuid",
- "alloc_policy",
- "node_cnt", "node_list",
- "ctime", "mtime", "serial_no",
- ] # "tags" is missing to be able to use _COMMON_FIELDS here.
+NET_FIELDS = ["name", "network", "gateway",
+ "network6", "gateway6",
+ "mac_prefix",
+ "free_count", "reserved_count",
+ "map", "group_list", "inst_list",
+ "external_reservations",
+ ] + _COMMON_FIELDS
+
+G_FIELDS = [
+ "alloc_policy",
+ "name",
+ "node_cnt",
+ "node_list",
+ "ipolicy",
+ "custom_ipolicy",
+ "diskparams",
+ "custom_diskparams",
+ "ndparams",
+ "custom_ndparams",
+ ] + _COMMON_FIELDS
+
+J_FIELDS_BULK = [
+ "id", "ops", "status", "summary",
+ "opstatus",
+ "received_ts", "start_ts", "end_ts",
+ ]
+
+J_FIELDS = J_FIELDS_BULK + [
+ "oplog",
+ "opresult",
+ ]
_NR_DRAINED = "drained"
-_NR_MASTER_CANDIATE = "master-candidate"
+_NR_MASTER_CANDIDATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"
_NR_MAP = {
constants.NR_MASTER: _NR_MASTER,
- constants.NR_MCANDIDATE: _NR_MASTER_CANDIATE,
+ constants.NR_MCANDIDATE: _NR_MASTER_CANDIDATE,
constants.NR_DRAINED: _NR_DRAINED,
constants.NR_OFFLINE: _NR_OFFLINE,
constants.NR_REGULAR: _NR_REGULAR,
# Feature string for node evacuation with LU-generated jobs
_NODE_EVAC_RES1 = "node-evac-res1"
-ALL_FEATURES = frozenset([
+ALL_FEATURES = compat.UniqueFrozenset([
_INST_CREATE_REQV1,
_INST_REINSTALL_REQV1,
_NODE_MIGRATE_REQV1,
_WFJC_TIMEOUT = 10
-class R_version(baserlib.R_Generic):
+# FIXME: For compatibility we update the beparams/memory field. Needs to be
+# removed in Ganeti 2.8
+def _UpdateBeparams(inst):
+ """Updates the beparams dict of inst to support the memory field.
+
+ @param inst: Inst dict
+ @return: Updated inst dict
+
+ """
+ beparams = inst["beparams"]
+ beparams[constants.BE_MEMORY] = beparams[constants.BE_MAXMEM]
+
+ return inst
+
+
+class R_root(baserlib.ResourceBase):
+ """/ resource.
+
+ """
+ @staticmethod
+ def GET():
+ """Supported for legacy reasons.
+
+ """
+ return None
+
+
+class R_2(R_root):
+ """/2 resource.
+
+ """
+
+
+class R_version(baserlib.ResourceBase):
"""/version resource.
This resource should be used to determine the remote API version and
return constants.RAPI_VERSION
-class R_2_info(baserlib.R_Generic):
+class R_2_info(baserlib.OpcodeResource):
"""/2/info resource.
"""
- @staticmethod
- def GET():
+ GET_OPCODE = opcodes.OpClusterQuery
+
+ def GET(self):
"""Returns cluster information.
"""
- client = baserlib.GetClient()
+ client = self.GetClient(query=True)
return client.QueryClusterInfo()
-class R_2_features(baserlib.R_Generic):
+class R_2_features(baserlib.ResourceBase):
"""/2/features resource.
"""
return list(ALL_FEATURES)
-class R_2_os(baserlib.R_Generic):
+class R_2_os(baserlib.OpcodeResource):
"""/2/os resource.
"""
- @staticmethod
- def GET():
+ GET_OPCODE = opcodes.OpOsDiagnose
+
+ def GET(self):
"""Return a list of all OSes.
Can return error 500 in case of a problem.
Example: ["debian-etch"]
"""
- cl = baserlib.GetClient()
+ cl = self.GetClient()
op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
- job_id = baserlib.SubmitJob([op], cl)
+ job_id = self.SubmitJob([op], cl=cl)
# we use custom feedback function, instead of print we log the status
result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
diagnose_data = result[0]
return os_names
-class R_2_redist_config(baserlib.R_Generic):
+class R_2_redist_config(baserlib.OpcodeResource):
"""/2/redistribute-config resource.
"""
- @staticmethod
- def PUT():
- """Redistribute configuration to all nodes.
-
- """
- return baserlib.SubmitJob([opcodes.OpClusterRedistConf()])
+ PUT_OPCODE = opcodes.OpClusterRedistConf
-class R_2_cluster_modify(baserlib.R_Generic):
+class R_2_cluster_modify(baserlib.OpcodeResource):
"""/2/modify resource.
"""
- def PUT(self):
- """Modifies cluster parameters.
-
- @return: a job id
+ PUT_OPCODE = opcodes.OpClusterSetParams
- """
- op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body,
- None)
-
- return baserlib.SubmitJob([op])
-
-class R_2_jobs(baserlib.R_Generic):
+class R_2_jobs(baserlib.ResourceBase):
"""/2/jobs resource.
"""
- @staticmethod
- def GET():
+ def GET(self):
"""Returns a dictionary of jobs.
@return: a dictionary with jobs id and uri.
"""
- fields = ["id"]
- cl = baserlib.GetClient()
- # Convert the list of lists to the list of ids
- result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
- return baserlib.BuildUriList(result, "/2/jobs/%s",
- uri_fields=("id", "uri"))
+ client = self.GetClient(query=True)
+ if self.useBulk():
+ bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
+ return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)
+ else:
+ jobdata = map(compat.fst, client.QueryJobs(None, ["id"]))
+ return baserlib.BuildUriList(jobdata, "/2/jobs/%s",
+ uri_fields=("id", "uri"))
-class R_2_jobs_id(baserlib.R_Generic):
+
+class R_2_jobs_id(baserlib.ResourceBase):
"""/2/jobs/[job_id] resource.
"""
- opresult: OpCodes results as a list of lists
"""
- fields = ["id", "ops", "status", "summary",
- "opstatus", "opresult", "oplog",
- "received_ts", "start_ts", "end_ts",
- ]
job_id = self.items[0]
- result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
+ result = self.GetClient(query=True).QueryJobs([job_id, ], J_FIELDS)[0]
if result is None:
raise http.HttpNotFound()
- return baserlib.MapFields(fields, result)
+ return baserlib.MapFields(J_FIELDS, result)
def DELETE(self):
"""Cancel not-yet-started job.
"""
job_id = self.items[0]
- result = baserlib.GetClient().CancelJob(job_id)
+ result = self.GetClient().CancelJob(job_id)
return result
-class R_2_jobs_id_wait(baserlib.R_Generic):
+class R_2_jobs_id_wait(baserlib.ResourceBase):
"""/2/jobs/[job_id]/wait resource.
"""
raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
" be a number")
- client = baserlib.GetClient()
+ client = self.GetClient()
result = client.WaitForJobChangeOnce(job_id, fields,
prev_job_info, prev_log_serial,
timeout=_WFJC_TIMEOUT)
}
-class R_2_nodes(baserlib.R_Generic):
+class R_2_nodes(baserlib.OpcodeResource):
"""/2/nodes resource.
"""
+ GET_OPCODE = opcodes.OpNodeQuery
+
def GET(self):
"""Returns a list of all nodes.
"""
- client = baserlib.GetClient()
+ client = self.GetClient(query=True)
if self.useBulk():
bulkdata = client.QueryNodes([], N_FIELDS, False)
uri_fields=("id", "uri"))
-class R_2_nodes_name(baserlib.R_Generic):
+class R_2_nodes_name(baserlib.OpcodeResource):
"""/2/nodes/[node_name] resource.
"""
+ GET_OPCODE = opcodes.OpNodeQuery
+
def GET(self):
"""Send information about a node.
"""
node_name = self.items[0]
- client = baserlib.GetClient()
+ client = self.GetClient(query=True)
result = baserlib.HandleItemQueryErrors(client.QueryNodes,
names=[node_name], fields=N_FIELDS,
return baserlib.MapFields(N_FIELDS, result[0])
-class R_2_nodes_name_role(baserlib.R_Generic):
- """ /2/nodes/[node_name]/role resource.
+class R_2_nodes_name_powercycle(baserlib.OpcodeResource):
+ """/2/nodes/[node_name]/powercycle resource.
+
+ """
+ POST_OPCODE = opcodes.OpNodePowercycle
+
+ def GetPostOpInput(self):
+ """Tries to powercycle a node.
+
+ """
+ return (self.request_body, {
+ "node_name": self.items[0],
+ "force": self.useForce(),
+ })
+
+
+class R_2_nodes_name_role(baserlib.OpcodeResource):
+ """/2/nodes/[node_name]/role resource.
"""
+ PUT_OPCODE = opcodes.OpNodeSetParams
+
def GET(self):
"""Returns the current node role.
"""
node_name = self.items[0]
- client = baserlib.GetClient()
+ client = self.GetClient(query=True)
result = client.QueryNodes(names=[node_name], fields=["role"],
use_locking=self.useLocking())
return _NR_MAP[result[0][0]]
- def PUT(self):
+ def GetPutOpInput(self):
"""Sets the node role.
- @return: a job id
-
"""
- if not isinstance(self.request_body, basestring):
- raise http.HttpBadRequest("Invalid body contents, not a string")
+ baserlib.CheckType(self.request_body, basestring, "Body contents")
- node_name = self.items[0]
role = self.request_body
if role == _NR_REGULAR:
offline = False
drained = False
- elif role == _NR_MASTER_CANDIATE:
+ elif role == _NR_MASTER_CANDIDATE:
candidate = True
offline = drained = None
else:
raise http.HttpBadRequest("Can't set '%s' role" % role)
- op = opcodes.OpNodeSetParams(node_name=node_name,
- master_candidate=candidate,
- offline=offline,
- drained=drained,
- force=bool(self.useForce()))
+ assert len(self.items) == 1
- return baserlib.SubmitJob([op])
+ return ({}, {
+ "node_name": self.items[0],
+ "master_candidate": candidate,
+ "offline": offline,
+ "drained": drained,
+ "force": self.useForce(),
+ "auto_promote": bool(self._checkIntVariable("auto-promote", default=0)),
+ })
-class R_2_nodes_name_evacuate(baserlib.R_Generic):
+class R_2_nodes_name_evacuate(baserlib.OpcodeResource):
"""/2/nodes/[node_name]/evacuate resource.
"""
- def POST(self):
+ POST_OPCODE = opcodes.OpNodeEvacuate
+
+ def GetPostOpInput(self):
"""Evacuate all instances off a node.
"""
- op = baserlib.FillOpcode(opcodes.OpNodeEvacuate, self.request_body, {
+ return (self.request_body, {
"node_name": self.items[0],
"dry_run": self.dryRun(),
})
- return baserlib.SubmitJob([op])
-
-class R_2_nodes_name_migrate(baserlib.R_Generic):
+class R_2_nodes_name_migrate(baserlib.OpcodeResource):
"""/2/nodes/[node_name]/migrate resource.
"""
- def POST(self):
+ POST_OPCODE = opcodes.OpNodeMigrate
+
+ def GetPostOpInput(self):
"""Migrate all primary instances from a node.
"""
- node_name = self.items[0]
-
if self.queryargs:
# Support old-style requests
if "live" in self.queryargs and "mode" in self.queryargs:
else:
data = self.request_body
- op = baserlib.FillOpcode(opcodes.OpNodeMigrate, data, {
- "node_name": node_name,
+ return (data, {
+ "node_name": self.items[0],
})
- return baserlib.SubmitJob([op])
+
+class R_2_nodes_name_modify(baserlib.OpcodeResource):
+ """/2/nodes/[node_name]/modify resource.
+
+ """
+ POST_OPCODE = opcodes.OpNodeSetParams
+
+ def GetPostOpInput(self):
+ """Changes parameters of a node.
+
+ """
+ assert len(self.items) == 1
+
+ return (self.request_body, {
+ "node_name": self.items[0],
+ })
-class R_2_nodes_name_storage(baserlib.R_Generic):
+class R_2_nodes_name_storage(baserlib.OpcodeResource):
"""/2/nodes/[node_name]/storage resource.
"""
# LUNodeQueryStorage acquires locks, hence restricting access to GET
GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+ GET_OPCODE = opcodes.OpNodeQueryStorage
- def GET(self):
- node_name = self.items[0]
+ def GetGetOpInput(self):
+ """List storage available on a node.
+ """
storage_type = self._checkStringVariable("storage_type", None)
- if not storage_type:
- raise http.HttpBadRequest("Missing the required 'storage_type'"
- " parameter")
-
output_fields = self._checkStringVariable("output_fields", None)
+
if not output_fields:
raise http.HttpBadRequest("Missing the required 'output_fields'"
" parameter")
- op = opcodes.OpNodeQueryStorage(nodes=[node_name],
- storage_type=storage_type,
- output_fields=output_fields.split(","))
- return baserlib.SubmitJob([op])
+ return ({}, {
+ "nodes": [self.items[0]],
+ "storage_type": storage_type,
+ "output_fields": output_fields.split(","),
+ })
-class R_2_nodes_name_storage_modify(baserlib.R_Generic):
+class R_2_nodes_name_storage_modify(baserlib.OpcodeResource):
"""/2/nodes/[node_name]/storage/modify resource.
"""
- def PUT(self):
- node_name = self.items[0]
+ PUT_OPCODE = opcodes.OpNodeModifyStorage
- storage_type = self._checkStringVariable("storage_type", None)
- if not storage_type:
- raise http.HttpBadRequest("Missing the required 'storage_type'"
- " parameter")
+ def GetPutOpInput(self):
+ """Modifies a storage volume on a node.
+ """
+ storage_type = self._checkStringVariable("storage_type", None)
name = self._checkStringVariable("name", None)
+
if not name:
raise http.HttpBadRequest("Missing the required 'name'"
" parameter")
changes[constants.SF_ALLOCATABLE] = \
bool(self._checkIntVariable("allocatable", default=1))
- op = opcodes.OpNodeModifyStorage(node_name=node_name,
- storage_type=storage_type,
- name=name,
- changes=changes)
- return baserlib.SubmitJob([op])
+ return ({}, {
+ "node_name": self.items[0],
+ "storage_type": storage_type,
+ "name": name,
+ "changes": changes,
+ })
-class R_2_nodes_name_storage_repair(baserlib.R_Generic):
+class R_2_nodes_name_storage_repair(baserlib.OpcodeResource):
"""/2/nodes/[node_name]/storage/repair resource.
"""
- def PUT(self):
- node_name = self.items[0]
+ PUT_OPCODE = opcodes.OpRepairNodeStorage
- storage_type = self._checkStringVariable("storage_type", None)
- if not storage_type:
- raise http.HttpBadRequest("Missing the required 'storage_type'"
- " parameter")
+ def GetPutOpInput(self):
+ """Repairs a storage volume on a node.
+ """
+ storage_type = self._checkStringVariable("storage_type", None)
name = self._checkStringVariable("name", None)
if not name:
raise http.HttpBadRequest("Missing the required 'name'"
" parameter")
- op = opcodes.OpRepairNodeStorage(node_name=node_name,
- storage_type=storage_type,
- name=name)
- return baserlib.SubmitJob([op])
-
+ return ({}, {
+ "node_name": self.items[0],
+ "storage_type": storage_type,
+ "name": name,
+ })
-def _ParseCreateGroupRequest(data, dry_run):
- """Parses a request for creating a node group.
- @rtype: L{opcodes.OpGroupAdd}
- @return: Group creation opcode
+class R_2_networks(baserlib.OpcodeResource):
+ """/2/networks resource.
"""
- override = {
- "dry_run": dry_run,
+ GET_OPCODE = opcodes.OpNetworkQuery
+ POST_OPCODE = opcodes.OpNetworkAdd
+ POST_RENAME = {
+ "name": "network_name",
}
- rename = {
- "name": "group_name",
- }
+ def GetPostOpInput(self):
+ """Create a network.
- return baserlib.FillOpcode(opcodes.OpGroupAdd, data, override,
- rename=rename)
+ """
+ assert not self.items
+ return (self.request_body, {
+ "dry_run": self.dryRun(),
+ })
+
+ def GET(self):
+ """Returns a list of all networks.
+
+ """
+ client = self.GetClient(query=True)
+
+ if self.useBulk():
+ bulkdata = client.QueryNetworks([], NET_FIELDS, False)
+ return baserlib.MapBulkFields(bulkdata, NET_FIELDS)
+ else:
+ data = client.QueryNetworks([], ["name"], False)
+ networknames = [row[0] for row in data]
+ return baserlib.BuildUriList(networknames, "/2/networks/%s",
+ uri_fields=("name", "uri"))
+
+
+class R_2_networks_name(baserlib.OpcodeResource):
+ """/2/networks/[network_name] resource.
+
+ """
+ DELETE_OPCODE = opcodes.OpNetworkRemove
+
+ def GET(self):
+ """Send information about a network.
+
+ """
+ network_name = self.items[0]
+ client = self.GetClient(query=True)
+
+ result = baserlib.HandleItemQueryErrors(client.QueryNetworks,
+ names=[network_name],
+ fields=NET_FIELDS,
+ use_locking=self.useLocking())
+
+ return baserlib.MapFields(NET_FIELDS, result[0])
+
+ def GetDeleteOpInput(self):
+ """Delete a network.
+
+ """
+ assert len(self.items) == 1
+ return (self.request_body, {
+ "network_name": self.items[0],
+ "dry_run": self.dryRun(),
+ })
+
+
+class R_2_networks_name_connect(baserlib.OpcodeResource):
+ """/2/networks/[network_name]/connect resource.
+
+ """
+ PUT_OPCODE = opcodes.OpNetworkConnect
+
+ def GetPutOpInput(self):
+    """Connects a network to a node group.
+
+ """
+ assert self.items
+ return (self.request_body, {
+ "network_name": self.items[0],
+ "dry_run": self.dryRun(),
+ })
+
+
+class R_2_networks_name_disconnect(baserlib.OpcodeResource):
+ """/2/networks/[network_name]/disconnect resource.
+
+ """
+ PUT_OPCODE = opcodes.OpNetworkDisconnect
+
+ def GetPutOpInput(self):
+    """Disconnects a network from a node group.
+
+ """
+ assert self.items
+ return (self.request_body, {
+ "network_name": self.items[0],
+ "dry_run": self.dryRun(),
+ })
+
+
+class R_2_networks_name_modify(baserlib.OpcodeResource):
+ """/2/networks/[network_name]/modify resource.
+
+ """
+ PUT_OPCODE = opcodes.OpNetworkSetParams
+
+ def GetPutOpInput(self):
+ """Changes some parameters of network.
+
+ """
+ assert self.items
+ return (self.request_body, {
+ "network_name": self.items[0],
+ })
-class R_2_groups(baserlib.R_Generic):
+class R_2_groups(baserlib.OpcodeResource):
"""/2/groups resource.
"""
+ GET_OPCODE = opcodes.OpGroupQuery
+ POST_OPCODE = opcodes.OpGroupAdd
+ POST_RENAME = {
+ "name": "group_name",
+ }
+
+ def GetPostOpInput(self):
+ """Create a node group.
+
+
+ """
+ assert not self.items
+ return (self.request_body, {
+ "dry_run": self.dryRun(),
+ })
+
def GET(self):
"""Returns a list of all node groups.
"""
- client = baserlib.GetClient()
+ client = self.GetClient(query=True)
if self.useBulk():
bulkdata = client.QueryGroups([], G_FIELDS, False)
return baserlib.BuildUriList(groupnames, "/2/groups/%s",
uri_fields=("name", "uri"))
- def POST(self):
- """Create a node group.
- @return: a job id
-
- """
- baserlib.CheckType(self.request_body, dict, "Body contents")
- op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
- return baserlib.SubmitJob([op])
-
-
-class R_2_groups_name(baserlib.R_Generic):
+class R_2_groups_name(baserlib.OpcodeResource):
"""/2/groups/[group_name] resource.
"""
+ DELETE_OPCODE = opcodes.OpGroupRemove
+
def GET(self):
"""Send information about a node group.
"""
group_name = self.items[0]
- client = baserlib.GetClient()
+ client = self.GetClient(query=True)
result = baserlib.HandleItemQueryErrors(client.QueryGroups,
names=[group_name], fields=G_FIELDS,
return baserlib.MapFields(G_FIELDS, result[0])
- def DELETE(self):
+ def GetDeleteOpInput(self):
"""Delete a node group.
"""
- op = opcodes.OpGroupRemove(group_name=self.items[0],
- dry_run=bool(self.dryRun()))
-
- return baserlib.SubmitJob([op])
-
-
-def _ParseModifyGroupRequest(name, data):
- """Parses a request for modifying a node group.
-
- @rtype: L{opcodes.OpGroupSetParams}
- @return: Group modify opcode
-
- """
- return baserlib.FillOpcode(opcodes.OpGroupSetParams, data, {
- "group_name": name,
- })
-
+ assert len(self.items) == 1
+ return ({}, {
+ "group_name": self.items[0],
+ "dry_run": self.dryRun(),
+ })
-class R_2_groups_name_modify(baserlib.R_Generic):
+class R_2_groups_name_modify(baserlib.OpcodeResource):
"""/2/groups/[group_name]/modify resource.
"""
- def PUT(self):
- """Changes some parameters of node group.
+ PUT_OPCODE = opcodes.OpGroupSetParams
- @return: a job id
+ def GetPutOpInput(self):
+ """Changes some parameters of node group.
"""
- baserlib.CheckType(self.request_body, dict, "Body contents")
-
- op = _ParseModifyGroupRequest(self.items[0], self.request_body)
-
- return baserlib.SubmitJob([op])
-
-
-def _ParseRenameGroupRequest(name, data, dry_run):
- """Parses a request for renaming a node group.
-
- @type name: string
- @param name: name of the node group to rename
- @type data: dict
- @param data: the body received by the rename request
- @type dry_run: bool
- @param dry_run: whether to perform a dry run
-
- @rtype: L{opcodes.OpGroupRename}
- @return: Node group rename opcode
-
- """
- return baserlib.FillOpcode(opcodes.OpGroupRename, data, {
- "group_name": name,
- "dry_run": dry_run,
- })
+ assert self.items
+ return (self.request_body, {
+ "group_name": self.items[0],
+ })
-class R_2_groups_name_rename(baserlib.R_Generic):
+class R_2_groups_name_rename(baserlib.OpcodeResource):
"""/2/groups/[group_name]/rename resource.
"""
- def PUT(self):
- """Changes the name of a node group.
+ PUT_OPCODE = opcodes.OpGroupRename
- @return: a job id
+ def GetPutOpInput(self):
+ """Changes the name of a node group.
"""
- baserlib.CheckType(self.request_body, dict, "Body contents")
- op = _ParseRenameGroupRequest(self.items[0], self.request_body,
- self.dryRun())
- return baserlib.SubmitJob([op])
+ assert len(self.items) == 1
+ return (self.request_body, {
+ "group_name": self.items[0],
+ "dry_run": self.dryRun(),
+ })
-class R_2_groups_name_assign_nodes(baserlib.R_Generic):
+class R_2_groups_name_assign_nodes(baserlib.OpcodeResource):
"""/2/groups/[group_name]/assign-nodes resource.
"""
- def PUT(self):
- """Assigns nodes to a group.
+ PUT_OPCODE = opcodes.OpGroupAssignNodes
- @return: a job id
+ def GetPutOpInput(self):
+ """Assigns nodes to a group.
"""
- op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body, {
+ assert len(self.items) == 1
+ return (self.request_body, {
"group_name": self.items[0],
"dry_run": self.dryRun(),
"force": self.useForce(),
})
- return baserlib.SubmitJob([op])
-
-def _ParseInstanceCreateRequestVersion1(data, dry_run):
- """Parses an instance creation request version 1.
-
- @rtype: L{opcodes.OpInstanceCreate}
- @return: Instance creation opcode
+class R_2_instances(baserlib.OpcodeResource):
+ """/2/instances resource.
"""
- override = {
- "dry_run": dry_run,
- }
-
- rename = {
+ GET_OPCODE = opcodes.OpInstanceQuery
+ POST_OPCODE = opcodes.OpInstanceCreate
+ POST_RENAME = {
"os": "os_type",
"name": "instance_name",
}
- return baserlib.FillOpcode(opcodes.OpInstanceCreate, data, override,
- rename=rename)
-
-
-class R_2_instances(baserlib.R_Generic):
- """/2/instances resource.
-
- """
def GET(self):
"""Returns a list of all available instances.
"""
- client = baserlib.GetClient()
+ client = self.GetClient()
use_locking = self.useLocking()
if self.useBulk():
bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
- return baserlib.MapBulkFields(bulkdata, I_FIELDS)
+ return map(_UpdateBeparams, baserlib.MapBulkFields(bulkdata, I_FIELDS))
else:
instancesdata = client.QueryInstances([], ["name"], use_locking)
instanceslist = [row[0] for row in instancesdata]
return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
uri_fields=("id", "uri"))
- def POST(self):
+ def GetPostOpInput(self):
"""Create an instance.
@return: a job id
"""
- if not isinstance(self.request_body, dict):
- raise http.HttpBadRequest("Invalid body contents, not a dictionary")
+ baserlib.CheckType(self.request_body, dict, "Body contents")
# Default to request data version 0
data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
if data_version == 0:
raise http.HttpBadRequest("Instance creation request version 0 is no"
" longer supported")
- elif data_version == 1:
- data = self.request_body.copy()
- # Remove "__version__"
- data.pop(_REQ_DATA_VERSION, None)
- op = _ParseInstanceCreateRequestVersion1(data, self.dryRun())
- else:
+ elif data_version != 1:
raise http.HttpBadRequest("Unsupported request data version %s" %
data_version)
- return baserlib.SubmitJob([op])
+ data = self.request_body.copy()
+ # Remove "__version__"
+ data.pop(_REQ_DATA_VERSION, None)
+
+ return (data, {
+ "dry_run": self.dryRun(),
+ })
+
+
+class R_2_instances_multi_alloc(baserlib.OpcodeResource):
+ """/2/instances-multi-alloc resource.
+
+ """
+ POST_OPCODE = opcodes.OpInstanceMultiAlloc
+
+ def GetPostOpInput(self):
+ """Try to allocate multiple instances.
+
+ @return: A dict with submitted jobs, allocatable instances and failed
+ allocations
+
+ """
+ if "instances" not in self.request_body:
+ raise http.HttpBadRequest("Request is missing required 'instances' field"
+ " in body")
+
+ op_id = {
+ "OP_ID": self.POST_OPCODE.OP_ID, # pylint: disable=E1101
+ }
+ body = objects.FillDict(self.request_body, {
+ "instances": [objects.FillDict(inst, op_id)
+ for inst in self.request_body["instances"]],
+ })
+
+ return (body, {
+ "dry_run": self.dryRun(),
+ })
-class R_2_instances_name(baserlib.R_Generic):
+class R_2_instances_name(baserlib.OpcodeResource):
"""/2/instances/[instance_name] resource.
"""
+ GET_OPCODE = opcodes.OpInstanceQuery
+ DELETE_OPCODE = opcodes.OpInstanceRemove
+
def GET(self):
"""Send information about an instance.
"""
- client = baserlib.GetClient()
+ client = self.GetClient()
instance_name = self.items[0]
result = baserlib.HandleItemQueryErrors(client.QueryInstances,
fields=I_FIELDS,
use_locking=self.useLocking())
- return baserlib.MapFields(I_FIELDS, result[0])
+ return _UpdateBeparams(baserlib.MapFields(I_FIELDS, result[0]))
- def DELETE(self):
+ def GetDeleteOpInput(self):
"""Delete an instance.
"""
- op = opcodes.OpInstanceRemove(instance_name=self.items[0],
- ignore_failures=False,
- dry_run=bool(self.dryRun()))
- return baserlib.SubmitJob([op])
+ assert len(self.items) == 1
+ return ({}, {
+ "instance_name": self.items[0],
+ "ignore_failures": False,
+ "dry_run": self.dryRun(),
+ })
-class R_2_instances_name_info(baserlib.R_Generic):
+class R_2_instances_name_info(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/info resource.
"""
- def GET(self):
+ GET_OPCODE = opcodes.OpInstanceQueryData
+
+ def GetGetOpInput(self):
"""Request detailed instance information.
"""
- instance_name = self.items[0]
- static = bool(self._checkIntVariable("static", default=0))
-
- op = opcodes.OpInstanceQueryData(instances=[instance_name],
- static=static)
- return baserlib.SubmitJob([op])
+ assert len(self.items) == 1
+ return ({}, {
+ "instances": [self.items[0]],
+ "static": bool(self._checkIntVariable("static", default=0)),
+ })
-class R_2_instances_name_reboot(baserlib.R_Generic):
+class R_2_instances_name_reboot(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/reboot resource.
Implements an instance reboot.
"""
- def POST(self):
+ POST_OPCODE = opcodes.OpInstanceReboot
+
+ def GetPostOpInput(self):
"""Reboot an instance.
The URI takes type=[hard|soft|full] and
ignore_secondaries=[False|True] parameters.
"""
- instance_name = self.items[0]
- reboot_type = self.queryargs.get('type',
- [constants.INSTANCE_REBOOT_HARD])[0]
- ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
- op = opcodes.OpInstanceReboot(instance_name=instance_name,
- reboot_type=reboot_type,
- ignore_secondaries=ignore_secondaries,
- dry_run=bool(self.dryRun()))
-
- return baserlib.SubmitJob([op])
+ return ({}, {
+ "instance_name": self.items[0],
+ "reboot_type":
+ self.queryargs.get("type", [constants.INSTANCE_REBOOT_HARD])[0],
+ "ignore_secondaries": bool(self._checkIntVariable("ignore_secondaries")),
+ "dry_run": self.dryRun(),
+ })
-class R_2_instances_name_startup(baserlib.R_Generic):
+class R_2_instances_name_startup(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/startup resource.
Implements an instance startup.
"""
- def PUT(self):
+ PUT_OPCODE = opcodes.OpInstanceStartup
+
+ def GetPutOpInput(self):
"""Startup an instance.
The URI takes force=[False|True] parameter to start the instance
if even if secondary disks are failing.
"""
- instance_name = self.items[0]
- force_startup = bool(self._checkIntVariable('force'))
- no_remember = bool(self._checkIntVariable('no_remember'))
- op = opcodes.OpInstanceStartup(instance_name=instance_name,
- force=force_startup,
- dry_run=bool(self.dryRun()),
- no_remember=no_remember)
-
- return baserlib.SubmitJob([op])
-
-
-def _ParseShutdownInstanceRequest(name, data, dry_run, no_remember):
- """Parses a request for an instance shutdown.
-
- @rtype: L{opcodes.OpInstanceShutdown}
- @return: Instance shutdown opcode
-
- """
- return baserlib.FillOpcode(opcodes.OpInstanceShutdown, data, {
- "instance_name": name,
- "dry_run": dry_run,
- "no_remember": no_remember,
- })
+ return ({}, {
+ "instance_name": self.items[0],
+ "force": self.useForce(),
+ "dry_run": self.dryRun(),
+ "no_remember": bool(self._checkIntVariable("no_remember")),
+ })
-class R_2_instances_name_shutdown(baserlib.R_Generic):
+class R_2_instances_name_shutdown(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/shutdown resource.
Implements an instance shutdown.
"""
- def PUT(self):
- """Shutdown an instance.
+ PUT_OPCODE = opcodes.OpInstanceShutdown
- @return: a job id
+ def GetPutOpInput(self):
+ """Shutdown an instance.
"""
- baserlib.CheckType(self.request_body, dict, "Body contents")
-
- no_remember = bool(self._checkIntVariable('no_remember'))
- op = _ParseShutdownInstanceRequest(self.items[0], self.request_body,
- bool(self.dryRun()), no_remember)
-
- return baserlib.SubmitJob([op])
+ return (self.request_body, {
+ "instance_name": self.items[0],
+ "no_remember": bool(self._checkIntVariable("no_remember")),
+ "dry_run": self.dryRun(),
+ })
def _ParseInstanceReinstallRequest(name, data):
return ops
-class R_2_instances_name_reinstall(baserlib.R_Generic):
+class R_2_instances_name_reinstall(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/reinstall resource.
Implements an instance reinstall.
"""
+ POST_OPCODE = opcodes.OpInstanceReinstall
+
def POST(self):
"""Reinstall an instance.
ops = _ParseInstanceReinstallRequest(self.items[0], body)
- return baserlib.SubmitJob(ops)
-
-
-def _ParseInstanceReplaceDisksRequest(name, data):
- """Parses a request for an instance export.
-
- @rtype: L{opcodes.OpInstanceReplaceDisks}
- @return: Instance export opcode
-
- """
- override = {
- "instance_name": name,
- }
-
- # Parse disks
- try:
- raw_disks = data["disks"]
- except KeyError:
- pass
- else:
- if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102
- # Backwards compatibility for strings of the format "1, 2, 3"
- try:
- data["disks"] = [int(part) for part in raw_disks.split(",")]
- except (TypeError, ValueError), err:
- raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
-
- return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)
+ return self.SubmitJob(ops)
-class R_2_instances_name_replace_disks(baserlib.R_Generic):
+class R_2_instances_name_replace_disks(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/replace-disks resource.
"""
- def POST(self):
+ POST_OPCODE = opcodes.OpInstanceReplaceDisks
+
+ def GetPostOpInput(self):
"""Replaces disks on an instance.
"""
- op = _ParseInstanceReplaceDisksRequest(self.items[0], self.request_body)
+ static = {
+ "instance_name": self.items[0],
+ }
- return baserlib.SubmitJob([op])
+ if self.request_body:
+ data = self.request_body
+ elif self.queryargs:
+ # Legacy interface, do not modify/extend
+ data = {
+ "remote_node": self._checkStringVariable("remote_node", default=None),
+ "mode": self._checkStringVariable("mode", default=None),
+ "disks": self._checkStringVariable("disks", default=None),
+ "iallocator": self._checkStringVariable("iallocator", default=None),
+ }
+ else:
+ data = {}
+
+ # Parse disks
+ try:
+ raw_disks = data.pop("disks")
+ except KeyError:
+ pass
+ else:
+ if raw_disks:
+ if ht.TListOf(ht.TInt)(raw_disks): # pylint: disable=E1102
+ data["disks"] = raw_disks
+ else:
+ # Backwards compatibility for strings of the format "1, 2, 3"
+ try:
+ data["disks"] = [int(part) for part in raw_disks.split(",")]
+ except (TypeError, ValueError), err:
+ raise http.HttpBadRequest("Invalid disk index passed: %s" % err)
+
+ return (data, static)
-class R_2_instances_name_activate_disks(baserlib.R_Generic):
+class R_2_instances_name_activate_disks(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/activate-disks resource.
"""
- def PUT(self):
+ PUT_OPCODE = opcodes.OpInstanceActivateDisks
+
+ def GetPutOpInput(self):
"""Activate disks for an instance.
The URI might contain ignore_size to ignore current recorded size.
"""
- instance_name = self.items[0]
- ignore_size = bool(self._checkIntVariable('ignore_size'))
-
- op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
- ignore_size=ignore_size)
-
- return baserlib.SubmitJob([op])
+ return ({}, {
+ "instance_name": self.items[0],
+ "ignore_size": bool(self._checkIntVariable("ignore_size")),
+ })
-class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
+class R_2_instances_name_deactivate_disks(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/deactivate-disks resource.
"""
- def PUT(self):
+ PUT_OPCODE = opcodes.OpInstanceDeactivateDisks
+
+ def GetPutOpInput(self):
"""Deactivate disks for an instance.
"""
- instance_name = self.items[0]
-
- op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)
-
- return baserlib.SubmitJob([op])
+ return ({}, {
+ "instance_name": self.items[0],
+ })
-class R_2_instances_name_prepare_export(baserlib.R_Generic):
- """/2/instances/[instance_name]/prepare-export resource.
+class R_2_instances_name_recreate_disks(baserlib.OpcodeResource):
+ """/2/instances/[instance_name]/recreate-disks resource.
"""
- def PUT(self):
- """Prepares an export for an instance.
+ POST_OPCODE = opcodes.OpInstanceRecreateDisks
- @return: a job id
+ def GetPostOpInput(self):
+ """Recreate disks for an instance.
"""
- instance_name = self.items[0]
- mode = self._checkStringVariable("mode")
-
- op = opcodes.OpBackupPrepare(instance_name=instance_name,
- mode=mode)
-
- return baserlib.SubmitJob([op])
-
+ return ({}, {
+ "instance_name": self.items[0],
+ })
-def _ParseExportInstanceRequest(name, data):
- """Parses a request for an instance export.
- @rtype: L{opcodes.OpBackupExport}
- @return: Instance export opcode
+class R_2_instances_name_prepare_export(baserlib.OpcodeResource):
+ """/2/instances/[instance_name]/prepare-export resource.
"""
- # Rename "destination" to "target_node"
- try:
- data["target_node"] = data.pop("destination")
- except KeyError:
- pass
+ PUT_OPCODE = opcodes.OpBackupPrepare
- return baserlib.FillOpcode(opcodes.OpBackupExport, data, {
- "instance_name": name,
- })
+ def GetPutOpInput(self):
+ """Prepares an export for an instance.
+
+ """
+ return ({}, {
+ "instance_name": self.items[0],
+ "mode": self._checkStringVariable("mode"),
+ })
-class R_2_instances_name_export(baserlib.R_Generic):
+class R_2_instances_name_export(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/export resource.
"""
- def PUT(self):
- """Exports an instance.
+ PUT_OPCODE = opcodes.OpBackupExport
+ PUT_RENAME = {
+ "destination": "target_node",
+ }
- @return: a job id
+ def GetPutOpInput(self):
+ """Exports an instance.
"""
- if not isinstance(self.request_body, dict):
- raise http.HttpBadRequest("Invalid body contents, not a dictionary")
-
- op = _ParseExportInstanceRequest(self.items[0], self.request_body)
-
- return baserlib.SubmitJob([op])
-
-
-def _ParseMigrateInstanceRequest(name, data):
- """Parses a request for an instance migration.
-
- @rtype: L{opcodes.OpInstanceMigrate}
- @return: Instance migration opcode
-
- """
- return baserlib.FillOpcode(opcodes.OpInstanceMigrate, data, {
- "instance_name": name,
- })
+ return (self.request_body, {
+ "instance_name": self.items[0],
+ })
-class R_2_instances_name_migrate(baserlib.R_Generic):
+class R_2_instances_name_migrate(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/migrate resource.
"""
- def PUT(self):
- """Migrates an instance.
+ PUT_OPCODE = opcodes.OpInstanceMigrate
- @return: a job id
+ def GetPutOpInput(self):
+ """Migrates an instance.
"""
- baserlib.CheckType(self.request_body, dict, "Body contents")
-
- op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)
-
- return baserlib.SubmitJob([op])
+ return (self.request_body, {
+ "instance_name": self.items[0],
+ })
-class R_2_instances_name_failover(baserlib.R_Generic):
+class R_2_instances_name_failover(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/failover resource.
"""
- def PUT(self):
- """Does a failover of an instance.
+ PUT_OPCODE = opcodes.OpInstanceFailover
- @return: a job id
+ def GetPutOpInput(self):
+ """Does a failover of an instance.
"""
- baserlib.CheckType(self.request_body, dict, "Body contents")
-
- op = baserlib.FillOpcode(opcodes.OpInstanceFailover, self.request_body, {
+ return (self.request_body, {
"instance_name": self.items[0],
})
- return baserlib.SubmitJob([op])
-
-
-def _ParseRenameInstanceRequest(name, data):
- """Parses a request for renaming an instance.
-
- @rtype: L{opcodes.OpInstanceRename}
- @return: Instance rename opcode
-
- """
- return baserlib.FillOpcode(opcodes.OpInstanceRename, data, {
- "instance_name": name,
- })
-
-class R_2_instances_name_rename(baserlib.R_Generic):
+class R_2_instances_name_rename(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/rename resource.
"""
- def PUT(self):
- """Changes the name of an instance.
+ PUT_OPCODE = opcodes.OpInstanceRename
- @return: a job id
+ def GetPutOpInput(self):
+ """Changes the name of an instance.
"""
- baserlib.CheckType(self.request_body, dict, "Body contents")
-
- op = _ParseRenameInstanceRequest(self.items[0], self.request_body)
-
- return baserlib.SubmitJob([op])
-
-
-def _ParseModifyInstanceRequest(name, data):
- """Parses a request for modifying an instance.
-
- @rtype: L{opcodes.OpInstanceSetParams}
- @return: Instance modify opcode
-
- """
- return baserlib.FillOpcode(opcodes.OpInstanceSetParams, data, {
- "instance_name": name,
- })
+ return (self.request_body, {
+ "instance_name": self.items[0],
+ })
-class R_2_instances_name_modify(baserlib.R_Generic):
+class R_2_instances_name_modify(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/modify resource.
"""
- def PUT(self):
- """Changes some parameters of an instance.
+ PUT_OPCODE = opcodes.OpInstanceSetParams
- @return: a job id
+ def GetPutOpInput(self):
+ """Changes parameters of an instance.
"""
- baserlib.CheckType(self.request_body, dict, "Body contents")
-
- op = _ParseModifyInstanceRequest(self.items[0], self.request_body)
-
- return baserlib.SubmitJob([op])
+ return (self.request_body, {
+ "instance_name": self.items[0],
+ })
-class R_2_instances_name_disk_grow(baserlib.R_Generic):
+class R_2_instances_name_disk_grow(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/disk/[disk_index]/grow resource.
"""
- def POST(self):
- """Increases the size of an instance disk.
+ POST_OPCODE = opcodes.OpInstanceGrowDisk
- @return: a job id
+ def GetPostOpInput(self):
+ """Increases the size of an instance disk.
"""
- op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
+ return (self.request_body, {
"instance_name": self.items[0],
"disk": int(self.items[1]),
})
- return baserlib.SubmitJob([op])
-
-class R_2_instances_name_console(baserlib.R_Generic):
+class R_2_instances_name_console(baserlib.ResourceBase):
"""/2/instances/[instance_name]/console resource.
"""
- GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+ GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
+ GET_OPCODE = opcodes.OpInstanceConsole
def GET(self):
"""Request information for connecting to instance's console.
L{objects.InstanceConsole}
"""
- client = baserlib.GetClient()
+ client = self.GetClient()
((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)
def _GetQueryFields(args):
- """
+ """Tries to extract C{fields} query parameter.
+
+ @type args: dictionary
+ @rtype: list of string
+ @raise http.HttpBadRequest: When parameter can't be found
"""
try:
def _SplitQueryFields(fields):
- """
+ """Splits fields as given for a query request.
+
+ @type fields: string
+ @rtype: list of string
"""
return [i.strip() for i in fields.split(",")]
-class R_2_query(baserlib.R_Generic):
+class R_2_query(baserlib.ResourceBase):
"""/2/query/[resource] resource.
"""
# Results might contain sensitive information
- GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+ GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
+ PUT_ACCESS = GET_ACCESS
+ GET_OPCODE = opcodes.OpQuery
+ PUT_OPCODE = opcodes.OpQuery
- def _Query(self, fields, filter_):
- return baserlib.GetClient().Query(self.items[0], fields, filter_).ToDict()
+ def _Query(self, fields, qfilter):
+ return self.GetClient().Query(self.items[0], fields, qfilter).ToDict()
def GET(self):
"""Returns resource information.
except KeyError:
fields = _GetQueryFields(self.queryargs)
- return self._Query(fields, self.request_body.get("filter", None))
+ qfilter = body.get("qfilter", None)
+ # TODO: remove this after 2.7
+ if qfilter is None:
+ qfilter = body.get("filter", None)
+
+ return self._Query(fields, qfilter)
-class R_2_query_fields(baserlib.R_Generic):
+class R_2_query_fields(baserlib.ResourceBase):
"""/2/query/[resource]/fields resource.
"""
+ GET_OPCODE = opcodes.OpQueryFields
+
def GET(self):
"""Retrieves list of available fields for a resource.
else:
fields = _SplitQueryFields(raw_fields[0])
- return baserlib.GetClient().QueryFields(self.items[0], fields).ToDict()
+ return self.GetClient().QueryFields(self.items[0], fields).ToDict()
-class _R_Tags(baserlib.R_Generic):
- """ Quasiclass for tagging resources
+class _R_Tags(baserlib.OpcodeResource):
+ """Quasiclass for tagging resources.
Manages tags. When inheriting this class you must define the
TAG_LEVEL for it.
"""
TAG_LEVEL = None
+ GET_OPCODE = opcodes.OpTagsGet
+ PUT_OPCODE = opcodes.OpTagsSet
+ DELETE_OPCODE = opcodes.OpTagsDel
- def __init__(self, items, queryargs, req):
+ def __init__(self, items, queryargs, req, **kwargs):
"""A tag resource constructor.
We have to override the default to sort out cluster naming case.
"""
- baserlib.R_Generic.__init__(self, items, queryargs, req)
+ baserlib.OpcodeResource.__init__(self, items, queryargs, req, **kwargs)
if self.TAG_LEVEL == constants.TAG_CLUSTER:
self.name = None
Example: ["tag1", "tag2", "tag3"]
"""
- # pylint: disable-msg=W0212
- return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)
+ kind = self.TAG_LEVEL
- def PUT(self):
+ if kind in (constants.TAG_INSTANCE,
+ constants.TAG_NODEGROUP,
+ constants.TAG_NODE):
+ if not self.name:
+ raise http.HttpBadRequest("Missing name on tag request")
+
+ cl = self.GetClient(query=True)
+ tags = list(cl.QueryTags(kind, self.name))
+
+ elif kind == constants.TAG_CLUSTER:
+ assert not self.name
+ # TODO: Use query API?
+ ssc = ssconf.SimpleStore()
+ tags = ssc.GetClusterTags()
+
+ return list(tags)
+
+ def GetPutOpInput(self):
"""Add a set of tags.
The request as a list of strings should be PUT to this URI. And
you'll have back a job id.
"""
- # pylint: disable-msg=W0212
- if 'tag' not in self.queryargs:
- raise http.HttpBadRequest("Please specify tag(s) to add using the"
- " the 'tag' parameter")
- return baserlib._Tags_PUT(self.TAG_LEVEL,
- self.queryargs['tag'], name=self.name,
- dry_run=bool(self.dryRun()))
+ return ({}, {
+ "kind": self.TAG_LEVEL,
+ "name": self.name,
+ "tags": self.queryargs.get("tag", []),
+ "dry_run": self.dryRun(),
+ })
- def DELETE(self):
+ def GetDeleteOpInput(self):
"""Delete a tag.
In order to delete a set of tags, the DELETE
/tags?tag=[tag]&tag=[tag]
"""
- # pylint: disable-msg=W0212
- if 'tag' not in self.queryargs:
- # no we not gonna delete all tags
- raise http.HttpBadRequest("Cannot delete all tags - please specify"
- " tag(s) using the 'tag' parameter")
- return baserlib._Tags_DELETE(self.TAG_LEVEL,
- self.queryargs['tag'],
- name=self.name,
- dry_run=bool(self.dryRun()))
+ # Re-use code
+ return self.GetPutOpInput()
class R_2_instances_name_tags(_R_Tags):
TAG_LEVEL = constants.TAG_NODEGROUP
+class R_2_networks_name_tags(_R_Tags):
+ """/2/networks/[network_name]/tags resource.
+
+ Manages per-network tags.
+
+ """
+ TAG_LEVEL = constants.TAG_NETWORK
+
+
class R_2_tags(_R_Tags):
""" /2/tags resource.