entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
new instance) with a name specified in the request.
-Quoting from RFC2616, section 9.6:
+Quoting from RFC2616, section 9.6::
The fundamental difference between the POST and PUT requests is reflected in
the different meaning of the Request-URI. The URI in a POST request
from ganeti import rapi
from ganeti import ht
from ganeti import compat
+from ganeti import ssconf
from ganeti.rapi import baserlib
_WFJC_TIMEOUT = 10
-class R_version(baserlib.R_Generic):
+class R_root(baserlib.ResourceBase):
+ """/ resource.
+
+ """
+ @staticmethod
+ def GET():
+ """Supported for legacy reasons.
+
+ """
+ return None
+
+
+class R_version(baserlib.ResourceBase):
"""/version resource.
This resource should be used to determine the remote API version and
return constants.RAPI_VERSION
-class R_2_info(baserlib.R_Generic):
+class R_2_info(baserlib.ResourceBase):
"""/2/info resource.
"""
- @staticmethod
- def GET():
+ def GET(self):
"""Returns cluster information.
"""
- client = baserlib.GetClient()
+ client = self.GetClient()
return client.QueryClusterInfo()
-class R_2_features(baserlib.R_Generic):
+class R_2_features(baserlib.ResourceBase):
"""/2/features resource.
"""
return list(ALL_FEATURES)
-class R_2_os(baserlib.R_Generic):
+class R_2_os(baserlib.ResourceBase):
"""/2/os resource.
"""
- @staticmethod
- def GET():
+ def GET(self):
"""Return a list of all OSes.
Can return error 500 in case of a problem.
Example: ["debian-etch"]
"""
- cl = baserlib.GetClient()
+ cl = self.GetClient()
op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
- job_id = baserlib.SubmitJob([op], cl)
+ job_id = self.SubmitJob([op], cl=cl)
# we use a custom feedback function: instead of printing, we log the status
result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
diagnose_data = result[0]
return os_names
-class R_2_redist_config(baserlib.R_Generic):
+class R_2_redist_config(baserlib.ResourceBase):
"""/2/redistribute-config resource.
"""
- @staticmethod
- def PUT():
+ def PUT(self):
"""Redistribute configuration to all nodes.
"""
- return baserlib.SubmitJob([opcodes.OpClusterRedistConf()])
+ return self.SubmitJob([opcodes.OpClusterRedistConf()])
-class R_2_cluster_modify(baserlib.R_Generic):
+class R_2_cluster_modify(baserlib.ResourceBase):
"""/2/modify resource.
"""
op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body,
None)
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
-class R_2_jobs(baserlib.R_Generic):
+class R_2_jobs(baserlib.ResourceBase):
"""/2/jobs resource.
"""
@return: a dictionary with jobs id and uri.
"""
- client = baserlib.GetClient()
+ client = self.GetClient()
if self.useBulk():
bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
uri_fields=("id", "uri"))
-class R_2_jobs_id(baserlib.R_Generic):
+class R_2_jobs_id(baserlib.ResourceBase):
"""/2/jobs/[job_id] resource.
"""
"""
job_id = self.items[0]
- result = baserlib.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
+ result = self.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
if result is None:
raise http.HttpNotFound()
return baserlib.MapFields(J_FIELDS, result)
"""
job_id = self.items[0]
- result = baserlib.GetClient().CancelJob(job_id)
+ result = self.GetClient().CancelJob(job_id)
return result
-class R_2_jobs_id_wait(baserlib.R_Generic):
+class R_2_jobs_id_wait(baserlib.ResourceBase):
"""/2/jobs/[job_id]/wait resource.
"""
raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
" be a number")
- client = baserlib.GetClient()
+ client = self.GetClient()
result = client.WaitForJobChangeOnce(job_id, fields,
prev_job_info, prev_log_serial,
timeout=_WFJC_TIMEOUT)
}
-class R_2_nodes(baserlib.R_Generic):
+class R_2_nodes(baserlib.ResourceBase):
"""/2/nodes resource.
"""
"""Returns a list of all nodes.
"""
- client = baserlib.GetClient()
+ client = self.GetClient()
if self.useBulk():
bulkdata = client.QueryNodes([], N_FIELDS, False)
uri_fields=("id", "uri"))
-class R_2_nodes_name(baserlib.R_Generic):
+class R_2_nodes_name(baserlib.ResourceBase):
"""/2/nodes/[node_name] resource.
"""
"""
node_name = self.items[0]
- client = baserlib.GetClient()
+ client = self.GetClient()
result = baserlib.HandleItemQueryErrors(client.QueryNodes,
names=[node_name], fields=N_FIELDS,
return baserlib.MapFields(N_FIELDS, result[0])
-class R_2_nodes_name_role(baserlib.R_Generic):
+class R_2_nodes_name_role(baserlib.ResourceBase):
""" /2/nodes/[node_name]/role resource.
"""
"""
node_name = self.items[0]
- client = baserlib.GetClient()
+ client = self.GetClient()
result = client.QueryNodes(names=[node_name], fields=["role"],
use_locking=self.useLocking())
drained=drained,
force=bool(self.useForce()))
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
-class R_2_nodes_name_evacuate(baserlib.R_Generic):
+class R_2_nodes_name_evacuate(baserlib.ResourceBase):
"""/2/nodes/[node_name]/evacuate resource.
"""
"dry_run": self.dryRun(),
})
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
-class R_2_nodes_name_migrate(baserlib.R_Generic):
+class R_2_nodes_name_migrate(baserlib.ResourceBase):
"""/2/nodes/[node_name]/migrate resource.
"""
"node_name": node_name,
})
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
-class R_2_nodes_name_storage(baserlib.R_Generic):
+class R_2_nodes_name_storage(baserlib.ResourceBase):
"""/2/nodes/[node_name]/storage resource.
"""
op = opcodes.OpNodeQueryStorage(nodes=[node_name],
storage_type=storage_type,
output_fields=output_fields.split(","))
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
-class R_2_nodes_name_storage_modify(baserlib.R_Generic):
+class R_2_nodes_name_storage_modify(baserlib.ResourceBase):
"""/2/nodes/[node_name]/storage/modify resource.
"""
storage_type=storage_type,
name=name,
changes=changes)
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
-class R_2_nodes_name_storage_repair(baserlib.R_Generic):
+class R_2_nodes_name_storage_repair(baserlib.ResourceBase):
"""/2/nodes/[node_name]/storage/repair resource.
"""
op = opcodes.OpRepairNodeStorage(node_name=node_name,
storage_type=storage_type,
name=name)
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
def _ParseCreateGroupRequest(data, dry_run):
rename=rename)
-class R_2_groups(baserlib.R_Generic):
+class R_2_groups(baserlib.ResourceBase):
"""/2/groups resource.
"""
"""Returns a list of all node groups.
"""
- client = baserlib.GetClient()
+ client = self.GetClient()
if self.useBulk():
bulkdata = client.QueryGroups([], G_FIELDS, False)
"""
baserlib.CheckType(self.request_body, dict, "Body contents")
op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
-class R_2_groups_name(baserlib.R_Generic):
+class R_2_groups_name(baserlib.ResourceBase):
"""/2/groups/[group_name] resource.
"""
"""
group_name = self.items[0]
- client = baserlib.GetClient()
+ client = self.GetClient()
result = baserlib.HandleItemQueryErrors(client.QueryGroups,
names=[group_name], fields=G_FIELDS,
op = opcodes.OpGroupRemove(group_name=self.items[0],
dry_run=bool(self.dryRun()))
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
def _ParseModifyGroupRequest(name, data):
})
-
-class R_2_groups_name_modify(baserlib.R_Generic):
+class R_2_groups_name_modify(baserlib.ResourceBase):
"""/2/groups/[group_name]/modify resource.
"""
op = _ParseModifyGroupRequest(self.items[0], self.request_body)
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
def _ParseRenameGroupRequest(name, data, dry_run):
})
-class R_2_groups_name_rename(baserlib.R_Generic):
+class R_2_groups_name_rename(baserlib.ResourceBase):
"""/2/groups/[group_name]/rename resource.
"""
baserlib.CheckType(self.request_body, dict, "Body contents")
op = _ParseRenameGroupRequest(self.items[0], self.request_body,
self.dryRun())
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
-class R_2_groups_name_assign_nodes(baserlib.R_Generic):
+class R_2_groups_name_assign_nodes(baserlib.ResourceBase):
"""/2/groups/[group_name]/assign-nodes resource.
"""
"force": self.useForce(),
})
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
def _ParseInstanceCreateRequestVersion1(data, dry_run):
rename=rename)
-class R_2_instances(baserlib.R_Generic):
+class R_2_instances(baserlib.ResourceBase):
"""/2/instances resource.
"""
"""Returns a list of all available instances.
"""
- client = baserlib.GetClient()
+ client = self.GetClient()
use_locking = self.useLocking()
if self.useBulk():
raise http.HttpBadRequest("Unsupported request data version %s" %
data_version)
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
-class R_2_instances_name(baserlib.R_Generic):
+class R_2_instances_name(baserlib.ResourceBase):
"""/2/instances/[instance_name] resource.
"""
"""Send information about an instance.
"""
- client = baserlib.GetClient()
+ client = self.GetClient()
instance_name = self.items[0]
result = baserlib.HandleItemQueryErrors(client.QueryInstances,
op = opcodes.OpInstanceRemove(instance_name=self.items[0],
ignore_failures=False,
dry_run=bool(self.dryRun()))
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
-class R_2_instances_name_info(baserlib.R_Generic):
+class R_2_instances_name_info(baserlib.ResourceBase):
"""/2/instances/[instance_name]/info resource.
"""
op = opcodes.OpInstanceQueryData(instances=[instance_name],
static=static)
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
-class R_2_instances_name_reboot(baserlib.R_Generic):
+class R_2_instances_name_reboot(baserlib.ResourceBase):
"""/2/instances/[instance_name]/reboot resource.
Implements an instance reboot.
ignore_secondaries=ignore_secondaries,
dry_run=bool(self.dryRun()))
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
-class R_2_instances_name_startup(baserlib.R_Generic):
+class R_2_instances_name_startup(baserlib.ResourceBase):
"""/2/instances/[instance_name]/startup resource.
Implements an instance startup.
dry_run=bool(self.dryRun()),
no_remember=no_remember)
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
def _ParseShutdownInstanceRequest(name, data, dry_run, no_remember):
})
-class R_2_instances_name_shutdown(baserlib.R_Generic):
+class R_2_instances_name_shutdown(baserlib.ResourceBase):
"""/2/instances/[instance_name]/shutdown resource.
Implements an instance shutdown.
op = _ParseShutdownInstanceRequest(self.items[0], self.request_body,
bool(self.dryRun()), no_remember)
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
def _ParseInstanceReinstallRequest(name, data):
return ops
-class R_2_instances_name_reinstall(baserlib.R_Generic):
+class R_2_instances_name_reinstall(baserlib.ResourceBase):
"""/2/instances/[instance_name]/reinstall resource.
Implements an instance reinstall.
ops = _ParseInstanceReinstallRequest(self.items[0], body)
- return baserlib.SubmitJob(ops)
+ return self.SubmitJob(ops)
def _ParseInstanceReplaceDisksRequest(name, data):
return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)
-class R_2_instances_name_replace_disks(baserlib.R_Generic):
+class R_2_instances_name_replace_disks(baserlib.ResourceBase):
"""/2/instances/[instance_name]/replace-disks resource.
"""
"""
op = _ParseInstanceReplaceDisksRequest(self.items[0], self.request_body)
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
-class R_2_instances_name_activate_disks(baserlib.R_Generic):
+class R_2_instances_name_activate_disks(baserlib.ResourceBase):
"""/2/instances/[instance_name]/activate-disks resource.
"""
op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
ignore_size=ignore_size)
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
-class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
+class R_2_instances_name_deactivate_disks(baserlib.ResourceBase):
"""/2/instances/[instance_name]/deactivate-disks resource.
"""
op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
-class R_2_instances_name_prepare_export(baserlib.R_Generic):
+class R_2_instances_name_prepare_export(baserlib.ResourceBase):
"""/2/instances/[instance_name]/prepare-export resource.
"""
op = opcodes.OpBackupPrepare(instance_name=instance_name,
mode=mode)
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
def _ParseExportInstanceRequest(name, data):
})
-class R_2_instances_name_export(baserlib.R_Generic):
+class R_2_instances_name_export(baserlib.ResourceBase):
"""/2/instances/[instance_name]/export resource.
"""
op = _ParseExportInstanceRequest(self.items[0], self.request_body)
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
def _ParseMigrateInstanceRequest(name, data):
})
-class R_2_instances_name_migrate(baserlib.R_Generic):
+class R_2_instances_name_migrate(baserlib.ResourceBase):
"""/2/instances/[instance_name]/migrate resource.
"""
op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
-class R_2_instances_name_failover(baserlib.R_Generic):
+class R_2_instances_name_failover(baserlib.ResourceBase):
"""/2/instances/[instance_name]/failover resource.
"""
"instance_name": self.items[0],
})
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
def _ParseRenameInstanceRequest(name, data):
})
-class R_2_instances_name_rename(baserlib.R_Generic):
+class R_2_instances_name_rename(baserlib.ResourceBase):
"""/2/instances/[instance_name]/rename resource.
"""
op = _ParseRenameInstanceRequest(self.items[0], self.request_body)
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
def _ParseModifyInstanceRequest(name, data):
})
-class R_2_instances_name_modify(baserlib.R_Generic):
+class R_2_instances_name_modify(baserlib.ResourceBase):
"""/2/instances/[instance_name]/modify resource.
"""
op = _ParseModifyInstanceRequest(self.items[0], self.request_body)
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
-class R_2_instances_name_disk_grow(baserlib.R_Generic):
+class R_2_instances_name_disk_grow(baserlib.ResourceBase):
"""/2/instances/[instance_name]/disk/[disk_index]/grow resource.
"""
"disk": int(self.items[1]),
})
- return baserlib.SubmitJob([op])
+ return self.SubmitJob([op])
-class R_2_instances_name_console(baserlib.R_Generic):
+class R_2_instances_name_console(baserlib.ResourceBase):
"""/2/instances/[instance_name]/console resource.
"""
L{objects.InstanceConsole}
"""
- client = baserlib.GetClient()
+ client = self.GetClient()
((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)
return [i.strip() for i in fields.split(",")]
-class R_2_query(baserlib.R_Generic):
+class R_2_query(baserlib.ResourceBase):
"""/2/query/[resource] resource.
"""
GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
def _Query(self, fields, filter_):
- return baserlib.GetClient().Query(self.items[0], fields, filter_).ToDict()
+ return self.GetClient().Query(self.items[0], fields, filter_).ToDict()
def GET(self):
"""Returns resource information.
return self._Query(fields, self.request_body.get("filter", None))
-class R_2_query_fields(baserlib.R_Generic):
+class R_2_query_fields(baserlib.ResourceBase):
"""/2/query/[resource]/fields resource.
"""
else:
fields = _SplitQueryFields(raw_fields[0])
- return baserlib.GetClient().QueryFields(self.items[0], fields).ToDict()
+ return self.GetClient().QueryFields(self.items[0], fields).ToDict()
-class _R_Tags(baserlib.R_Generic):
+class _R_Tags(baserlib.ResourceBase):
""" Quasiclass for tagging resources
Manages tags. When inheriting this class you must define the
We have to override the default to sort out cluster naming case.
"""
- baserlib.R_Generic.__init__(self, items, queryargs, req)
+ baserlib.ResourceBase.__init__(self, items, queryargs, req)
if self.TAG_LEVEL == constants.TAG_CLUSTER:
self.name = None
Example: ["tag1", "tag2", "tag3"]
"""
- # pylint: disable-msg=W0212
- return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)
+ kind = self.TAG_LEVEL
+
+ if kind in (constants.TAG_INSTANCE,
+ constants.TAG_NODEGROUP,
+ constants.TAG_NODE):
+ if not self.name:
+ raise http.HttpBadRequest("Missing name on tag request")
+
+ cl = self.GetClient()
+ if kind == constants.TAG_INSTANCE:
+ fn = cl.QueryInstances
+ elif kind == constants.TAG_NODEGROUP:
+ fn = cl.QueryGroups
+ else:
+ fn = cl.QueryNodes
+ result = fn(names=[self.name], fields=["tags"], use_locking=False)
+ if not result or not result[0]:
+ raise http.HttpBadGateway("Invalid response from tag query")
+ tags = result[0][0]
+
+ elif kind == constants.TAG_CLUSTER:
+ assert not self.name
+ # TODO: Use query API?
+ ssc = ssconf.SimpleStore()
+ tags = ssc.GetClusterTags()
+
+ return list(tags)
def PUT(self):
"""Add a set of tags.
if "tag" not in self.queryargs:
raise http.HttpBadRequest("Please specify tag(s) to add using the"
" the 'tag' parameter")
- return baserlib._Tags_PUT(self.TAG_LEVEL,
- self.queryargs["tag"], name=self.name,
- dry_run=bool(self.dryRun()))
+ op = opcodes.OpTagsSet(kind=self.TAG_LEVEL, name=self.name,
+ tags=self.queryargs["tag"], dry_run=self.dryRun())
+ return self.SubmitJob([op])
def DELETE(self):
"""Delete a tag.
# no, we're not going to delete all tags
raise http.HttpBadRequest("Cannot delete all tags - please specify"
" tag(s) using the 'tag' parameter")
- return baserlib._Tags_DELETE(self.TAG_LEVEL,
- self.queryargs["tag"],
- name=self.name,
- dry_run=bool(self.dryRun()))
+ op = opcodes.OpTagsDel(kind=self.TAG_LEVEL, name=self.name,
+ tags=self.queryargs["tag"], dry_run=self.dryRun())
+ return self.SubmitJob([op])
class R_2_instances_name_tags(_R_Tags):