4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Remote API resource implementations.
27 According to RFC2616 the main difference between PUT and POST is that
28 POST can create new resources but PUT can only create the resource the
29 URI was pointing to on the PUT request.
31 In the context of this module POST on ``/2/instances`` to change an existing
32 entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
33 new instance) with a name specified in the request.
35 Quoting from RFC2616, section 9.6::
37 The fundamental difference between the POST and PUT requests is reflected in
38 the different meaning of the Request-URI. The URI in a POST request
39 identifies the resource that will handle the enclosed entity. That resource
40 might be a data-accepting process, a gateway to some other protocol, or a
41 separate entity that accepts annotations. In contrast, the URI in a PUT
42 request identifies the entity enclosed with the request -- the user agent
43 knows what URI is intended and the server MUST NOT attempt to apply the
44 request to some other resource. If the server desires that the request be
45 applied to a different URI, it MUST send a 301 (Moved Permanently) response;
46 the user agent MAY then make its own decision regarding whether or not to
49 So when adding new methods, if they are operating on the URI entity itself,
PUT should be preferred over POST.
54 # pylint: disable-msg=C0103
56 # C0103: Invalid name, since the R_* names are not conforming
58 from ganeti import opcodes
59 from ganeti import http
60 from ganeti import constants
61 from ganeti import cli
62 from ganeti import rapi
64 from ganeti import compat
65 from ganeti.rapi import baserlib
# Fields shared by all top-level objects (instances, nodes, groups, jobs)
_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]

# Fields returned when querying instances
I_FIELDS = ["name", "admin_state", "os",
            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",

# Fields returned when querying nodes
N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnodes", "csockets",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",

# Fields used for bulk job queries (see R_2_jobs); timestamps are tuples
            "id", "ops", "status", "summary",
            "received_ts", "start_ts", "end_ts",

# Per-job detail fields are the bulk fields plus per-opcode information
J_FIELDS = J_FIELDS_BULK + [
# String values for node roles as exposed via the RAPI
_NR_DRAINED = "drained"
_NR_MASTER_CANDIDATE = "master-candidate"
# Backwards-compatible alias for the historically misspelled constant name;
# kept so existing references inside this module keep working
_NR_MASTER_CANDIATE = _NR_MASTER_CANDIDATE
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

# Maps internal node role constants to their RAPI string representation
_NR_MAP = {
  constants.NR_MASTER: _NR_MASTER,
  constants.NR_MCANDIDATE: _NR_MASTER_CANDIDATE,
  constants.NR_DRAINED: _NR_DRAINED,
  constants.NR_OFFLINE: _NR_OFFLINE,
  constants.NR_REGULAR: _NR_REGULAR,
  }

# Every defined node role constant must have a RAPI representation
assert frozenset(_NR_MAP.keys()) == constants.NR_ALL
# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"

# Feature string for node migration version 1
_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"

# Feature string for node evacuation with LU-generated jobs
_NODE_EVAC_RES1 = "node-evac-res1"

# Set of all optional features advertised to clients by R_2_features
ALL_FEATURES = frozenset([
  _INST_REINSTALL_REQV1,

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

    """Returns the remote API version.

    """
    # A plain integer constant is returned, not a job id
    return constants.RAPI_VERSION
class R_2_info(baserlib.R_Generic):
    """Returns cluster information.

    """
    # Queries the master daemon directly; no job is submitted
    client = baserlib.GetClient()
    return client.QueryClusterInfo()
class R_2_features(baserlib.R_Generic):
  """/2/features resource.

    """Returns list of optional RAPI features implemented.

    """
    # Advertises optional features so clients can adapt their requests
    return list(ALL_FEATURES)
class R_2_os(baserlib.R_Generic):
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
    # The OS diagnose runs as a job; we poll it synchronously here
    job_id = baserlib.SubmitJob([op], cl)
    # we use custom feedback function, instead of print we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    # Expand each OS name with its variants (e.g. "debian+etch")
    for (name, variants) in diagnose_data:
      os_names.extend(cli.CalculateOSNames(name, variants))
class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

    """Redistribute configuration to all nodes.

    """
    # Submits a job; the caller gets back the job id
    return baserlib.SubmitJob([opcodes.OpClusterRedistConf()])
class R_2_cluster_modify(baserlib.R_Generic):
  """/2/modify resource.

    """Modifies cluster parameters.

    """
    # Body parameters map straight onto OpClusterSetParams fields
    op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body,

    return baserlib.SubmitJob([op])
class R_2_jobs(baserlib.R_Generic):
    """Returns a dictionary of jobs.

    @return: a dictionary with jobs id and uri.

    """
    client = baserlib.GetClient()

    # Bulk mode returns all J_FIELDS_BULK fields for every job
      bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
      return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)

    # Otherwise only job ids mapped to their URIs are returned
    jobdata = map(compat.fst, client.QueryJobs(None, ["id"]))
    return baserlib.BuildUriList(jobdata, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))
class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

    """Returns a job status.

    @return: a dictionary with job parameters.
        - id: job ID as a number
        - status: current job status as a string
        - ops: involved OpCodes as a list of dictionaries for each
        - opstatus: OpCodes status as a list
        - opresult: OpCodes results as a list of lists

    """
    job_id = self.items[0]
    # QueryJobs returns a None entry for an unknown job id
    result = baserlib.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
      raise http.HttpNotFound()
    return baserlib.MapFields(J_FIELDS, result)

    """Cancel not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
class R_2_jobs_id_wait(baserlib.R_Generic):
  """/2/jobs/[job_id]/wait resource.

  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

    """Waits for job changes.

    """
    job_id = self.items[0]

    # All parameters come from the request body, not the query string
    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"

    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"

    client = baserlib.GetClient()
    # Bounded wait so the HTTP request cannot block indefinitely
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:

    (job_info, log_entries) = result

      "job_info": job_info,
      "log_entries": log_entries,
class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    # Bulk mode returns all N_FIELDS per node
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)

    # Otherwise only node names mapped to their URIs are returned
    nodesdata = client.QueryNodes([], ["name"], False)
    nodeslist = [row[0] for row in nodesdata]
    return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                 uri_fields=("id", "uri"))
class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resource.

    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()

    # Converts query errors (e.g. unknown node) into proper HTTP errors
    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[node_name], fields=N_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])
class R_2_nodes_name_role(baserlib.R_Generic):
  """ /2/nodes/[node_name]/role resource.

    """Returns the current node role.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    # Translate the internal role constant into its RAPI string
    return _NR_MAP[result[0][0]]

    """Sets the node role.

    """
    # The body is the bare role string, not a JSON object
    if not isinstance(self.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.request_body

    # Each role sets one flag; the others are left as None (unchanged/cleared)
    if role == _NR_REGULAR:

    elif role == _NR_MASTER_CANDIATE:
      offline = drained = None

    elif role == _NR_DRAINED:
      candidate = offline = None

    elif role == _NR_OFFLINE:
      candidate = drained = None

      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpNodeSetParams(node_name=node_name,
                                 master_candidate=candidate,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])
class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

    """Evacuate all instances off a node.

    """
    # Node name and dry-run flag come from the URI, not the body
    op = baserlib.FillOpcode(opcodes.OpNodeEvacuate, self.request_body, {
      "node_name": self.items[0],
      "dry_run": self.dryRun(),

    return baserlib.SubmitJob([op])
class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

    """Migrate all primary instances from a node.

    """
    node_name = self.items[0]

      # Support old-style requests
      if "live" in self.queryargs and "mode" in self.queryargs:
        raise http.HttpBadRequest("Only one of 'live' and 'mode' should"

      if "live" in self.queryargs:
        # "live" is a boolean flag: 1 (the default) means live migration
        if self._checkIntVariable("live", default=1):
          mode = constants.HT_MIGRATION_LIVE
          mode = constants.HT_MIGRATION_NONLIVE
        # New-style requests pass the migration mode explicitly
        mode = self._checkStringVariable("mode", default=None)

      data = self.request_body

    op = baserlib.FillOpcode(opcodes.OpNodeMigrate, data, {
      "node_name": node_name,

    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  # LUNodeQueryStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
      raise http.HttpBadRequest("Missing the required 'storage_type'"

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"

    # "output_fields" is passed as a comma-separated string
    op = opcodes.OpNodeQueryStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
      raise http.HttpBadRequest("Missing the required 'storage_type'"

    name = self._checkStringVariable("name", None)
      raise http.HttpBadRequest("Missing the required 'name'"

    # Only the "allocatable" flag is changed here; default is allocatable
    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpNodeModifyStorage(node_name=node_name,
                                     storage_type=storage_type,

    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
      raise http.HttpBadRequest("Missing the required 'storage_type'"

    name = self._checkStringVariable("name", None)
      raise http.HttpBadRequest("Missing the required 'name'"

    # Repairs are handled by a dedicated opcode, submitted as a job
    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
    return baserlib.SubmitJob([op])
def _ParseCreateGroupRequest(data, dry_run):
  """Parses a request for creating a node group.

  @type data: dict
  @param data: the body received by the create request
  @type dry_run: bool
  @param dry_run: whether to perform a dry run
  @rtype: L{opcodes.OpGroupAdd}
  @return: Group creation opcode

  """
  # Request field "name" maps to opcode field "group_name"
    "name": "group_name",

  return baserlib.FillOpcode(opcodes.OpGroupAdd, data, override,
class R_2_groups(baserlib.R_Generic):
  """/2/groups resource.

    """Returns a list of all node groups.

    """
    client = baserlib.GetClient()

    # Bulk mode returns all G_FIELDS per group
      bulkdata = client.QueryGroups([], G_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, G_FIELDS)

    # Otherwise only group names mapped to their URIs are returned
    data = client.QueryGroups([], ["name"], False)
    groupnames = [row[0] for row in data]
    return baserlib.BuildUriList(groupnames, "/2/groups/%s",
                                 uri_fields=("name", "uri"))

    """Create a node group.

    """
    # Body must be a dict; detailed parsing happens in the helper
    baserlib.CheckType(self.request_body, dict, "Body contents")
    op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
    return baserlib.SubmitJob([op])
class R_2_groups_name(baserlib.R_Generic):
  """/2/groups/[group_name] resource.

    """Send information about a node group.

    """
    group_name = self.items[0]
    client = baserlib.GetClient()

    # Converts query errors (e.g. unknown group) into proper HTTP errors
    result = baserlib.HandleItemQueryErrors(client.QueryGroups,
                                            names=[group_name], fields=G_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(G_FIELDS, result[0])

    """Delete a node group.

    """
    # dry_run only simulates the removal
    op = opcodes.OpGroupRemove(group_name=self.items[0],
                               dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
def _ParseModifyGroupRequest(name, data):
  """Parses a request for modifying a node group.

  @type name: string
  @param name: name of the node group to modify (from the URI)
  @type data: dict
  @param data: the body received by the modify request
  @rtype: L{opcodes.OpGroupSetParams}
  @return: Group modify opcode

  """
  # The group name from the URI overrides any value in the body
  return baserlib.FillOpcode(opcodes.OpGroupSetParams, data, {
class R_2_groups_name_modify(baserlib.R_Generic):
  """/2/groups/[group_name]/modify resource.

    """Changes some parameters of node group.

    """
    # Body must be a dict; detailed parsing happens in the helper
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseModifyGroupRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
def _ParseRenameGroupRequest(name, data, dry_run):
  """Parses a request for renaming a node group.

  @type name: string
  @param name: name of the node group to rename
  @type data: dict
  @param data: the body received by the rename request
  @type dry_run: bool
  @param dry_run: whether to perform a dry run

  @rtype: L{opcodes.OpGroupRename}
  @return: Node group rename opcode

  """
  return baserlib.FillOpcode(opcodes.OpGroupRename, data, {
class R_2_groups_name_rename(baserlib.R_Generic):
  """/2/groups/[group_name]/rename resource.

    """Changes the name of a node group.

    """
    # Body must be a dict; detailed parsing happens in the helper
    baserlib.CheckType(self.request_body, dict, "Body contents")
    op = _ParseRenameGroupRequest(self.items[0], self.request_body,
    return baserlib.SubmitJob([op])
class R_2_groups_name_assign_nodes(baserlib.R_Generic):
  """/2/groups/[group_name]/assign-nodes resource.

    """Assigns nodes to a group.

    """
    # Group name, dry-run and force flags come from URI/query, not the body
    op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      "force": self.useForce(),

    return baserlib.SubmitJob([op])
def _ParseInstanceCreateRequestVersion1(data, dry_run):
  """Parses an instance creation request version 1.

  @type data: dict
  @param data: the body received by the create request
  @type dry_run: bool
  @param dry_run: whether to perform a dry run
  @rtype: L{opcodes.OpInstanceCreate}
  @return: Instance creation opcode

  """
  # Request field "name" maps to opcode field "instance_name"
    "name": "instance_name",

  return baserlib.FillOpcode(opcodes.OpInstanceCreate, data, override,
class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    # Bulk mode returns all I_FIELDS per instance
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)

    # Otherwise only instance names mapped to their URIs are returned
    instancesdata = client.QueryInstances([], ["name"], use_locking)
    instanceslist = [row[0] for row in instancesdata]
    return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                 uri_fields=("id", "uri"))

    """Create an instance.

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      raise http.HttpBadRequest("Instance creation request version 0 is no"
    elif data_version == 1:
      data = self.request_body.copy()
      # Remove "__version__"
      data.pop(_REQ_DATA_VERSION, None)
      op = _ParseInstanceCreateRequestVersion1(data, self.dryRun())
      raise http.HttpBadRequest("Unsupported request data version %s" %

    return baserlib.SubmitJob([op])
class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resource.

    """Send information about an instance.

    """
    client = baserlib.GetClient()
    instance_name = self.items[0]

    # Converts query errors (e.g. unknown instance) into proper HTTP errors
    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[instance_name],
                                            use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])

    """Delete an instance.

    """
    # ignore_failures is always False via RAPI; dry_run only simulates
    op = opcodes.OpInstanceRemove(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])
class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

    """Request detailed instance information.

    """
    instance_name = self.items[0]
    # "static" avoids querying the hypervisor for live data
    static = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpInstanceQueryData(instances=[instance_name],
    return baserlib.SubmitJob([op])
class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    instance_name = self.items[0]
    # Default reboot type is "hard"
    reboot_type = self.queryargs.get("type",
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self._checkIntVariable("ignore_secondaries"))
    op = opcodes.OpInstanceReboot(instance_name=instance_name,
                                  reboot_type=reboot_type,
                                  ignore_secondaries=ignore_secondaries,
                                  dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

    """Startup an instance.

    The URI takes force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable("force"))
    # "no_remember" starts the instance without recording the state change
    no_remember = bool(self._checkIntVariable("no_remember"))
    op = opcodes.OpInstanceStartup(instance_name=instance_name,
                                   dry_run=bool(self.dryRun()),
                                   no_remember=no_remember)

    return baserlib.SubmitJob([op])
def _ParseShutdownInstanceRequest(name, data, dry_run, no_remember):
  """Parses a request for an instance shutdown.

  @type name: string
  @param name: name of the instance to shut down (from the URI)
  @type data: dict
  @param data: the body received by the shutdown request
  @type dry_run: bool
  @param dry_run: whether to perform a dry run
  @type no_remember: bool
  @param no_remember: whether to skip recording the state change
  @rtype: L{opcodes.OpInstanceShutdown}
  @return: Instance shutdown opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceShutdown, data, {
    "instance_name": name,
    "no_remember": no_remember,
class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

    """Shutdown an instance.

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    # "no_remember" comes from the query string, the rest from the body
    no_remember = bool(self._checkIntVariable("no_remember"))
    op = _ParseShutdownInstanceRequest(self.items[0], self.request_body,
                                       bool(self.dryRun()), no_remember)

    return baserlib.SubmitJob([op])
def _ParseInstanceReinstallRequest(name, data):
  """Parses a request for reinstalling an instance.

  @type name: string
  @param name: name of the instance to reinstall
  @type data: dict
  @param data: the body received by the reinstall request

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  ostype = baserlib.CheckParameter(data, "os", default=None)
  start = baserlib.CheckParameter(data, "start", exptype=bool,
  osparams = baserlib.CheckParameter(data, "osparams", default=None)

  # Reinstall is a job of several opcodes: shutdown, reinstall, then
  # (optionally) startup
    opcodes.OpInstanceShutdown(instance_name=name),
    opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,

    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))
class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    # Query parameters and a request body are mutually exclusive
    if self.request_body:
        raise http.HttpBadRequest("Can't combine query and body parameters")

      body = self.request_body

      # Legacy interface, do not modify/extend
        "os": self._checkStringVariable("os"),
        "start": not self._checkIntVariable("nostartup"),

    ops = _ParseInstanceReinstallRequest(self.items[0], body)

    return baserlib.SubmitJob(ops)
def _ParseInstanceReplaceDisksRequest(name, data):
  """Parses a request for replacing an instance's disks.

  @rtype: L{opcodes.OpInstanceReplaceDisks}
  @return: Instance replace-disks opcode

  """
    "instance_name": name,

  raw_disks = data["disks"]

  # Disks are expected as a list of integer indices
  if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102
    # Backwards compatibility for strings of the format "1, 2, 3"
      data["disks"] = [int(part) for part in raw_disks.split(",")]
    except (TypeError, ValueError), err:
      raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))

  return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)
class R_2_instances_name_replace_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/replace-disks resource.

    """Replaces disks on an instance.

    """
    # Body parsing and validation happen in the helper
    op = _ParseInstanceReplaceDisksRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
class R_2_instances_name_activate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/activate-disks resource.

    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    instance_name = self.items[0]
    ignore_size = bool(self._checkIntVariable("ignore_size"))

    op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
                                         ignore_size=ignore_size)

    return baserlib.SubmitJob([op])
class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/deactivate-disks resource.

    """Deactivate disks for an instance.

    """
    instance_name = self.items[0]

    # No extra parameters; the opcode only needs the instance name
    op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)

    return baserlib.SubmitJob([op])
class R_2_instances_name_prepare_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/prepare-export resource.

    """Prepares an export for an instance.

    """
    instance_name = self.items[0]
    # The export mode comes from the query string
    mode = self._checkStringVariable("mode")

    op = opcodes.OpBackupPrepare(instance_name=instance_name,

    return baserlib.SubmitJob([op])
def _ParseExportInstanceRequest(name, data):
  """Parses a request for an instance export.

  @type name: string
  @param name: name of the instance to export (from the URI)
  @type data: dict
  @param data: the body received by the export request
  @rtype: L{opcodes.OpBackupExport}
  @return: Instance export opcode

  """
  # Rename "destination" to "target_node"
    data["target_node"] = data.pop("destination")

  return baserlib.FillOpcode(opcodes.OpBackupExport, data, {
    "instance_name": name,
class R_2_instances_name_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/export resource.

    """Exports an instance.

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    # Body parsing and field renaming happen in the helper
    op = _ParseExportInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
def _ParseMigrateInstanceRequest(name, data):
  """Parses a request for an instance migration.

  @type name: string
  @param name: name of the instance to migrate (from the URI)
  @type data: dict
  @param data: the body received by the migrate request
  @rtype: L{opcodes.OpInstanceMigrate}
  @return: Instance migration opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceMigrate, data, {
    "instance_name": name,
class R_2_instances_name_migrate(baserlib.R_Generic):
  """/2/instances/[instance_name]/migrate resource.

    """Migrates an instance.

    """
    # Body must be a dict; detailed parsing happens in the helper
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
class R_2_instances_name_failover(baserlib.R_Generic):
  """/2/instances/[instance_name]/failover resource.

    """Does a failover of an instance.

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    # The instance name from the URI overrides any value in the body
    op = baserlib.FillOpcode(opcodes.OpInstanceFailover, self.request_body, {
      "instance_name": self.items[0],

    return baserlib.SubmitJob([op])
def _ParseRenameInstanceRequest(name, data):
  """Parses a request for renaming an instance.

  @type name: string
  @param name: name of the instance to rename (from the URI)
  @type data: dict
  @param data: the body received by the rename request
  @rtype: L{opcodes.OpInstanceRename}
  @return: Instance rename opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceRename, data, {
    "instance_name": name,
class R_2_instances_name_rename(baserlib.R_Generic):
  """/2/instances/[instance_name]/rename resource.

    """Changes the name of an instance.

    """
    # Body must be a dict; detailed parsing happens in the helper
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseRenameInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
def _ParseModifyInstanceRequest(name, data):
  """Parses a request for modifying an instance.

  @type name: string
  @param name: name of the instance to modify (from the URI)
  @type data: dict
  @param data: the body received by the modify request
  @rtype: L{opcodes.OpInstanceSetParams}
  @return: Instance modify opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceSetParams, data, {
    "instance_name": name,
class R_2_instances_name_modify(baserlib.R_Generic):
  """/2/instances/[instance_name]/modify resource.

    """Changes some parameters of an instance.

    """
    # Body must be a dict; detailed parsing happens in the helper
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseModifyInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
class R_2_instances_name_disk_grow(baserlib.R_Generic):
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.

    """Increases the size of an instance disk.

    """
    # items[0] is the instance name, items[1] the disk index from the URI
    op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
      "instance_name": self.items[0],
      "disk": int(self.items[1]),

    return baserlib.SubmitJob([op])
class R_2_instances_name_console(baserlib.R_Generic):
  """/2/instances/[instance_name]/console resource.

  # Console information is sensitive, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

    """Request information for connecting to instance's console.

    @return: Serialized instance console description, see
             L{objects.InstanceConsole}

    """
    client = baserlib.GetClient()

    # Double unpacking: one instance queried, one field requested
    ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)

      raise http.HttpServiceUnavailable("Instance console unavailable")

    assert isinstance(console, dict)
def _GetQueryFields(args):
  """Extracts the mandatory "fields" query argument.

  @type args: dict
  @param args: parsed query arguments

  """
  fields = args["fields"]
    raise http.HttpBadRequest("Missing 'fields' query argument")

  # Query arguments are lists; only the first value is used
  return _SplitQueryFields(fields[0])
1281 def _SplitQueryFields(fields):
1285 return [i.strip() for i in fields.split(",")]
class R_2_query(baserlib.R_Generic):
  """/2/query/[resource] resource.

  # Results might contain sensitive information
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def _Query(self, fields, filter_):
    # Runs the query against the master daemon and serializes the response
    return baserlib.GetClient().Query(self.items[0], fields, filter_).ToDict()

    """Returns resource information.

    @return: Query result, see L{objects.QueryResponse}

    """
    return self._Query(_GetQueryFields(self.queryargs), None)

    """Submits job querying for resources.

    @return: Query result, see L{objects.QueryResponse}

    """
    body = self.request_body

    baserlib.CheckType(body, dict, "Body contents")

    # "fields" may come from the body, falling back to the query string
      fields = body["fields"]
      fields = _GetQueryFields(self.queryargs)

    return self._Query(fields, self.request_body.get("filter", None))
class R_2_query_fields(baserlib.R_Generic):
  """/2/query/[resource]/fields resource.

    """Retrieves list of available fields for a resource.

    @return: List of serialized L{objects.QueryFieldDefinition}

    """
    # "fields" is optional here; without it all fields are described
      raw_fields = self.queryargs["fields"]
      fields = _SplitQueryFields(raw_fields[0])

    return baserlib.GetClient().QueryFields(self.items[0], fields).ToDict()
class _R_Tags(baserlib.R_Generic):
  """ Quasiclass for tagging resources

  Manages tags. When inheriting this class you must define the

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    # Cluster-level tags have no item name in the URI
    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      self.name = items[0]

    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable-msg=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

    """Add a set of tags.

    The request as a list of strings should be PUT to this URI. And
    you'll have back a job id.

    """
    # pylint: disable-msg=W0212
    if "tag" not in self.queryargs:
      # NOTE(review): the concatenated message reads "using the the 'tag'
      # parameter" - duplicated word; left untouched here as it is a
      # runtime string
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " the 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs["tag"], name=self.name,
                              dry_run=bool(self.dryRun()))

    In order to delete a set of tags, the DELETE
    request should be addressed to URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # pylint: disable-msg=W0212
    if "tag" not in self.queryargs:
      # Refuse to delete all tags when none were specified explicitly
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs["tag"],
                                 dry_run=bool(self.dryRun()))
class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  # Tag operations inherited from _R_Tags apply at instance level
  TAG_LEVEL = constants.TAG_INSTANCE
class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  # Tag operations inherited from _R_Tags apply at node level
  TAG_LEVEL = constants.TAG_NODE
class R_2_groups_name_tags(_R_Tags):
  """ /2/groups/[group_name]/tags resource.

  Manages per-nodegroup tags.

  """
  # Tag operations inherited from _R_Tags apply at node group level
  TAG_LEVEL = constants.TAG_NODEGROUP
class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  # Cluster-level tags; _R_Tags.__init__ handles the missing URI item name
  TAG_LEVEL = constants.TAG_CLUSTER