4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Remote API resource implementations.
27 According to RFC2616 the main difference between PUT and POST is that
28 POST can create new resources but PUT can only create the resource the
29 URI was pointing to on the PUT request.
31 In the context of this module POST on ``/2/instances`` to change an existing
32 entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
33 new instance) with a name specified in the request.
35 Quoting from RFC2616, section 9.6::
37 The fundamental difference between the POST and PUT requests is reflected in
38 the different meaning of the Request-URI. The URI in a POST request
39 identifies the resource that will handle the enclosed entity. That resource
40 might be a data-accepting process, a gateway to some other protocol, or a
41 separate entity that accepts annotations. In contrast, the URI in a PUT
42 request identifies the entity enclosed with the request -- the user agent
43 knows what URI is intended and the server MUST NOT attempt to apply the
44 request to some other resource. If the server desires that the request be
45 applied to a different URI, it MUST send a 301 (Moved Permanently) response;
46 the user agent MAY then make its own decision regarding whether or not to
49 So when adding new methods, if they are operating on the URI entity itself,
PUT should be preferred over POST.
54 # pylint: disable=C0103
56 # C0103: Invalid name, since the R_* names are not conforming
58 from ganeti import opcodes
59 from ganeti import http
60 from ganeti import constants
61 from ganeti import cli
62 from ganeti import rapi
64 from ganeti import compat
65 from ganeti.rapi import baserlib
# Fields shared by every tagged object exposed over RAPI.
_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]

# Fields returned for instance queries.
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
            "network_port",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
            ] + _COMMON_FIELDS

# Fields returned for node queries.
N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",
            "group.uuid",
            ] + _COMMON_FIELDS

# Fields returned for node group queries.
# NOTE(review): member list reconstructed — G_FIELDS is referenced by the
# group resources below; verify against upstream before relying on it.
G_FIELDS = [
  "alloc_policy",
  "name",
  "node_cnt",
  "node_list",
  ] + _COMMON_FIELDS

# Fields returned for bulk job queries (no per-opcode logs/results).
J_FIELDS_BULK = [
  "id", "ops", "status", "summary",
  "opstatus",
  "received_ts", "start_ts", "end_ts",
  ]

# Fields returned for single-job queries (bulk fields plus details).
J_FIELDS = J_FIELDS_BULK + [
  "oplog",
  "opresult",
  ]
# Symbolic node-role names as exposed over RAPI.
# NOTE: "_NR_MASTER_CANDIATE" misspells "candidate"; the identifier is kept
# as-is because it is referenced elsewhere in this module.
_NR_DRAINED = "drained"
_NR_MASTER_CANDIATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

# Maps internal node-role constants to their RAPI string representation.
_NR_MAP = {
  constants.NR_MASTER: _NR_MASTER,
  constants.NR_MCANDIDATE: _NR_MASTER_CANDIATE,
  constants.NR_DRAINED: _NR_DRAINED,
  constants.NR_OFFLINE: _NR_OFFLINE,
  constants.NR_REGULAR: _NR_REGULAR,
  }

# Every known role must have a RAPI representation.
assert frozenset(_NR_MAP.keys()) == constants.NR_ALL
# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"

# Feature string for node migration version 1
_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"

# Feature string for node evacuation with LU-generated jobs
_NODE_EVAC_RES1 = "node-evac-res1"

# All optional RAPI features advertised by /2/features.
ALL_FEATURES = frozenset([
  _INST_CREATE_REQV1,
  _INST_REINSTALL_REQV1,
  _NODE_MIGRATE_REQV1,
  _NODE_EVAC_RES1,
  ])

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10
class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  def GET(self):
    """Returns the remote API version.

    @rtype: int

    """
    return constants.RAPI_VERSION
class R_2_info(baserlib.R_Generic):
  """/2/info resource.

  """
  def GET(self):
    """Returns cluster information.

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()
class R_2_features(baserlib.R_Generic):
  """/2/features resource.

  """
  def GET(self):
    """Returns list of optional RAPI features implemented.

    """
    return list(ALL_FEATURES)
class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  def GET(self):
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # we use custom feedback function, instead of print we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, variants) in diagnose_data:
      # Expand each OS into its per-variant names
      os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names
class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  def PUT(self):
    """Redistribute configuration to all nodes.

    @return: a job id

    """
    return baserlib.SubmitJob([opcodes.OpClusterRedistConf()])
class R_2_cluster_modify(baserlib.R_Generic):
  """/2/modify resource.

  """
  def PUT(self):
    """Modifies cluster parameters.

    @return: a job id

    """
    op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body,
                             None)

    return baserlib.SubmitJob([op])
class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  def GET(self):
    """Returns a dictionary of jobs.

    @return: a dictionary with jobs id and uri.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
      return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)
    else:
      jobdata = map(compat.fst, client.QueryJobs(None, ["id"]))
      return baserlib.BuildUriList(jobdata, "/2/jobs/%s",
                                   uri_fields=("id", "uri"))
class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries for each
              opcodes in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    job_id = self.items[0]
    result = baserlib.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(J_FIELDS, result)

  def DELETE(self):
    """Cancel not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
    return result
class R_2_jobs_id_wait(baserlib.R_Generic):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = baserlib.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes within the timeout window
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }
class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))
class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resource.

  """
  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[node_name], fields=N_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])
class R_2_nodes_name_role(baserlib.R_Generic):
  """ /2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.request_body

    # Translate the requested role into the three flags understood by
    # OpNodeSetParams; None leaves a flag unchanged.
    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpNodeSetParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])
class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all instances off a node.

    @return: a job id

    """
    op = baserlib.FillOpcode(opcodes.OpNodeEvacuate, self.request_body, {
      "node_name": self.items[0],
      "dry_run": self.dryRun(),
      })

    return baserlib.SubmitJob([op])
class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    @return: a job id

    """
    node_name = self.items[0]

    if self.queryargs:
      # Support old-style requests
      if "live" in self.queryargs and "mode" in self.queryargs:
        raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                  " be passed")

      if "live" in self.queryargs:
        if self._checkIntVariable("live", default=1):
          mode = constants.HT_MIGRATION_LIVE
        else:
          mode = constants.HT_MIGRATION_NONLIVE
      else:
        mode = self._checkStringVariable("mode", default=None)

      data = {
        "mode": mode,
        }
    else:
      data = self.request_body

    op = baserlib.FillOpcode(opcodes.OpNodeMigrate, data, {
      "node_name": node_name,
      })

    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUNodeQueryStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Queries storage units on a node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpNodeQueryStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  def PUT(self):
    """Modifies a storage unit on a node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpNodeModifyStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  def PUT(self):
    """Repairs a storage unit on a node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])
def _ParseCreateGroupRequest(data, dry_run):
  """Parses a request for creating a node group.

  @rtype: L{opcodes.OpGroupAdd}
  @return: Group creation opcode

  """
  override = {
    "dry_run": dry_run,
    }

  # The RAPI body uses "name"; the opcode expects "group_name"
  rename = {
    "name": "group_name",
    }

  return baserlib.FillOpcode(opcodes.OpGroupAdd, data, override,
                             rename=rename)
class R_2_groups(baserlib.R_Generic):
  """/2/groups resource.

  """
  def GET(self):
    """Returns a list of all node groups.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryGroups([], G_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, G_FIELDS)
    else:
      data = client.QueryGroups([], ["name"], False)
      groupnames = [row[0] for row in data]
      return baserlib.BuildUriList(groupnames, "/2/groups/%s",
                                   uri_fields=("name", "uri"))

  def POST(self):
    """Create a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
    return baserlib.SubmitJob([op])
class R_2_groups_name(baserlib.R_Generic):
  """/2/groups/[group_name] resource.

  """
  def GET(self):
    """Send information about a node group.

    """
    group_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryGroups,
                                            names=[group_name], fields=G_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(G_FIELDS, result[0])

  def DELETE(self):
    """Delete a node group.

    @return: a job id

    """
    op = opcodes.OpGroupRemove(group_name=self.items[0],
                               dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
def _ParseModifyGroupRequest(name, data):
  """Parses a request for modifying a node group.

  @rtype: L{opcodes.OpGroupSetParams}
  @return: Group modify opcode

  """
  return baserlib.FillOpcode(opcodes.OpGroupSetParams, data, {
    "group_name": name,
    })
class R_2_groups_name_modify(baserlib.R_Generic):
  """/2/groups/[group_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseModifyGroupRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
def _ParseRenameGroupRequest(name, data, dry_run):
  """Parses a request for renaming a node group.

  @type name: string
  @param name: name of the node group to rename
  @type data: dict
  @param data: the body received by the rename request
  @type dry_run: bool
  @param dry_run: whether to perform a dry run

  @rtype: L{opcodes.OpGroupRename}
  @return: Node group rename opcode

  """
  return baserlib.FillOpcode(opcodes.OpGroupRename, data, {
    "group_name": name,
    "dry_run": dry_run,
    })
class R_2_groups_name_rename(baserlib.R_Generic):
  """/2/groups/[group_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    op = _ParseRenameGroupRequest(self.items[0], self.request_body,
                                  self.dryRun())
    return baserlib.SubmitJob([op])
class R_2_groups_name_assign_nodes(baserlib.R_Generic):
  """/2/groups/[group_name]/assign-nodes resource.

  """
  def PUT(self):
    """Assigns nodes to a group.

    @return: a job id

    """
    op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      "force": self.useForce(),
      })

    return baserlib.SubmitJob([op])
def _ParseInstanceCreateRequestVersion1(data, dry_run):
  """Parses an instance creation request version 1.

  @rtype: L{opcodes.OpInstanceCreate}
  @return: Instance creation opcode

  """
  override = {
    "dry_run": dry_run,
    }

  # Map legacy RAPI field names to opcode parameter names
  rename = {
    "os": "os_type",
    "name": "instance_name",
    }

  return baserlib.FillOpcode(opcodes.OpInstanceCreate, data, override,
                             rename=rename)
class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      raise http.HttpBadRequest("Instance creation request version 0 is no"
                                " longer supported")
    elif data_version == 1:
      data = self.request_body.copy()
      # Remove "__version__"
      data.pop(_REQ_DATA_VERSION, None)
      op = _ParseInstanceCreateRequestVersion1(data, self.dryRun())
    else:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    return baserlib.SubmitJob([op])
class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resource.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    instance_name = self.items[0]

    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[instance_name],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])

  def DELETE(self):
    """Delete an instance.

    @return: a job id

    """
    op = opcodes.OpInstanceRemove(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])
class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    @return: a job id

    """
    instance_name = self.items[0]
    static = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpInstanceQueryData(instances=[instance_name],
                                     static=static)
    return baserlib.SubmitJob([op])
class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    @return: a job id

    """
    instance_name = self.items[0]
    reboot_type = self.queryargs.get("type",
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self._checkIntVariable("ignore_secondaries"))
    op = opcodes.OpInstanceReboot(instance_name=instance_name,
                                  reboot_type=reboot_type,
                                  ignore_secondaries=ignore_secondaries,
                                  dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    @return: a job id

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable("force"))
    no_remember = bool(self._checkIntVariable("no_remember"))
    op = opcodes.OpInstanceStartup(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()),
                                   no_remember=no_remember)

    return baserlib.SubmitJob([op])
def _ParseShutdownInstanceRequest(name, data, dry_run, no_remember):
  """Parses a request for an instance shutdown.

  @rtype: L{opcodes.OpInstanceShutdown}
  @return: Instance shutdown opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceShutdown, data, {
    "instance_name": name,
    "dry_run": dry_run,
    "no_remember": no_remember,
    })
class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    @return: a job id

    """
    no_remember = bool(self._checkIntVariable("no_remember"))
    op = _ParseShutdownInstanceRequest(self.items[0], self.request_body,
                                       bool(self.dryRun()), no_remember)

    return baserlib.SubmitJob([op])
def _ParseInstanceReinstallRequest(name, data):
  """Parses a request for reinstalling an instance.

  @return: List of opcodes: shutdown, reinstall and (optionally) startup

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  ostype = baserlib.CheckParameter(data, "os", default=None)
  start = baserlib.CheckParameter(data, "start", exptype=bool,
                                  default=True)
  osparams = baserlib.CheckParameter(data, "osparams", default=None)

  ops = [
    opcodes.OpInstanceShutdown(instance_name=name),
    opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
                                osparams=osparams),
    ]

  if start:
    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))

  return ops
class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    if self.request_body:
      if self.queryargs:
        raise http.HttpBadRequest("Can't combine query and body parameters")

      body = self.request_body
    else:
      # Legacy interface, do not modify/extend
      body = {
        "os": self._checkStringVariable("os"),
        "start": not self._checkIntVariable("nostartup"),
        }

    ops = _ParseInstanceReinstallRequest(self.items[0], body)

    return baserlib.SubmitJob(ops)
989 def _ParseInstanceReplaceDisksRequest(name, data):
990 """Parses a request for an instance export.
992 @rtype: L{opcodes.OpInstanceReplaceDisks}
993 @return: Instance export opcode
997 "instance_name": name,
1002 raw_disks = data["disks"]
1006 if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable=E1102
1007 # Backwards compatibility for strings of the format "1, 2, 3"
1009 data["disks"] = [int(part) for part in raw_disks.split(",")]
1010 except (TypeError, ValueError), err:
1011 raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
1013 return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)
class R_2_instances_name_replace_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  def POST(self):
    """Replaces disks on an instance.

    @return: a job id

    """
    op = _ParseInstanceReplaceDisksRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
class R_2_instances_name_activate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  def PUT(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    @return: a job id

    """
    instance_name = self.items[0]
    ignore_size = bool(self._checkIntVariable("ignore_size"))

    op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
                                         ignore_size=ignore_size)

    return baserlib.SubmitJob([op])
class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  def PUT(self):
    """Deactivate disks for an instance.

    @return: a job id

    """
    instance_name = self.items[0]

    op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)

    return baserlib.SubmitJob([op])
class R_2_instances_name_prepare_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  def PUT(self):
    """Prepares an export for an instance.

    @return: a job id

    """
    instance_name = self.items[0]
    mode = self._checkStringVariable("mode")

    op = opcodes.OpBackupPrepare(instance_name=instance_name,
                                 mode=mode)

    return baserlib.SubmitJob([op])
def _ParseExportInstanceRequest(name, data):
  """Parses a request for an instance export.

  @rtype: L{opcodes.OpBackupExport}
  @return: Instance export opcode

  """
  # Rename "destination" to "target_node"
  try:
    data["target_node"] = data.pop("destination")
  except KeyError:
    pass

  return baserlib.FillOpcode(opcodes.OpBackupExport, data, {
    "instance_name": name,
    })
class R_2_instances_name_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/export resource.

  """
  def PUT(self):
    """Exports an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    op = _ParseExportInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
def _ParseMigrateInstanceRequest(name, data):
  """Parses a request for an instance migration.

  @rtype: L{opcodes.OpInstanceMigrate}
  @return: Instance migration opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceMigrate, data, {
    "instance_name": name,
    })
class R_2_instances_name_migrate(baserlib.R_Generic):
  """/2/instances/[instance_name]/migrate resource.

  """
  def PUT(self):
    """Migrates an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
class R_2_instances_name_failover(baserlib.R_Generic):
  """/2/instances/[instance_name]/failover resource.

  """
  def PUT(self):
    """Does a failover of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = baserlib.FillOpcode(opcodes.OpInstanceFailover, self.request_body, {
      "instance_name": self.items[0],
      })

    return baserlib.SubmitJob([op])
def _ParseRenameInstanceRequest(name, data):
  """Parses a request for renaming an instance.

  @rtype: L{opcodes.OpInstanceRename}
  @return: Instance rename opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceRename, data, {
    "instance_name": name,
    })
class R_2_instances_name_rename(baserlib.R_Generic):
  """/2/instances/[instance_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseRenameInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
def _ParseModifyInstanceRequest(name, data):
  """Parses a request for modifying an instance.

  @rtype: L{opcodes.OpInstanceSetParams}
  @return: Instance modify opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceSetParams, data, {
    "instance_name": name,
    })
class R_2_instances_name_modify(baserlib.R_Generic):
  """/2/instances/[instance_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseModifyInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
class R_2_instances_name_disk_grow(baserlib.R_Generic):
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.

  """
  def POST(self):
    """Increases the size of an instance disk.

    @return: a job id

    """
    op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
      "instance_name": self.items[0],
      "disk": int(self.items[1]),
      })

    return baserlib.SubmitJob([op])
class R_2_instances_name_console(baserlib.R_Generic):
  """/2/instances/[instance_name]/console resource.

  """
  # Console details contain credentials, hence restricting access
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Request information for connecting to instance's console.

    @return: Serialized instance console description, see
             L{objects.InstanceConsole}

    """
    client = baserlib.GetClient()

    ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)

    if console is None:
      raise http.HttpServiceUnavailable("Instance console unavailable")

    assert isinstance(console, dict)
    return console
def _GetQueryFields(args):
  """Extracts the list of fields from HTTP query arguments.

  @raise http.HttpBadRequest: When the "fields" argument is missing

  """
  try:
    fields = args["fields"]
  except KeyError:
    raise http.HttpBadRequest("Missing 'fields' query argument")

  return _SplitQueryFields(fields[0])
1278 def _SplitQueryFields(fields):
1282 return [i.strip() for i in fields.split(",")]
class R_2_query(baserlib.R_Generic):
  """/2/query/[resource] resource.

  """
  # Results might contain sensitive information
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def _Query(self, fields, filter_):
    # Thin wrapper around the luxi Query call; result is serialized to a dict
    return baserlib.GetClient().Query(self.items[0], fields, filter_).ToDict()

  def GET(self):
    """Returns resource information.

    @return: Query result, see L{objects.QueryResponse}

    """
    return self._Query(_GetQueryFields(self.queryargs), None)

  def PUT(self):
    """Submits job querying for resources.

    @return: Query result, see L{objects.QueryResponse}

    """
    body = self.request_body

    baserlib.CheckType(body, dict, "Body contents")

    try:
      fields = body["fields"]
    except KeyError:
      # Fall back to query arguments when the body omits "fields"
      fields = _GetQueryFields(self.queryargs)

    return self._Query(fields, self.request_body.get("filter", None))
class R_2_query_fields(baserlib.R_Generic):
  """/2/query/[resource]/fields resource.

  """
  def GET(self):
    """Retrieves list of available fields for a resource.

    @return: List of serialized L{objects.QueryFieldDefinition}

    """
    try:
      raw_fields = self.queryargs["fields"]
    except KeyError:
      # No filter: return all fields
      fields = None
    else:
      fields = _SplitQueryFields(raw_fields[0])

    return baserlib.GetClient().QueryFields(self.items[0], fields).ToDict()
class _R_Tags(baserlib.R_Generic):
  """ Quasiclass for tagging resources

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      # The cluster itself has no name in the URI
      self.name = None
    else:
      self.name = items[0]

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The request as a list of strings should be PUT to this URI. And
    you'll have back a job id.

    """
    # pylint: disable=W0212
    if "tag" not in self.queryargs:
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs["tag"], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # pylint: disable=W0212
    if "tag" not in self.queryargs:
      # We are not going to delete all tags; an explicit list is required
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs["tag"],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))
class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE
class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE
class R_2_groups_name_tags(_R_Tags):
  """ /2/groups/[group_name]/tags resource.

  Manages per-nodegroup tags.

  """
  TAG_LEVEL = constants.TAG_NODEGROUP
class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER