4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Remote API resource implementations.
27 According to RFC2616 the main difference between PUT and POST is that
28 POST can create new resources but PUT can only create the resource the
29 URI was pointing to on the PUT request.
31 In the context of this module POST on ``/2/instances`` to change an existing
32 entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
33 new instance) with a name specified in the request.
35 Quoting from RFC2616, section 9.6::
37 The fundamental difference between the POST and PUT requests is reflected in
38 the different meaning of the Request-URI. The URI in a POST request
39 identifies the resource that will handle the enclosed entity. That resource
40 might be a data-accepting process, a gateway to some other protocol, or a
41 separate entity that accepts annotations. In contrast, the URI in a PUT
42 request identifies the entity enclosed with the request -- the user agent
43 knows what URI is intended and the server MUST NOT attempt to apply the
44 request to some other resource. If the server desires that the request be
45 applied to a different URI, it MUST send a 301 (Moved Permanently) response;
46 the user agent MAY then make its own decision regarding whether or not to
49 So when adding new methods, if they are operating on the URI entity itself,
50 PUT should be preferred over POST.
54 # pylint: disable-msg=C0103
56 # C0103: Invalid name, since the R_* names are not conforming
58 from ganeti import opcodes
59 from ganeti import http
60 from ganeti import constants
61 from ganeti import cli
62 from ganeti import rapi
64 from ganeti import compat
65 from ganeti import ssconf
66 from ganeti.rapi import baserlib
69 _COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
70 I_FIELDS = ["name", "admin_state", "os",
73 "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
75 "disk.sizes", "disk_usage",
76 "beparams", "hvparams",
77 "oper_state", "oper_ram", "oper_vcpus", "status",
78 "custom_hvparams", "custom_beparams", "custom_nicparams",
81 N_FIELDS = ["name", "offline", "master_candidate", "drained",
83 "mtotal", "mnode", "mfree",
84 "pinst_cnt", "sinst_cnt",
85 "ctotal", "cnodes", "csockets",
87 "pinst_list", "sinst_list",
88 "master_capable", "vm_capable",
100 "id", "ops", "status", "summary",
102 "received_ts", "start_ts", "end_ts",
105 J_FIELDS = J_FIELDS_BULK + [
# String values used by the RAPI to represent node roles.
110 _NR_DRAINED = "drained"
111 _NR_MASTER_CANDIATE = "master-candidate"  # NOTE(review): identifier misspells "CANDIDATE"; used consistently below, so left unchanged here
112 _NR_MASTER = "master"
113 _NR_OFFLINE = "offline"
114 _NR_REGULAR = "regular"
# Mapping from internal node-role constants to the strings above.
117     constants.NR_MASTER: _NR_MASTER,
118     constants.NR_MCANDIDATE: _NR_MASTER_CANDIATE,
119     constants.NR_DRAINED: _NR_DRAINED,
120     constants.NR_OFFLINE: _NR_OFFLINE,
121     constants.NR_REGULAR: _NR_REGULAR,
# Sanity check: every known node role must have a string representation.
124 assert frozenset(_NR_MAP.keys()) == constants.NR_ALL
126 # Request data version field
127 _REQ_DATA_VERSION = "__version__"
129 # Feature string for instance creation request data version 1
130 _INST_CREATE_REQV1 = "instance-create-reqv1"
132 # Feature string for instance reinstall request version 1
133 _INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
135 # Feature string for node migration version 1
136 _NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
138 # Feature string for node evacuation with LU-generated jobs
139 _NODE_EVAC_RES1 = "node-evac-res1"
141 ALL_FEATURES = frozenset([
143 _INST_REINSTALL_REQV1,
148 # Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
152 class R_root(baserlib.ResourceBase):
158 """Supported for legacy reasons.
164 class R_version(baserlib.ResourceBase):
165 """/version resource.
167 This resource should be used to determine the remote API version and
168 to adapt clients accordingly.
173 """Returns the remote API version.
176 return constants.RAPI_VERSION
179 class R_2_info(baserlib.ResourceBase):
184 """Returns cluster information.
187 client = self.GetClient()
188 return client.QueryClusterInfo()
191 class R_2_features(baserlib.ResourceBase):
192 """/2/features resource.
197 """Returns list of optional RAPI features implemented.
200 return list(ALL_FEATURES)
203 class R_2_os(baserlib.ResourceBase):
208 """Return a list of all OSes.
210 Can return error 500 in case of a problem.
212 Example: ["debian-etch"]
215 cl = self.GetClient()
216 op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
217 job_id = self.SubmitJob([op], cl=cl)
218 # we use custom feedback function, instead of print we log the status
219 result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
220 diagnose_data = result[0]
222 if not isinstance(diagnose_data, list):
223 raise http.HttpBadGateway(message="Can't get OS list")
226 for (name, variants) in diagnose_data:
227 os_names.extend(cli.CalculateOSNames(name, variants))
232 class R_2_redist_config(baserlib.ResourceBase):
233 """/2/redistribute-config resource.
237 """Redistribute configuration to all nodes.
240 return self.SubmitJob([opcodes.OpClusterRedistConf()])
243 class R_2_cluster_modify(baserlib.ResourceBase):
244 """/2/modify resource.
248 """Modifies cluster parameters.
253 op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body,
256 return self.SubmitJob([op])
259 class R_2_jobs(baserlib.ResourceBase):
264 """Returns a dictionary of jobs.
266 @return: a dictionary with jobs id and uri.
269 client = self.GetClient()
272 bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
273 return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)
275 jobdata = map(compat.fst, client.QueryJobs(None, ["id"]))
276 return baserlib.BuildUriList(jobdata, "/2/jobs/%s",
277 uri_fields=("id", "uri"))
280 class R_2_jobs_id(baserlib.ResourceBase):
281 """/2/jobs/[job_id] resource.
285 """Returns a job status.
287 @return: a dictionary with job parameters.
289 - id: job ID as a number
290 - status: current job status as a string
291 - ops: involved OpCodes as a list of dictionaries for each
293 - opstatus: OpCodes status as a list
294 - opresult: OpCodes results as a list of lists
297 job_id = self.items[0]
298 result = self.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
300 raise http.HttpNotFound()
301 return baserlib.MapFields(J_FIELDS, result)
304 """Cancel not-yet-started job.
307 job_id = self.items[0]
308 result = self.GetClient().CancelJob(job_id)
312 class R_2_jobs_id_wait(baserlib.ResourceBase):
313 """/2/jobs/[job_id]/wait resource.
316 # WaitForJobChange provides access to sensitive information and blocks
317 # machine resources (it's a blocking RAPI call), hence restricting access.
318 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
321 """Waits for job changes.
324 job_id = self.items[0]
326 fields = self.getBodyParameter("fields")
327 prev_job_info = self.getBodyParameter("previous_job_info", None)
328 prev_log_serial = self.getBodyParameter("previous_log_serial", None)
330 if not isinstance(fields, list):
331 raise http.HttpBadRequest("The 'fields' parameter should be a list")
333 if not (prev_job_info is None or isinstance(prev_job_info, list)):
334 raise http.HttpBadRequest("The 'previous_job_info' parameter should"
337 if not (prev_log_serial is None or
338 isinstance(prev_log_serial, (int, long))):
339 raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
342 client = self.GetClient()
343 result = client.WaitForJobChangeOnce(job_id, fields,
344 prev_job_info, prev_log_serial,
345 timeout=_WFJC_TIMEOUT)
347 raise http.HttpNotFound()
349 if result == constants.JOB_NOTCHANGED:
353 (job_info, log_entries) = result
356 "job_info": job_info,
357 "log_entries": log_entries,
361 class R_2_nodes(baserlib.ResourceBase):
362 """/2/nodes resource.
366 """Returns a list of all nodes.
369 client = self.GetClient()
372 bulkdata = client.QueryNodes([], N_FIELDS, False)
373 return baserlib.MapBulkFields(bulkdata, N_FIELDS)
375 nodesdata = client.QueryNodes([], ["name"], False)
376 nodeslist = [row[0] for row in nodesdata]
377 return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
378 uri_fields=("id", "uri"))
381 class R_2_nodes_name(baserlib.ResourceBase):
382 """/2/nodes/[node_name] resource.
386 """Send information about a node.
389 node_name = self.items[0]
390 client = self.GetClient()
392 result = baserlib.HandleItemQueryErrors(client.QueryNodes,
393 names=[node_name], fields=N_FIELDS,
394 use_locking=self.useLocking())
396 return baserlib.MapFields(N_FIELDS, result[0])
399 class R_2_nodes_name_role(baserlib.ResourceBase):
400 """ /2/nodes/[node_name]/role resource.
404 """Returns the current node role.
409 node_name = self.items[0]
410 client = self.GetClient()
411 result = client.QueryNodes(names=[node_name], fields=["role"],
412 use_locking=self.useLocking())
414 return _NR_MAP[result[0][0]]
417 """Sets the node role.
422 if not isinstance(self.request_body, basestring):
423 raise http.HttpBadRequest("Invalid body contents, not a string")
425 node_name = self.items[0]
426 role = self.request_body
428 if role == _NR_REGULAR:
433 elif role == _NR_MASTER_CANDIATE:
435 offline = drained = None
437 elif role == _NR_DRAINED:
439 candidate = offline = None
441 elif role == _NR_OFFLINE:
443 candidate = drained = None
446 raise http.HttpBadRequest("Can't set '%s' role" % role)
448 op = opcodes.OpNodeSetParams(node_name=node_name,
449 master_candidate=candidate,
452 force=bool(self.useForce()))
454 return self.SubmitJob([op])
457 class R_2_nodes_name_evacuate(baserlib.ResourceBase):
458 """/2/nodes/[node_name]/evacuate resource.
462 """Evacuate all instances off a node.
465 op = baserlib.FillOpcode(opcodes.OpNodeEvacuate, self.request_body, {
466 "node_name": self.items[0],
467 "dry_run": self.dryRun(),
470 return self.SubmitJob([op])
473 class R_2_nodes_name_migrate(baserlib.ResourceBase):
474 """/2/nodes/[node_name]/migrate resource.
478 """Migrate all primary instances from a node.
481 node_name = self.items[0]
484 # Support old-style requests
485 if "live" in self.queryargs and "mode" in self.queryargs:
486 raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
489 if "live" in self.queryargs:
490 if self._checkIntVariable("live", default=1):
491 mode = constants.HT_MIGRATION_LIVE
493 mode = constants.HT_MIGRATION_NONLIVE
495 mode = self._checkStringVariable("mode", default=None)
501 data = self.request_body
503 op = baserlib.FillOpcode(opcodes.OpNodeMigrate, data, {
504 "node_name": node_name,
507 return self.SubmitJob([op])
510 class R_2_nodes_name_storage(baserlib.ResourceBase):
511 """/2/nodes/[node_name]/storage resource.
514 # LUNodeQueryStorage acquires locks, hence restricting access to GET
515 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
518 node_name = self.items[0]
520 storage_type = self._checkStringVariable("storage_type", None)
522 raise http.HttpBadRequest("Missing the required 'storage_type'"
525 output_fields = self._checkStringVariable("output_fields", None)
526 if not output_fields:
527 raise http.HttpBadRequest("Missing the required 'output_fields'"
530 op = opcodes.OpNodeQueryStorage(nodes=[node_name],
531 storage_type=storage_type,
532 output_fields=output_fields.split(","))
533 return self.SubmitJob([op])
536 class R_2_nodes_name_storage_modify(baserlib.ResourceBase):
537 """/2/nodes/[node_name]/storage/modify resource.
541 node_name = self.items[0]
543 storage_type = self._checkStringVariable("storage_type", None)
545 raise http.HttpBadRequest("Missing the required 'storage_type'"
548 name = self._checkStringVariable("name", None)
550 raise http.HttpBadRequest("Missing the required 'name'"
555 if "allocatable" in self.queryargs:
556 changes[constants.SF_ALLOCATABLE] = \
557 bool(self._checkIntVariable("allocatable", default=1))
559 op = opcodes.OpNodeModifyStorage(node_name=node_name,
560 storage_type=storage_type,
563 return self.SubmitJob([op])
566 class R_2_nodes_name_storage_repair(baserlib.ResourceBase):
567 """/2/nodes/[node_name]/storage/repair resource.
571 node_name = self.items[0]
573 storage_type = self._checkStringVariable("storage_type", None)
575 raise http.HttpBadRequest("Missing the required 'storage_type'"
578 name = self._checkStringVariable("name", None)
580 raise http.HttpBadRequest("Missing the required 'name'"
583 op = opcodes.OpRepairNodeStorage(node_name=node_name,
584 storage_type=storage_type,
586 return self.SubmitJob([op])
589 def _ParseCreateGroupRequest(data, dry_run):
590 """Parses a request for creating a node group.
592 @rtype: L{opcodes.OpGroupAdd}
593 @return: Group creation opcode
601 "name": "group_name",
604 return baserlib.FillOpcode(opcodes.OpGroupAdd, data, override,
608 class R_2_groups(baserlib.ResourceBase):
609 """/2/groups resource.
613 """Returns a list of all node groups.
616 client = self.GetClient()
619 bulkdata = client.QueryGroups([], G_FIELDS, False)
620 return baserlib.MapBulkFields(bulkdata, G_FIELDS)
622 data = client.QueryGroups([], ["name"], False)
623 groupnames = [row[0] for row in data]
624 return baserlib.BuildUriList(groupnames, "/2/groups/%s",
625 uri_fields=("name", "uri"))
628 """Create a node group.
633 baserlib.CheckType(self.request_body, dict, "Body contents")
634 op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
635 return self.SubmitJob([op])
638 class R_2_groups_name(baserlib.ResourceBase):
639 """/2/groups/[group_name] resource.
643 """Send information about a node group.
646 group_name = self.items[0]
647 client = self.GetClient()
649 result = baserlib.HandleItemQueryErrors(client.QueryGroups,
650 names=[group_name], fields=G_FIELDS,
651 use_locking=self.useLocking())
653 return baserlib.MapFields(G_FIELDS, result[0])
656 """Delete a node group.
659 op = opcodes.OpGroupRemove(group_name=self.items[0],
660 dry_run=bool(self.dryRun()))
662 return self.SubmitJob([op])
665 def _ParseModifyGroupRequest(name, data):
666 """Parses a request for modifying a node group.
668 @rtype: L{opcodes.OpGroupSetParams}
669 @return: Group modify opcode
672 return baserlib.FillOpcode(opcodes.OpGroupSetParams, data, {
677 class R_2_groups_name_modify(baserlib.ResourceBase):
678 """/2/groups/[group_name]/modify resource.
682 """Changes some parameters of node group.
687 baserlib.CheckType(self.request_body, dict, "Body contents")
689 op = _ParseModifyGroupRequest(self.items[0], self.request_body)
691 return self.SubmitJob([op])
694 def _ParseRenameGroupRequest(name, data, dry_run):
695 """Parses a request for renaming a node group.
698 @param name: name of the node group to rename
700 @param data: the body received by the rename request
702 @param dry_run: whether to perform a dry run
704 @rtype: L{opcodes.OpGroupRename}
705 @return: Node group rename opcode
708 return baserlib.FillOpcode(opcodes.OpGroupRename, data, {
714 class R_2_groups_name_rename(baserlib.ResourceBase):
715 """/2/groups/[group_name]/rename resource.
719 """Changes the name of a node group.
724 baserlib.CheckType(self.request_body, dict, "Body contents")
725 op = _ParseRenameGroupRequest(self.items[0], self.request_body,
727 return self.SubmitJob([op])
730 class R_2_groups_name_assign_nodes(baserlib.ResourceBase):
731 """/2/groups/[group_name]/assign-nodes resource.
735 """Assigns nodes to a group.
740 op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body, {
741 "group_name": self.items[0],
742 "dry_run": self.dryRun(),
743 "force": self.useForce(),
746 return self.SubmitJob([op])
749 def _ParseInstanceCreateRequestVersion1(data, dry_run):
750 """Parses an instance creation request version 1.
752 @rtype: L{opcodes.OpInstanceCreate}
753 @return: Instance creation opcode
762 "name": "instance_name",
765 return baserlib.FillOpcode(opcodes.OpInstanceCreate, data, override,
769 class R_2_instances(baserlib.ResourceBase):
770 """/2/instances resource.
774 """Returns a list of all available instances.
777 client = self.GetClient()
779 use_locking = self.useLocking()
781 bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
782 return baserlib.MapBulkFields(bulkdata, I_FIELDS)
784 instancesdata = client.QueryInstances([], ["name"], use_locking)
785 instanceslist = [row[0] for row in instancesdata]
786 return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
787 uri_fields=("id", "uri"))
790 """Create an instance.
795 if not isinstance(self.request_body, dict):
796 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
798 # Default to request data version 0
799 data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
801 if data_version == 0:
802 raise http.HttpBadRequest("Instance creation request version 0 is no"
804 elif data_version == 1:
805 data = self.request_body.copy()
806 # Remove "__version__"
807 data.pop(_REQ_DATA_VERSION, None)
808 op = _ParseInstanceCreateRequestVersion1(data, self.dryRun())
810 raise http.HttpBadRequest("Unsupported request data version %s" %
813 return self.SubmitJob([op])
816 class R_2_instances_name(baserlib.ResourceBase):
817 """/2/instances/[instance_name] resource.
821 """Send information about an instance.
824 client = self.GetClient()
825 instance_name = self.items[0]
827 result = baserlib.HandleItemQueryErrors(client.QueryInstances,
828 names=[instance_name],
830 use_locking=self.useLocking())
832 return baserlib.MapFields(I_FIELDS, result[0])
835 """Delete an instance.
838 op = opcodes.OpInstanceRemove(instance_name=self.items[0],
839 ignore_failures=False,
840 dry_run=bool(self.dryRun()))
841 return self.SubmitJob([op])
844 class R_2_instances_name_info(baserlib.ResourceBase):
845 """/2/instances/[instance_name]/info resource.
849 """Request detailed instance information.
852 instance_name = self.items[0]
853 static = bool(self._checkIntVariable("static", default=0))
855 op = opcodes.OpInstanceQueryData(instances=[instance_name],
857 return self.SubmitJob([op])
860 class R_2_instances_name_reboot(baserlib.ResourceBase):
861 """/2/instances/[instance_name]/reboot resource.
863 Implements an instance reboot.
867 """Reboot an instance.
869 The URI takes type=[hard|soft|full] and
870 ignore_secondaries=[False|True] parameters.
873 instance_name = self.items[0]
874 reboot_type = self.queryargs.get("type",
875 [constants.INSTANCE_REBOOT_HARD])[0]
876 ignore_secondaries = bool(self._checkIntVariable("ignore_secondaries"))
877 op = opcodes.OpInstanceReboot(instance_name=instance_name,
878 reboot_type=reboot_type,
879 ignore_secondaries=ignore_secondaries,
880 dry_run=bool(self.dryRun()))
882 return self.SubmitJob([op])
885 class R_2_instances_name_startup(baserlib.ResourceBase):
886 """/2/instances/[instance_name]/startup resource.
888 Implements an instance startup.
892 """Startup an instance.
894 The URI takes force=[False|True] parameter to start the instance
895 even if secondary disks are failing.
898 instance_name = self.items[0]
899 force_startup = bool(self._checkIntVariable("force"))
900 no_remember = bool(self._checkIntVariable("no_remember"))
901 op = opcodes.OpInstanceStartup(instance_name=instance_name,
903 dry_run=bool(self.dryRun()),
904 no_remember=no_remember)
906 return self.SubmitJob([op])
909 def _ParseShutdownInstanceRequest(name, data, dry_run, no_remember):
910 """Parses a request for an instance shutdown.
912 @rtype: L{opcodes.OpInstanceShutdown}
913 @return: Instance shutdown opcode
916 return baserlib.FillOpcode(opcodes.OpInstanceShutdown, data, {
917 "instance_name": name,
919 "no_remember": no_remember,
923 class R_2_instances_name_shutdown(baserlib.ResourceBase):
924 """/2/instances/[instance_name]/shutdown resource.
926 Implements an instance shutdown.
930 """Shutdown an instance.
935 baserlib.CheckType(self.request_body, dict, "Body contents")
937 no_remember = bool(self._checkIntVariable("no_remember"))
938 op = _ParseShutdownInstanceRequest(self.items[0], self.request_body,
939 bool(self.dryRun()), no_remember)
941 return self.SubmitJob([op])
944 def _ParseInstanceReinstallRequest(name, data):
945 """Parses a request for reinstalling an instance.
948 if not isinstance(data, dict):
949 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
951 ostype = baserlib.CheckParameter(data, "os", default=None)
952 start = baserlib.CheckParameter(data, "start", exptype=bool,
954 osparams = baserlib.CheckParameter(data, "osparams", default=None)
957 opcodes.OpInstanceShutdown(instance_name=name),
958 opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
963 ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))
968 class R_2_instances_name_reinstall(baserlib.ResourceBase):
969 """/2/instances/[instance_name]/reinstall resource.
971 Implements an instance reinstall.
975 """Reinstall an instance.
977 The URI takes os=name and nostartup=[0|1] optional
978 parameters. By default, the instance will be started
982 if self.request_body:
984 raise http.HttpBadRequest("Can't combine query and body parameters")
986 body = self.request_body
988 # Legacy interface, do not modify/extend
990 "os": self._checkStringVariable("os"),
991 "start": not self._checkIntVariable("nostartup"),
996 ops = _ParseInstanceReinstallRequest(self.items[0], body)
998 return self.SubmitJob(ops)
1001 def _ParseInstanceReplaceDisksRequest(name, data):
1002 """Parses a request for an instance export.
1004 @rtype: L{opcodes.OpInstanceReplaceDisks}
1005 @return: Instance export opcode
1009 "instance_name": name,
1014 raw_disks = data["disks"]
1018 if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102
1019 # Backwards compatibility for strings of the format "1, 2, 3"
1021 data["disks"] = [int(part) for part in raw_disks.split(",")]
1022 except (TypeError, ValueError), err:
1023 raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
1025 return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)
1028 class R_2_instances_name_replace_disks(baserlib.ResourceBase):
1029 """/2/instances/[instance_name]/replace-disks resource.
1033 """Replaces disks on an instance.
1036 op = _ParseInstanceReplaceDisksRequest(self.items[0], self.request_body)
1038 return self.SubmitJob([op])
1041 class R_2_instances_name_activate_disks(baserlib.ResourceBase):
1042 """/2/instances/[instance_name]/activate-disks resource.
1046 """Activate disks for an instance.
1048 The URI might contain ignore_size to ignore current recorded size.
1051 instance_name = self.items[0]
1052 ignore_size = bool(self._checkIntVariable("ignore_size"))
1054 op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
1055 ignore_size=ignore_size)
1057 return self.SubmitJob([op])
1060 class R_2_instances_name_deactivate_disks(baserlib.ResourceBase):
1061 """/2/instances/[instance_name]/deactivate-disks resource.
1065 """Deactivate disks for an instance.
1068 instance_name = self.items[0]
1070 op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)
1072 return self.SubmitJob([op])
1075 class R_2_instances_name_prepare_export(baserlib.ResourceBase):
1076 """/2/instances/[instance_name]/prepare-export resource.
1080 """Prepares an export for an instance.
1085 instance_name = self.items[0]
1086 mode = self._checkStringVariable("mode")
1088 op = opcodes.OpBackupPrepare(instance_name=instance_name,
1091 return self.SubmitJob([op])
1094 def _ParseExportInstanceRequest(name, data):
1095 """Parses a request for an instance export.
1097 @rtype: L{opcodes.OpBackupExport}
1098 @return: Instance export opcode
1101 # Rename "destination" to "target_node"
1103 data["target_node"] = data.pop("destination")
1107 return baserlib.FillOpcode(opcodes.OpBackupExport, data, {
1108 "instance_name": name,
1112 class R_2_instances_name_export(baserlib.ResourceBase):
1113 """/2/instances/[instance_name]/export resource.
1117 """Exports an instance.
1122 if not isinstance(self.request_body, dict):
1123 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
1125 op = _ParseExportInstanceRequest(self.items[0], self.request_body)
1127 return self.SubmitJob([op])
1130 def _ParseMigrateInstanceRequest(name, data):
1131 """Parses a request for an instance migration.
1133 @rtype: L{opcodes.OpInstanceMigrate}
1134 @return: Instance migration opcode
1137 return baserlib.FillOpcode(opcodes.OpInstanceMigrate, data, {
1138 "instance_name": name,
1142 class R_2_instances_name_migrate(baserlib.ResourceBase):
1143 """/2/instances/[instance_name]/migrate resource.
1147 """Migrates an instance.
1152 baserlib.CheckType(self.request_body, dict, "Body contents")
1154 op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)
1156 return self.SubmitJob([op])
1159 class R_2_instances_name_failover(baserlib.ResourceBase):
1160 """/2/instances/[instance_name]/failover resource.
1164 """Does a failover of an instance.
1169 baserlib.CheckType(self.request_body, dict, "Body contents")
1171 op = baserlib.FillOpcode(opcodes.OpInstanceFailover, self.request_body, {
1172 "instance_name": self.items[0],
1175 return self.SubmitJob([op])
1178 def _ParseRenameInstanceRequest(name, data):
1179 """Parses a request for renaming an instance.
1181 @rtype: L{opcodes.OpInstanceRename}
1182 @return: Instance rename opcode
1185 return baserlib.FillOpcode(opcodes.OpInstanceRename, data, {
1186 "instance_name": name,
1190 class R_2_instances_name_rename(baserlib.ResourceBase):
1191 """/2/instances/[instance_name]/rename resource.
1195 """Changes the name of an instance.
1200 baserlib.CheckType(self.request_body, dict, "Body contents")
1202 op = _ParseRenameInstanceRequest(self.items[0], self.request_body)
1204 return self.SubmitJob([op])
1207 def _ParseModifyInstanceRequest(name, data):
1208 """Parses a request for modifying an instance.
1210 @rtype: L{opcodes.OpInstanceSetParams}
1211 @return: Instance modify opcode
1214 return baserlib.FillOpcode(opcodes.OpInstanceSetParams, data, {
1215 "instance_name": name,
1219 class R_2_instances_name_modify(baserlib.ResourceBase):
1220 """/2/instances/[instance_name]/modify resource.
1224 """Changes some parameters of an instance.
1229 baserlib.CheckType(self.request_body, dict, "Body contents")
1231 op = _ParseModifyInstanceRequest(self.items[0], self.request_body)
1233 return self.SubmitJob([op])
1236 class R_2_instances_name_disk_grow(baserlib.ResourceBase):
1237 """/2/instances/[instance_name]/disk/[disk_index]/grow resource.
1241 """Increases the size of an instance disk.
1246 op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
1247 "instance_name": self.items[0],
1248 "disk": int(self.items[1]),
1251 return self.SubmitJob([op])
1254 class R_2_instances_name_console(baserlib.ResourceBase):
1255 """/2/instances/[instance_name]/console resource.
1258 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
1261 """Request information for connecting to instance's console.
1263 @return: Serialized instance console description, see
1264 L{objects.InstanceConsole}
1267 client = self.GetClient()
1269 ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)
1272 raise http.HttpServiceUnavailable("Instance console unavailable")
1274 assert isinstance(console, dict)
1278 def _GetQueryFields(args):
1283 fields = args["fields"]
1285 raise http.HttpBadRequest("Missing 'fields' query argument")
1287 return _SplitQueryFields(fields[0])
1290 def _SplitQueryFields(fields):
1294 return [i.strip() for i in fields.split(",")]
1297 class R_2_query(baserlib.ResourceBase):
1298 """/2/query/[resource] resource.
1301 # Results might contain sensitive information
1302 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
1304   def _Query(self, fields, filter_):
    """Run a query for this resource (self.items[0]) via the client.

    @param fields: list of field names to retrieve
    @param filter_: query filter, or None for no filtering
    @return: serialized query response as a dictionary

    """
1305     return self.GetClient().Query(self.items[0], fields, filter_).ToDict()
1308 """Returns resource information.
1310 @return: Query result, see L{objects.QueryResponse}
1313 return self._Query(_GetQueryFields(self.queryargs), None)
1316 """Submits job querying for resources.
1318 @return: Query result, see L{objects.QueryResponse}
1321 body = self.request_body
1323 baserlib.CheckType(body, dict, "Body contents")
1326 fields = body["fields"]
1328 fields = _GetQueryFields(self.queryargs)
1330 return self._Query(fields, self.request_body.get("filter", None))
1333 class R_2_query_fields(baserlib.ResourceBase):
1334 """/2/query/[resource]/fields resource.
1338 """Retrieves list of available fields for a resource.
1340 @return: List of serialized L{objects.QueryFieldDefinition}
1344 raw_fields = self.queryargs["fields"]
1348 fields = _SplitQueryFields(raw_fields[0])
1350 return self.GetClient().QueryFields(self.items[0], fields).ToDict()
1353 class _R_Tags(baserlib.ResourceBase):
1354 """ Quasiclass for tagging resources
1356 Manages tags. When inheriting this class you must define the
1362 def __init__(self, items, queryargs, req):
1363 """A tag resource constructor.
1365 We have to override the default to sort out cluster naming case.
1368 baserlib.ResourceBase.__init__(self, items, queryargs, req)
1370 if self.TAG_LEVEL == constants.TAG_CLUSTER:
1373 self.name = items[0]
1376 """Returns a list of tags.
1378 Example: ["tag1", "tag2", "tag3"]
1381 kind = self.TAG_LEVEL
1383 if kind in (constants.TAG_INSTANCE,
1384 constants.TAG_NODEGROUP,
1385 constants.TAG_NODE):
1387 raise http.HttpBadRequest("Missing name on tag request")
1389 cl = self.GetClient()
1390 if kind == constants.TAG_INSTANCE:
1391 fn = cl.QueryInstances
1392 elif kind == constants.TAG_NODEGROUP:
1396 result = fn(names=[self.name], fields=["tags"], use_locking=False)
1397 if not result or not result[0]:
1398 raise http.HttpBadGateway("Invalid response from tag query")
1401 elif kind == constants.TAG_CLUSTER:
1402 assert not self.name
1403 # TODO: Use query API?
1404 ssc = ssconf.SimpleStore()
1405 tags = ssc.GetClusterTags()
1410 """Add a set of tags.
1412 The request as a list of strings should be PUT to this URI. And
1413 you'll have back a job id.
1416 # pylint: disable-msg=W0212
1417 if "tag" not in self.queryargs:
1418 raise http.HttpBadRequest("Please specify tag(s) to add using the"
1419 " the 'tag' parameter")
1420 op = opcodes.OpTagsSet(kind=self.TAG_LEVEL, name=self.name,
1421 tags=self.queryargs["tag"], dry_run=self.dryRun())
1422 return self.SubmitJob([op])
1427 In order to delete a set of tags, the DELETE
1428 request should be addressed to URI like:
1429 /tags?tag=[tag]&tag=[tag]
1432 # pylint: disable-msg=W0212
1433 if "tag" not in self.queryargs:
1434       # no, we're not going to delete all tags
1435 raise http.HttpBadRequest("Cannot delete all tags - please specify"
1436 " tag(s) using the 'tag' parameter")
1437 op = opcodes.OpTagsDel(kind=self.TAG_LEVEL, name=self.name,
1438 tags=self.queryargs["tag"], dry_run=self.dryRun())
1439 return self.SubmitJob([op])
1442 class R_2_instances_name_tags(_R_Tags):
1443 """ /2/instances/[instance_name]/tags resource.
1445 Manages per-instance tags.
1448 TAG_LEVEL = constants.TAG_INSTANCE
1451 class R_2_nodes_name_tags(_R_Tags):
1452 """ /2/nodes/[node_name]/tags resource.
1454 Manages per-node tags.
1457 TAG_LEVEL = constants.TAG_NODE
1460 class R_2_groups_name_tags(_R_Tags):
1461 """ /2/groups/[group_name]/tags resource.
1463 Manages per-nodegroup tags.
1466 TAG_LEVEL = constants.TAG_NODEGROUP
1469 class R_2_tags(_R_Tags):
1470 """ /2/tags resource.
1472 Manages cluster tags.
1475 TAG_LEVEL = constants.TAG_CLUSTER