4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Remote API resource implementations.
27 According to RFC2616 the main difference between PUT and POST is that
28 POST can create new resources but PUT can only create the resource the
29 URI was pointing to on the PUT request.
31 In the context of this module POST on ``/2/instances`` to change an existing
32 entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
33 new instance) with a name specified in the request.
35 Quoting from RFC2616, section 9.6::
37 The fundamental difference between the POST and PUT requests is reflected in
38 the different meaning of the Request-URI. The URI in a POST request
39 identifies the resource that will handle the enclosed entity. That resource
40 might be a data-accepting process, a gateway to some other protocol, or a
41 separate entity that accepts annotations. In contrast, the URI in a PUT
42 request identifies the entity enclosed with the request -- the user agent
43 knows what URI is intended and the server MUST NOT attempt to apply the
44 request to some other resource. If the server desires that the request be
45 applied to a different URI, it MUST send a 301 (Moved Permanently) response;
46 the user agent MAY then make its own decision regarding whether or not to
49 So when adding new methods, if they are operating on the URI entity itself,
PUT should be preferred over POST.
54 # pylint: disable-msg=C0103
56 # C0103: Invalid name, since the R_* names are not conforming
58 from ganeti import opcodes
59 from ganeti import http
60 from ganeti import constants
61 from ganeti import cli
62 from ganeti import rapi
64 from ganeti import compat
65 from ganeti import ssconf
66 from ganeti.rapi import baserlib
# Fields shared by several resource types: creation/modification
# timestamps, object UUID, serial number and tags.
_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
# Fields returned by instance queries (/2/instances?bulk=1).
# NOTE(review): this list appears truncated in this copy of the file —
# confirm the trailing entries and the closing bracket against upstream.
I_FIELDS = ["name", "admin_state", "os",
            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
81 N_FIELDS = ["name", "offline", "master_candidate", "drained",
83 "mtotal", "mnode", "mfree",
84 "pinst_cnt", "sinst_cnt",
85 "ctotal", "cnodes", "csockets",
87 "pinst_list", "sinst_list",
88 "master_capable", "vm_capable",
100 "id", "ops", "status", "summary",
102 "received_ts", "start_ts", "end_ts",
105 J_FIELDS = J_FIELDS_BULK + [
# External (RAPI-visible) node role names, as returned by the
# /2/nodes/[node_name]/role resource.
_NR_DRAINED = "drained"
# NOTE: "CANDIATE" is a long-standing typo in this internal name; it is
# kept as-is because the name is referenced elsewhere in this module.
_NR_MASTER_CANDIATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

# Maps the internal node role constants to their external string
# representation (completeness is asserted against constants.NR_ALL below).
_NR_MAP = {
  constants.NR_MASTER: _NR_MASTER,
  constants.NR_MCANDIDATE: _NR_MASTER_CANDIATE,
  constants.NR_DRAINED: _NR_DRAINED,
  constants.NR_OFFLINE: _NR_OFFLINE,
  constants.NR_REGULAR: _NR_REGULAR,
  }
124 assert frozenset(_NR_MAP.keys()) == constants.NR_ALL
126 # Request data version field
127 _REQ_DATA_VERSION = "__version__"
129 # Feature string for instance creation request data version 1
130 _INST_CREATE_REQV1 = "instance-create-reqv1"
132 # Feature string for instance reinstall request version 1
133 _INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
135 # Feature string for node migration version 1
136 _NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
138 # Feature string for node evacuation with LU-generated jobs
139 _NODE_EVAC_RES1 = "node-evac-res1"
141 ALL_FEATURES = frozenset([
143 _INST_REINSTALL_REQV1,
148 # Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
152 class R_root(baserlib.ResourceBase):
158 """Supported for legacy reasons.
class R_version(baserlib.ResourceBase):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  def GET(self):
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION
class R_2_info(baserlib.ResourceBase):
  """/2/info resource.

  """
  def GET(self):
    """Returns cluster information.

    """
    # Delegates entirely to the LUXI client's cluster info query.
    client = self.GetClient()
    return client.QueryClusterInfo()
class R_2_features(baserlib.ResourceBase):
  """/2/features resource.

  """
  def GET(self):
    """Returns list of optional RAPI features implemented.

    """
    return list(ALL_FEATURES)
class R_2_os(baserlib.ResourceBase):
  """/2/os resource.

  """
  def GET(self):
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = self.GetClient()
    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
    job_id = self.SubmitJob([op], cl=cl)
    # we use custom feedback function, instead of print we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    # Expand each (name, variants) pair into the full OS name list.
    os_names = []
    for (name, variants) in diagnose_data:
      os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names
class R_2_redist_config(baserlib.ResourceBase):
  """/2/redistribute-config resource.

  """
  def PUT(self):
    """Redistribute configuration to all nodes.

    """
    return self.SubmitJob([opcodes.OpClusterRedistConf()])
class R_2_cluster_modify(baserlib.OpcodeResource):
  """/2/modify resource.

  """
  # PUT on this resource modifies cluster-wide parameters.
  PUT_OPCODE = opcodes.OpClusterSetParams
250 class R_2_jobs(baserlib.ResourceBase):
255 """Returns a dictionary of jobs.
257 @return: a dictionary with jobs id and uri.
260 client = self.GetClient()
263 bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
264 return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)
266 jobdata = map(compat.fst, client.QueryJobs(None, ["id"]))
267 return baserlib.BuildUriList(jobdata, "/2/jobs/%s",
268 uri_fields=("id", "uri"))
271 class R_2_jobs_id(baserlib.ResourceBase):
272 """/2/jobs/[job_id] resource.
276 """Returns a job status.
278 @return: a dictionary with job parameters.
280 - id: job ID as a number
281 - status: current job status as a string
282 - ops: involved OpCodes as a list of dictionaries for each
284 - opstatus: OpCodes status as a list
285 - opresult: OpCodes results as a list of lists
288 job_id = self.items[0]
289 result = self.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
291 raise http.HttpNotFound()
292 return baserlib.MapFields(J_FIELDS, result)
295 """Cancel not-yet-started job.
298 job_id = self.items[0]
299 result = self.GetClient().CancelJob(job_id)
303 class R_2_jobs_id_wait(baserlib.ResourceBase):
304 """/2/jobs/[job_id]/wait resource.
307 # WaitForJobChange provides access to sensitive information and blocks
308 # machine resources (it's a blocking RAPI call), hence restricting access.
309 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
312 """Waits for job changes.
315 job_id = self.items[0]
317 fields = self.getBodyParameter("fields")
318 prev_job_info = self.getBodyParameter("previous_job_info", None)
319 prev_log_serial = self.getBodyParameter("previous_log_serial", None)
321 if not isinstance(fields, list):
322 raise http.HttpBadRequest("The 'fields' parameter should be a list")
324 if not (prev_job_info is None or isinstance(prev_job_info, list)):
325 raise http.HttpBadRequest("The 'previous_job_info' parameter should"
328 if not (prev_log_serial is None or
329 isinstance(prev_log_serial, (int, long))):
330 raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
333 client = self.GetClient()
334 result = client.WaitForJobChangeOnce(job_id, fields,
335 prev_job_info, prev_log_serial,
336 timeout=_WFJC_TIMEOUT)
338 raise http.HttpNotFound()
340 if result == constants.JOB_NOTCHANGED:
344 (job_info, log_entries) = result
347 "job_info": job_info,
348 "log_entries": log_entries,
352 class R_2_nodes(baserlib.ResourceBase):
353 """/2/nodes resource.
357 """Returns a list of all nodes.
360 client = self.GetClient()
363 bulkdata = client.QueryNodes([], N_FIELDS, False)
364 return baserlib.MapBulkFields(bulkdata, N_FIELDS)
366 nodesdata = client.QueryNodes([], ["name"], False)
367 nodeslist = [row[0] for row in nodesdata]
368 return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
369 uri_fields=("id", "uri"))
372 class R_2_nodes_name(baserlib.ResourceBase):
373 """/2/nodes/[node_name] resource.
377 """Send information about a node.
380 node_name = self.items[0]
381 client = self.GetClient()
383 result = baserlib.HandleItemQueryErrors(client.QueryNodes,
384 names=[node_name], fields=N_FIELDS,
385 use_locking=self.useLocking())
387 return baserlib.MapFields(N_FIELDS, result[0])
390 class R_2_nodes_name_role(baserlib.ResourceBase):
391 """ /2/nodes/[node_name]/role resource.
395 """Returns the current node role.
400 node_name = self.items[0]
401 client = self.GetClient()
402 result = client.QueryNodes(names=[node_name], fields=["role"],
403 use_locking=self.useLocking())
405 return _NR_MAP[result[0][0]]
408 """Sets the node role.
413 if not isinstance(self.request_body, basestring):
414 raise http.HttpBadRequest("Invalid body contents, not a string")
416 node_name = self.items[0]
417 role = self.request_body
419 if role == _NR_REGULAR:
424 elif role == _NR_MASTER_CANDIATE:
426 offline = drained = None
428 elif role == _NR_DRAINED:
430 candidate = offline = None
432 elif role == _NR_OFFLINE:
434 candidate = drained = None
437 raise http.HttpBadRequest("Can't set '%s' role" % role)
439 op = opcodes.OpNodeSetParams(node_name=node_name,
440 master_candidate=candidate,
443 force=bool(self.useForce()))
445 return self.SubmitJob([op])
448 class R_2_nodes_name_evacuate(baserlib.ResourceBase):
449 """/2/nodes/[node_name]/evacuate resource.
453 """Evacuate all instances off a node.
456 op = baserlib.FillOpcode(opcodes.OpNodeEvacuate, self.request_body, {
457 "node_name": self.items[0],
458 "dry_run": self.dryRun(),
461 return self.SubmitJob([op])
464 class R_2_nodes_name_migrate(baserlib.ResourceBase):
465 """/2/nodes/[node_name]/migrate resource.
469 """Migrate all primary instances from a node.
472 node_name = self.items[0]
475 # Support old-style requests
476 if "live" in self.queryargs and "mode" in self.queryargs:
477 raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
480 if "live" in self.queryargs:
481 if self._checkIntVariable("live", default=1):
482 mode = constants.HT_MIGRATION_LIVE
484 mode = constants.HT_MIGRATION_NONLIVE
486 mode = self._checkStringVariable("mode", default=None)
492 data = self.request_body
494 op = baserlib.FillOpcode(opcodes.OpNodeMigrate, data, {
495 "node_name": node_name,
498 return self.SubmitJob([op])
501 class R_2_nodes_name_storage(baserlib.ResourceBase):
502 """/2/nodes/[node_name]/storage resource.
505 # LUNodeQueryStorage acquires locks, hence restricting access to GET
506 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
509 node_name = self.items[0]
511 storage_type = self._checkStringVariable("storage_type", None)
513 raise http.HttpBadRequest("Missing the required 'storage_type'"
516 output_fields = self._checkStringVariable("output_fields", None)
517 if not output_fields:
518 raise http.HttpBadRequest("Missing the required 'output_fields'"
521 op = opcodes.OpNodeQueryStorage(nodes=[node_name],
522 storage_type=storage_type,
523 output_fields=output_fields.split(","))
524 return self.SubmitJob([op])
527 class R_2_nodes_name_storage_modify(baserlib.ResourceBase):
528 """/2/nodes/[node_name]/storage/modify resource.
532 node_name = self.items[0]
534 storage_type = self._checkStringVariable("storage_type", None)
536 raise http.HttpBadRequest("Missing the required 'storage_type'"
539 name = self._checkStringVariable("name", None)
541 raise http.HttpBadRequest("Missing the required 'name'"
546 if "allocatable" in self.queryargs:
547 changes[constants.SF_ALLOCATABLE] = \
548 bool(self._checkIntVariable("allocatable", default=1))
550 op = opcodes.OpNodeModifyStorage(node_name=node_name,
551 storage_type=storage_type,
554 return self.SubmitJob([op])
557 class R_2_nodes_name_storage_repair(baserlib.ResourceBase):
558 """/2/nodes/[node_name]/storage/repair resource.
562 node_name = self.items[0]
564 storage_type = self._checkStringVariable("storage_type", None)
566 raise http.HttpBadRequest("Missing the required 'storage_type'"
569 name = self._checkStringVariable("name", None)
571 raise http.HttpBadRequest("Missing the required 'name'"
574 op = opcodes.OpRepairNodeStorage(node_name=node_name,
575 storage_type=storage_type,
577 return self.SubmitJob([op])
580 def _ParseCreateGroupRequest(data, dry_run):
581 """Parses a request for creating a node group.
583 @rtype: L{opcodes.OpGroupAdd}
584 @return: Group creation opcode
592 "name": "group_name",
595 return baserlib.FillOpcode(opcodes.OpGroupAdd, data, override,
599 class R_2_groups(baserlib.ResourceBase):
600 """/2/groups resource.
604 """Returns a list of all node groups.
607 client = self.GetClient()
610 bulkdata = client.QueryGroups([], G_FIELDS, False)
611 return baserlib.MapBulkFields(bulkdata, G_FIELDS)
613 data = client.QueryGroups([], ["name"], False)
614 groupnames = [row[0] for row in data]
615 return baserlib.BuildUriList(groupnames, "/2/groups/%s",
616 uri_fields=("name", "uri"))
619 """Create a node group.
624 baserlib.CheckType(self.request_body, dict, "Body contents")
625 op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
626 return self.SubmitJob([op])
629 class R_2_groups_name(baserlib.ResourceBase):
630 """/2/groups/[group_name] resource.
634 """Send information about a node group.
637 group_name = self.items[0]
638 client = self.GetClient()
640 result = baserlib.HandleItemQueryErrors(client.QueryGroups,
641 names=[group_name], fields=G_FIELDS,
642 use_locking=self.useLocking())
644 return baserlib.MapFields(G_FIELDS, result[0])
647 """Delete a node group.
650 op = opcodes.OpGroupRemove(group_name=self.items[0],
651 dry_run=bool(self.dryRun()))
653 return self.SubmitJob([op])
656 def _ParseModifyGroupRequest(name, data):
657 """Parses a request for modifying a node group.
659 @rtype: L{opcodes.OpGroupSetParams}
660 @return: Group modify opcode
663 return baserlib.FillOpcode(opcodes.OpGroupSetParams, data, {
668 class R_2_groups_name_modify(baserlib.ResourceBase):
669 """/2/groups/[group_name]/modify resource.
673 """Changes some parameters of node group.
678 baserlib.CheckType(self.request_body, dict, "Body contents")
680 op = _ParseModifyGroupRequest(self.items[0], self.request_body)
682 return self.SubmitJob([op])
685 def _ParseRenameGroupRequest(name, data, dry_run):
686 """Parses a request for renaming a node group.
689 @param name: name of the node group to rename
691 @param data: the body received by the rename request
693 @param dry_run: whether to perform a dry run
695 @rtype: L{opcodes.OpGroupRename}
696 @return: Node group rename opcode
699 return baserlib.FillOpcode(opcodes.OpGroupRename, data, {
705 class R_2_groups_name_rename(baserlib.ResourceBase):
706 """/2/groups/[group_name]/rename resource.
710 """Changes the name of a node group.
715 baserlib.CheckType(self.request_body, dict, "Body contents")
716 op = _ParseRenameGroupRequest(self.items[0], self.request_body,
718 return self.SubmitJob([op])
721 class R_2_groups_name_assign_nodes(baserlib.ResourceBase):
722 """/2/groups/[group_name]/assign-nodes resource.
726 """Assigns nodes to a group.
731 op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body, {
732 "group_name": self.items[0],
733 "dry_run": self.dryRun(),
734 "force": self.useForce(),
737 return self.SubmitJob([op])
740 def _ParseInstanceCreateRequestVersion1(data, dry_run):
741 """Parses an instance creation request version 1.
743 @rtype: L{opcodes.OpInstanceCreate}
744 @return: Instance creation opcode
753 "name": "instance_name",
756 return baserlib.FillOpcode(opcodes.OpInstanceCreate, data, override,
760 class R_2_instances(baserlib.ResourceBase):
761 """/2/instances resource.
765 """Returns a list of all available instances.
768 client = self.GetClient()
770 use_locking = self.useLocking()
772 bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
773 return baserlib.MapBulkFields(bulkdata, I_FIELDS)
775 instancesdata = client.QueryInstances([], ["name"], use_locking)
776 instanceslist = [row[0] for row in instancesdata]
777 return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
778 uri_fields=("id", "uri"))
781 """Create an instance.
786 if not isinstance(self.request_body, dict):
787 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
789 # Default to request data version 0
790 data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
792 if data_version == 0:
793 raise http.HttpBadRequest("Instance creation request version 0 is no"
795 elif data_version == 1:
796 data = self.request_body.copy()
797 # Remove "__version__"
798 data.pop(_REQ_DATA_VERSION, None)
799 op = _ParseInstanceCreateRequestVersion1(data, self.dryRun())
801 raise http.HttpBadRequest("Unsupported request data version %s" %
804 return self.SubmitJob([op])
807 class R_2_instances_name(baserlib.ResourceBase):
808 """/2/instances/[instance_name] resource.
812 """Send information about an instance.
815 client = self.GetClient()
816 instance_name = self.items[0]
818 result = baserlib.HandleItemQueryErrors(client.QueryInstances,
819 names=[instance_name],
821 use_locking=self.useLocking())
823 return baserlib.MapFields(I_FIELDS, result[0])
826 """Delete an instance.
829 op = opcodes.OpInstanceRemove(instance_name=self.items[0],
830 ignore_failures=False,
831 dry_run=bool(self.dryRun()))
832 return self.SubmitJob([op])
835 class R_2_instances_name_info(baserlib.ResourceBase):
836 """/2/instances/[instance_name]/info resource.
840 """Request detailed instance information.
843 instance_name = self.items[0]
844 static = bool(self._checkIntVariable("static", default=0))
846 op = opcodes.OpInstanceQueryData(instances=[instance_name],
848 return self.SubmitJob([op])
851 class R_2_instances_name_reboot(baserlib.ResourceBase):
852 """/2/instances/[instance_name]/reboot resource.
854 Implements an instance reboot.
858 """Reboot an instance.
860 The URI takes type=[hard|soft|full] and
861 ignore_secondaries=[False|True] parameters.
864 instance_name = self.items[0]
865 reboot_type = self.queryargs.get("type",
866 [constants.INSTANCE_REBOOT_HARD])[0]
867 ignore_secondaries = bool(self._checkIntVariable("ignore_secondaries"))
868 op = opcodes.OpInstanceReboot(instance_name=instance_name,
869 reboot_type=reboot_type,
870 ignore_secondaries=ignore_secondaries,
871 dry_run=bool(self.dryRun()))
873 return self.SubmitJob([op])
class R_2_instances_name_startup(baserlib.ResourceBase):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable("force"))
    no_remember = bool(self._checkIntVariable("no_remember"))
    # Bug fix: force_startup was computed but never passed to the opcode,
    # so the "force" query parameter had no effect.
    op = opcodes.OpInstanceStartup(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()),
                                   no_remember=no_remember)

    return self.SubmitJob([op])
900 def _ParseShutdownInstanceRequest(name, data, dry_run, no_remember):
901 """Parses a request for an instance shutdown.
903 @rtype: L{opcodes.OpInstanceShutdown}
904 @return: Instance shutdown opcode
907 return baserlib.FillOpcode(opcodes.OpInstanceShutdown, data, {
908 "instance_name": name,
910 "no_remember": no_remember,
914 class R_2_instances_name_shutdown(baserlib.ResourceBase):
915 """/2/instances/[instance_name]/shutdown resource.
917 Implements an instance shutdown.
921 """Shutdown an instance.
926 baserlib.CheckType(self.request_body, dict, "Body contents")
928 no_remember = bool(self._checkIntVariable("no_remember"))
929 op = _ParseShutdownInstanceRequest(self.items[0], self.request_body,
930 bool(self.dryRun()), no_remember)
932 return self.SubmitJob([op])
935 def _ParseInstanceReinstallRequest(name, data):
936 """Parses a request for reinstalling an instance.
939 if not isinstance(data, dict):
940 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
942 ostype = baserlib.CheckParameter(data, "os", default=None)
943 start = baserlib.CheckParameter(data, "start", exptype=bool,
945 osparams = baserlib.CheckParameter(data, "osparams", default=None)
948 opcodes.OpInstanceShutdown(instance_name=name),
949 opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
954 ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))
959 class R_2_instances_name_reinstall(baserlib.ResourceBase):
960 """/2/instances/[instance_name]/reinstall resource.
962 Implements an instance reinstall.
966 """Reinstall an instance.
968 The URI takes os=name and nostartup=[0|1] optional
969 parameters. By default, the instance will be started
973 if self.request_body:
975 raise http.HttpBadRequest("Can't combine query and body parameters")
977 body = self.request_body
979 # Legacy interface, do not modify/extend
981 "os": self._checkStringVariable("os"),
982 "start": not self._checkIntVariable("nostartup"),
987 ops = _ParseInstanceReinstallRequest(self.items[0], body)
989 return self.SubmitJob(ops)
992 def _ParseInstanceReplaceDisksRequest(name, data):
993 """Parses a request for an instance export.
995 @rtype: L{opcodes.OpInstanceReplaceDisks}
996 @return: Instance export opcode
1000 "instance_name": name,
1005 raw_disks = data["disks"]
1009 if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102
1010 # Backwards compatibility for strings of the format "1, 2, 3"
1012 data["disks"] = [int(part) for part in raw_disks.split(",")]
1013 except (TypeError, ValueError), err:
1014 raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
1016 return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)
1019 class R_2_instances_name_replace_disks(baserlib.ResourceBase):
1020 """/2/instances/[instance_name]/replace-disks resource.
1024 """Replaces disks on an instance.
1027 op = _ParseInstanceReplaceDisksRequest(self.items[0], self.request_body)
1029 return self.SubmitJob([op])
1032 class R_2_instances_name_activate_disks(baserlib.ResourceBase):
1033 """/2/instances/[instance_name]/activate-disks resource.
1037 """Activate disks for an instance.
1039 The URI might contain ignore_size to ignore current recorded size.
1042 instance_name = self.items[0]
1043 ignore_size = bool(self._checkIntVariable("ignore_size"))
1045 op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
1046 ignore_size=ignore_size)
1048 return self.SubmitJob([op])
1051 class R_2_instances_name_deactivate_disks(baserlib.ResourceBase):
1052 """/2/instances/[instance_name]/deactivate-disks resource.
1056 """Deactivate disks for an instance.
1059 instance_name = self.items[0]
1061 op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)
1063 return self.SubmitJob([op])
1066 class R_2_instances_name_prepare_export(baserlib.ResourceBase):
1067 """/2/instances/[instance_name]/prepare-export resource.
1071 """Prepares an export for an instance.
1076 instance_name = self.items[0]
1077 mode = self._checkStringVariable("mode")
1079 op = opcodes.OpBackupPrepare(instance_name=instance_name,
1082 return self.SubmitJob([op])
1085 def _ParseExportInstanceRequest(name, data):
1086 """Parses a request for an instance export.
1088 @rtype: L{opcodes.OpBackupExport}
1089 @return: Instance export opcode
1092 # Rename "destination" to "target_node"
1094 data["target_node"] = data.pop("destination")
1098 return baserlib.FillOpcode(opcodes.OpBackupExport, data, {
1099 "instance_name": name,
1103 class R_2_instances_name_export(baserlib.ResourceBase):
1104 """/2/instances/[instance_name]/export resource.
1108 """Exports an instance.
1113 if not isinstance(self.request_body, dict):
1114 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
1116 op = _ParseExportInstanceRequest(self.items[0], self.request_body)
1118 return self.SubmitJob([op])
1121 def _ParseMigrateInstanceRequest(name, data):
1122 """Parses a request for an instance migration.
1124 @rtype: L{opcodes.OpInstanceMigrate}
1125 @return: Instance migration opcode
1128 return baserlib.FillOpcode(opcodes.OpInstanceMigrate, data, {
1129 "instance_name": name,
1133 class R_2_instances_name_migrate(baserlib.ResourceBase):
1134 """/2/instances/[instance_name]/migrate resource.
1138 """Migrates an instance.
1143 baserlib.CheckType(self.request_body, dict, "Body contents")
1145 op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)
1147 return self.SubmitJob([op])
1150 class R_2_instances_name_failover(baserlib.ResourceBase):
1151 """/2/instances/[instance_name]/failover resource.
1155 """Does a failover of an instance.
1160 baserlib.CheckType(self.request_body, dict, "Body contents")
1162 op = baserlib.FillOpcode(opcodes.OpInstanceFailover, self.request_body, {
1163 "instance_name": self.items[0],
1166 return self.SubmitJob([op])
1169 def _ParseRenameInstanceRequest(name, data):
1170 """Parses a request for renaming an instance.
1172 @rtype: L{opcodes.OpInstanceRename}
1173 @return: Instance rename opcode
1176 return baserlib.FillOpcode(opcodes.OpInstanceRename, data, {
1177 "instance_name": name,
1181 class R_2_instances_name_rename(baserlib.ResourceBase):
1182 """/2/instances/[instance_name]/rename resource.
1186 """Changes the name of an instance.
1191 baserlib.CheckType(self.request_body, dict, "Body contents")
1193 op = _ParseRenameInstanceRequest(self.items[0], self.request_body)
1195 return self.SubmitJob([op])
1198 def _ParseModifyInstanceRequest(name, data):
1199 """Parses a request for modifying an instance.
1201 @rtype: L{opcodes.OpInstanceSetParams}
1202 @return: Instance modify opcode
1205 return baserlib.FillOpcode(opcodes.OpInstanceSetParams, data, {
1206 "instance_name": name,
1210 class R_2_instances_name_modify(baserlib.ResourceBase):
1211 """/2/instances/[instance_name]/modify resource.
1215 """Changes some parameters of an instance.
1220 baserlib.CheckType(self.request_body, dict, "Body contents")
1222 op = _ParseModifyInstanceRequest(self.items[0], self.request_body)
1224 return self.SubmitJob([op])
1227 class R_2_instances_name_disk_grow(baserlib.ResourceBase):
1228 """/2/instances/[instance_name]/disk/[disk_index]/grow resource.
1232 """Increases the size of an instance disk.
1237 op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
1238 "instance_name": self.items[0],
1239 "disk": int(self.items[1]),
1242 return self.SubmitJob([op])
1245 class R_2_instances_name_console(baserlib.ResourceBase):
1246 """/2/instances/[instance_name]/console resource.
1249 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
1252 """Request information for connecting to instance's console.
1254 @return: Serialized instance console description, see
1255 L{objects.InstanceConsole}
1258 client = self.GetClient()
1260 ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)
1263 raise http.HttpServiceUnavailable("Instance console unavailable")
1265 assert isinstance(console, dict)
def _GetQueryFields(args):
  """Tries to extract the C{fields} query parameter.

  @type args: dictionary
  @param args: parsed query arguments (values are lists)
  @raise http.HttpBadRequest: When the parameter can't be found

  """
  # Bug fix: the lookup must be guarded — as written, HttpBadRequest was
  # raised unconditionally even when "fields" was present.
  try:
    fields = args["fields"]
  except KeyError:
    raise http.HttpBadRequest("Missing 'fields' query argument")

  # Query arguments arrive as lists; only the first value is used.
  return _SplitQueryFields(fields[0])
1281 def _SplitQueryFields(fields):
1285 return [i.strip() for i in fields.split(",")]
1288 class R_2_query(baserlib.ResourceBase):
1289 """/2/query/[resource] resource.
1292 # Results might contain sensitive information
1293 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
1295 def _Query(self, fields, filter_):
1296 return self.GetClient().Query(self.items[0], fields, filter_).ToDict()
1299 """Returns resource information.
1301 @return: Query result, see L{objects.QueryResponse}
1304 return self._Query(_GetQueryFields(self.queryargs), None)
1307 """Submits job querying for resources.
1309 @return: Query result, see L{objects.QueryResponse}
1312 body = self.request_body
1314 baserlib.CheckType(body, dict, "Body contents")
1317 fields = body["fields"]
1319 fields = _GetQueryFields(self.queryargs)
1321 return self._Query(fields, self.request_body.get("filter", None))
1324 class R_2_query_fields(baserlib.ResourceBase):
1325 """/2/query/[resource]/fields resource.
1329 """Retrieves list of available fields for a resource.
1331 @return: List of serialized L{objects.QueryFieldDefinition}
1335 raw_fields = self.queryargs["fields"]
1339 fields = _SplitQueryFields(raw_fields[0])
1341 return self.GetClient().QueryFields(self.items[0], fields).ToDict()
1344 class _R_Tags(baserlib.ResourceBase):
1345 """ Quasiclass for tagging resources
1347 Manages tags. When inheriting this class you must define the
1353 def __init__(self, items, queryargs, req):
1354 """A tag resource constructor.
1356 We have to override the default to sort out cluster naming case.
1359 baserlib.ResourceBase.__init__(self, items, queryargs, req)
1361 if self.TAG_LEVEL == constants.TAG_CLUSTER:
1364 self.name = items[0]
1367 """Returns a list of tags.
1369 Example: ["tag1", "tag2", "tag3"]
1372 kind = self.TAG_LEVEL
1374 if kind in (constants.TAG_INSTANCE,
1375 constants.TAG_NODEGROUP,
1376 constants.TAG_NODE):
1378 raise http.HttpBadRequest("Missing name on tag request")
1380 cl = self.GetClient()
1381 if kind == constants.TAG_INSTANCE:
1382 fn = cl.QueryInstances
1383 elif kind == constants.TAG_NODEGROUP:
1387 result = fn(names=[self.name], fields=["tags"], use_locking=False)
1388 if not result or not result[0]:
1389 raise http.HttpBadGateway("Invalid response from tag query")
1392 elif kind == constants.TAG_CLUSTER:
1393 assert not self.name
1394 # TODO: Use query API?
1395 ssc = ssconf.SimpleStore()
1396 tags = ssc.GetClusterTags()
1401 """Add a set of tags.
1403 The request as a list of strings should be PUT to this URI. And
1404 you'll have back a job id.
1407 # pylint: disable-msg=W0212
1408 if "tag" not in self.queryargs:
1409 raise http.HttpBadRequest("Please specify tag(s) to add using the"
1410 " the 'tag' parameter")
1411 op = opcodes.OpTagsSet(kind=self.TAG_LEVEL, name=self.name,
1412 tags=self.queryargs["tag"], dry_run=self.dryRun())
1413 return self.SubmitJob([op])
1418 In order to delete a set of tags, the DELETE
1419 request should be addressed to URI like:
1420 /tags?tag=[tag]&tag=[tag]
1423 # pylint: disable-msg=W0212
1424 if "tag" not in self.queryargs:
1425 # no we not gonna delete all tags
1426 raise http.HttpBadRequest("Cannot delete all tags - please specify"
1427 " tag(s) using the 'tag' parameter")
1428 op = opcodes.OpTagsDel(kind=self.TAG_LEVEL, name=self.name,
1429 tags=self.queryargs["tag"], dry_run=self.dryRun())
1430 return self.SubmitJob([op])
1433 class R_2_instances_name_tags(_R_Tags):
1434 """ /2/instances/[instance_name]/tags resource.
1436 Manages per-instance tags.
1439 TAG_LEVEL = constants.TAG_INSTANCE
1442 class R_2_nodes_name_tags(_R_Tags):
1443 """ /2/nodes/[node_name]/tags resource.
1445 Manages per-node tags.
1448 TAG_LEVEL = constants.TAG_NODE
1451 class R_2_groups_name_tags(_R_Tags):
1452 """ /2/groups/[group_name]/tags resource.
1454 Manages per-nodegroup tags.
1457 TAG_LEVEL = constants.TAG_NODEGROUP
1460 class R_2_tags(_R_Tags):
1461 """ /2/tags resource.
1463 Manages cluster tags.
1466 TAG_LEVEL = constants.TAG_CLUSTER