# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Remote API version 2 baserlib.library.

According to RFC2616 the main difference between PUT and POST is that
POST can create new resources but PUT can only create the resource the
URI was pointing to on the PUT request.

In the context of this module, for instance creation a POST on
/2/instances is legitimate while a PUT would not be, since it creates a
new entity rather than replacing /2/instances with it.

So when adding new methods, if they are operating on the URI entity itself,
PUT should be preferred over POST.

"""
# pylint: disable-msg=C0103

# C0103: Invalid name, since the R_* names are not conforming
44 from ganeti import opcodes
45 from ganeti import http
46 from ganeti import constants
47 from ganeti import cli
48 from ganeti import rapi
50 from ganeti.rapi import baserlib
53 _COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
54 I_FIELDS = ["name", "admin_state", "os",
57 "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
59 "disk.sizes", "disk_usage",
60 "beparams", "hvparams",
61 "oper_state", "oper_ram", "oper_vcpus", "status",
62 "custom_hvparams", "custom_beparams", "custom_nicparams",
65 N_FIELDS = ["name", "offline", "master_candidate", "drained",
67 "mtotal", "mnode", "mfree",
68 "pinst_cnt", "sinst_cnt",
69 "ctotal", "cnodes", "csockets",
71 "pinst_list", "sinst_list",
72 "master_capable", "vm_capable",
76 G_FIELDS = ["name", "uuid",
78 "node_cnt", "node_list",
79 "ctime", "mtime", "serial_no",
80 ] # "tags" is missing to be able to use _COMMON_FIELDS here.
# External (RAPI-visible) node role names.
_NR_DRAINED = "drained"
_NR_MASTER_CANDIATE = "master-candidate"
# NOTE(review): the _NR_MASTER definition was dropped from this listing;
# restored here since _NR_MAP below references it.
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

# Maps internal node role constants to the external names above.
_NR_MAP = {
  constants.NR_MASTER: _NR_MASTER,
  constants.NR_MCANDIDATE: _NR_MASTER_CANDIATE,
  constants.NR_DRAINED: _NR_DRAINED,
  constants.NR_OFFLINE: _NR_OFFLINE,
  constants.NR_REGULAR: _NR_REGULAR,
  }

# Every defined node role must have an external name.
assert frozenset(_NR_MAP.keys()) == constants.NR_ALL
98 # Request data version field
99 _REQ_DATA_VERSION = "__version__"
101 # Feature string for instance creation request data version 1
102 _INST_CREATE_REQV1 = "instance-create-reqv1"
104 # Feature string for instance reinstall request version 1
105 _INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
107 # Feature string for node migration version 1
108 _NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
110 # Feature string for node evacuation with LU-generated jobs
111 _NODE_EVAC_RES1 = "node-evac-res1"
113 ALL_FEATURES = frozenset([
115 _INST_REINSTALL_REQV1,
120 # Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  # NOTE(review): the GET method header was dropped from this listing;
  # restored as a staticmethod since it uses no request state.
  @staticmethod
  def GET():
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION
class R_2_info(baserlib.R_Generic):
  """/2/info resource.

  """
  # NOTE(review): the GET method header was dropped from this listing;
  # restored as a staticmethod since it uses no request state.
  @staticmethod
  def GET():
    """Returns cluster information.

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()
class R_2_features(baserlib.R_Generic):
  """/2/features resource.

  """
  # NOTE(review): the GET method header was dropped from this listing;
  # restored as a staticmethod since it uses no request state.
  @staticmethod
  def GET():
    """Returns list of optional RAPI features implemented.

    """
    return list(ALL_FEATURES)
164 class R_2_os(baserlib.R_Generic):
170 """Return a list of all OSes.
172 Can return error 500 in case of a problem.
174 Example: ["debian-etch"]
177 cl = baserlib.GetClient()
178 op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
179 job_id = baserlib.SubmitJob([op], cl)
180 # we use custom feedback function, instead of print we log the status
181 result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
182 diagnose_data = result[0]
184 if not isinstance(diagnose_data, list):
185 raise http.HttpBadGateway(message="Can't get OS list")
188 for (name, variants) in diagnose_data:
189 os_names.extend(cli.CalculateOSNames(name, variants))
194 class R_2_redist_config(baserlib.R_Generic):
195 """/2/redistribute-config resource.
200 """Redistribute configuration to all nodes.
203 return baserlib.SubmitJob([opcodes.OpClusterRedistConf()])
206 class R_2_cluster_modify(baserlib.R_Generic):
207 """/2/modify resource.
211 """Modifies cluster parameters.
216 op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body,
219 return baserlib.SubmitJob([op])
222 class R_2_jobs(baserlib.R_Generic):
228 """Returns a dictionary of jobs.
230 @return: a dictionary with jobs id and uri.
234 cl = baserlib.GetClient()
235 # Convert the list of lists to the list of ids
236 result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
237 return baserlib.BuildUriList(result, "/2/jobs/%s",
238 uri_fields=("id", "uri"))
241 class R_2_jobs_id(baserlib.R_Generic):
242 """/2/jobs/[job_id] resource.
246 """Returns a job status.
248 @return: a dictionary with job parameters.
250 - id: job ID as a number
251 - status: current job status as a string
252 - ops: involved OpCodes as a list of dictionaries for each
254 - opstatus: OpCodes status as a list
255 - opresult: OpCodes results as a list of lists
258 fields = ["id", "ops", "status", "summary",
259 "opstatus", "opresult", "oplog",
260 "received_ts", "start_ts", "end_ts",
262 job_id = self.items[0]
263 result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
265 raise http.HttpNotFound()
266 return baserlib.MapFields(fields, result)
269 """Cancel not-yet-started job.
272 job_id = self.items[0]
273 result = baserlib.GetClient().CancelJob(job_id)
277 class R_2_jobs_id_wait(baserlib.R_Generic):
278 """/2/jobs/[job_id]/wait resource.
281 # WaitForJobChange provides access to sensitive information and blocks
282 # machine resources (it's a blocking RAPI call), hence restricting access.
283 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
286 """Waits for job changes.
289 job_id = self.items[0]
291 fields = self.getBodyParameter("fields")
292 prev_job_info = self.getBodyParameter("previous_job_info", None)
293 prev_log_serial = self.getBodyParameter("previous_log_serial", None)
295 if not isinstance(fields, list):
296 raise http.HttpBadRequest("The 'fields' parameter should be a list")
298 if not (prev_job_info is None or isinstance(prev_job_info, list)):
299 raise http.HttpBadRequest("The 'previous_job_info' parameter should"
302 if not (prev_log_serial is None or
303 isinstance(prev_log_serial, (int, long))):
304 raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
307 client = baserlib.GetClient()
308 result = client.WaitForJobChangeOnce(job_id, fields,
309 prev_job_info, prev_log_serial,
310 timeout=_WFJC_TIMEOUT)
312 raise http.HttpNotFound()
314 if result == constants.JOB_NOTCHANGED:
318 (job_info, log_entries) = result
321 "job_info": job_info,
322 "log_entries": log_entries,
326 class R_2_nodes(baserlib.R_Generic):
327 """/2/nodes resource.
331 """Returns a list of all nodes.
334 client = baserlib.GetClient()
337 bulkdata = client.QueryNodes([], N_FIELDS, False)
338 return baserlib.MapBulkFields(bulkdata, N_FIELDS)
340 nodesdata = client.QueryNodes([], ["name"], False)
341 nodeslist = [row[0] for row in nodesdata]
342 return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
343 uri_fields=("id", "uri"))
346 class R_2_nodes_name(baserlib.R_Generic):
347 """/2/nodes/[node_name] resource.
351 """Send information about a node.
354 node_name = self.items[0]
355 client = baserlib.GetClient()
357 result = baserlib.HandleItemQueryErrors(client.QueryNodes,
358 names=[node_name], fields=N_FIELDS,
359 use_locking=self.useLocking())
361 return baserlib.MapFields(N_FIELDS, result[0])
364 class R_2_nodes_name_role(baserlib.R_Generic):
365 """ /2/nodes/[node_name]/role resource.
369 """Returns the current node role.
374 node_name = self.items[0]
375 client = baserlib.GetClient()
376 result = client.QueryNodes(names=[node_name], fields=["role"],
377 use_locking=self.useLocking())
379 return _NR_MAP[result[0][0]]
382 """Sets the node role.
387 if not isinstance(self.request_body, basestring):
388 raise http.HttpBadRequest("Invalid body contents, not a string")
390 node_name = self.items[0]
391 role = self.request_body
393 if role == _NR_REGULAR:
398 elif role == _NR_MASTER_CANDIATE:
400 offline = drained = None
402 elif role == _NR_DRAINED:
404 candidate = offline = None
406 elif role == _NR_OFFLINE:
408 candidate = drained = None
411 raise http.HttpBadRequest("Can't set '%s' role" % role)
413 op = opcodes.OpNodeSetParams(node_name=node_name,
414 master_candidate=candidate,
417 force=bool(self.useForce()))
419 return baserlib.SubmitJob([op])
422 class R_2_nodes_name_evacuate(baserlib.R_Generic):
423 """/2/nodes/[node_name]/evacuate resource.
427 """Evacuate all instances off a node.
430 op = baserlib.FillOpcode(opcodes.OpNodeEvacuate, self.request_body, {
431 "node_name": self.items[0],
432 "dry_run": self.dryRun(),
435 return baserlib.SubmitJob([op])
438 class R_2_nodes_name_migrate(baserlib.R_Generic):
439 """/2/nodes/[node_name]/migrate resource.
443 """Migrate all primary instances from a node.
446 node_name = self.items[0]
449 # Support old-style requests
450 if "live" in self.queryargs and "mode" in self.queryargs:
451 raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
454 if "live" in self.queryargs:
455 if self._checkIntVariable("live", default=1):
456 mode = constants.HT_MIGRATION_LIVE
458 mode = constants.HT_MIGRATION_NONLIVE
460 mode = self._checkStringVariable("mode", default=None)
466 data = self.request_body
468 op = baserlib.FillOpcode(opcodes.OpNodeMigrate, data, {
469 "node_name": node_name,
472 return baserlib.SubmitJob([op])
475 class R_2_nodes_name_storage(baserlib.R_Generic):
476 """/2/nodes/[node_name]/storage resource.
479 # LUNodeQueryStorage acquires locks, hence restricting access to GET
480 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
483 node_name = self.items[0]
485 storage_type = self._checkStringVariable("storage_type", None)
487 raise http.HttpBadRequest("Missing the required 'storage_type'"
490 output_fields = self._checkStringVariable("output_fields", None)
491 if not output_fields:
492 raise http.HttpBadRequest("Missing the required 'output_fields'"
495 op = opcodes.OpNodeQueryStorage(nodes=[node_name],
496 storage_type=storage_type,
497 output_fields=output_fields.split(","))
498 return baserlib.SubmitJob([op])
501 class R_2_nodes_name_storage_modify(baserlib.R_Generic):
502 """/2/nodes/[node_name]/storage/modify resource.
506 node_name = self.items[0]
508 storage_type = self._checkStringVariable("storage_type", None)
510 raise http.HttpBadRequest("Missing the required 'storage_type'"
513 name = self._checkStringVariable("name", None)
515 raise http.HttpBadRequest("Missing the required 'name'"
520 if "allocatable" in self.queryargs:
521 changes[constants.SF_ALLOCATABLE] = \
522 bool(self._checkIntVariable("allocatable", default=1))
524 op = opcodes.OpNodeModifyStorage(node_name=node_name,
525 storage_type=storage_type,
528 return baserlib.SubmitJob([op])
531 class R_2_nodes_name_storage_repair(baserlib.R_Generic):
532 """/2/nodes/[node_name]/storage/repair resource.
536 node_name = self.items[0]
538 storage_type = self._checkStringVariable("storage_type", None)
540 raise http.HttpBadRequest("Missing the required 'storage_type'"
543 name = self._checkStringVariable("name", None)
545 raise http.HttpBadRequest("Missing the required 'name'"
548 op = opcodes.OpRepairNodeStorage(node_name=node_name,
549 storage_type=storage_type,
551 return baserlib.SubmitJob([op])
554 def _ParseCreateGroupRequest(data, dry_run):
555 """Parses a request for creating a node group.
557 @rtype: L{opcodes.OpGroupAdd}
558 @return: Group creation opcode
566 "name": "group_name",
569 return baserlib.FillOpcode(opcodes.OpGroupAdd, data, override,
573 class R_2_groups(baserlib.R_Generic):
574 """/2/groups resource.
578 """Returns a list of all node groups.
581 client = baserlib.GetClient()
584 bulkdata = client.QueryGroups([], G_FIELDS, False)
585 return baserlib.MapBulkFields(bulkdata, G_FIELDS)
587 data = client.QueryGroups([], ["name"], False)
588 groupnames = [row[0] for row in data]
589 return baserlib.BuildUriList(groupnames, "/2/groups/%s",
590 uri_fields=("name", "uri"))
593 """Create a node group.
598 baserlib.CheckType(self.request_body, dict, "Body contents")
599 op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
600 return baserlib.SubmitJob([op])
603 class R_2_groups_name(baserlib.R_Generic):
604 """/2/groups/[group_name] resource.
608 """Send information about a node group.
611 group_name = self.items[0]
612 client = baserlib.GetClient()
614 result = baserlib.HandleItemQueryErrors(client.QueryGroups,
615 names=[group_name], fields=G_FIELDS,
616 use_locking=self.useLocking())
618 return baserlib.MapFields(G_FIELDS, result[0])
621 """Delete a node group.
624 op = opcodes.OpGroupRemove(group_name=self.items[0],
625 dry_run=bool(self.dryRun()))
627 return baserlib.SubmitJob([op])
def _ParseModifyGroupRequest(name, data):
  """Parses a request for modifying a node group.

  @type name: string
  @param name: name of the node group to modify
  @type data: dict
  @param data: the body received by the modify request

  @rtype: L{opcodes.OpGroupSetParams}
  @return: Group modify opcode

  """
  # NOTE(review): the override dict body was dropped from this listing;
  # restored from the function signature and sibling parsers.
  return baserlib.FillOpcode(opcodes.OpGroupSetParams, data, {
    "group_name": name,
    })
643 class R_2_groups_name_modify(baserlib.R_Generic):
644 """/2/groups/[group_name]/modify resource.
648 """Changes some parameters of node group.
653 baserlib.CheckType(self.request_body, dict, "Body contents")
655 op = _ParseModifyGroupRequest(self.items[0], self.request_body)
657 return baserlib.SubmitJob([op])
def _ParseRenameGroupRequest(name, data, dry_run):
  """Parses a request for renaming a node group.

  @type name: string
  @param name: name of the node group to rename
  @type data: dict
  @param data: the body received by the rename request
  @type dry_run: bool
  @param dry_run: whether to perform a dry run

  @rtype: L{opcodes.OpGroupRename}
  @return: Node group rename opcode

  """
  # NOTE(review): the override dict body was dropped from this listing;
  # restored from the function signature and sibling parsers.
  return baserlib.FillOpcode(opcodes.OpGroupRename, data, {
    "group_name": name,
    "dry_run": dry_run,
    })
680 class R_2_groups_name_rename(baserlib.R_Generic):
681 """/2/groups/[group_name]/rename resource.
685 """Changes the name of a node group.
690 baserlib.CheckType(self.request_body, dict, "Body contents")
691 op = _ParseRenameGroupRequest(self.items[0], self.request_body,
693 return baserlib.SubmitJob([op])
696 class R_2_groups_name_assign_nodes(baserlib.R_Generic):
697 """/2/groups/[group_name]/assign-nodes resource.
701 """Assigns nodes to a group.
706 op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body, {
707 "group_name": self.items[0],
708 "dry_run": self.dryRun(),
709 "force": self.useForce(),
712 return baserlib.SubmitJob([op])
715 def _ParseInstanceCreateRequestVersion1(data, dry_run):
716 """Parses an instance creation request version 1.
718 @rtype: L{opcodes.OpInstanceCreate}
719 @return: Instance creation opcode
728 "name": "instance_name",
731 return baserlib.FillOpcode(opcodes.OpInstanceCreate, data, override,
735 class R_2_instances(baserlib.R_Generic):
736 """/2/instances resource.
740 """Returns a list of all available instances.
743 client = baserlib.GetClient()
745 use_locking = self.useLocking()
747 bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
748 return baserlib.MapBulkFields(bulkdata, I_FIELDS)
750 instancesdata = client.QueryInstances([], ["name"], use_locking)
751 instanceslist = [row[0] for row in instancesdata]
752 return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
753 uri_fields=("id", "uri"))
756 """Create an instance.
761 if not isinstance(self.request_body, dict):
762 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
764 # Default to request data version 0
765 data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
767 if data_version == 0:
768 raise http.HttpBadRequest("Instance creation request version 0 is no"
770 elif data_version == 1:
771 data = self.request_body.copy()
772 # Remove "__version__"
773 data.pop(_REQ_DATA_VERSION, None)
774 op = _ParseInstanceCreateRequestVersion1(data, self.dryRun())
776 raise http.HttpBadRequest("Unsupported request data version %s" %
779 return baserlib.SubmitJob([op])
782 class R_2_instances_name(baserlib.R_Generic):
783 """/2/instances/[instance_name] resource.
787 """Send information about an instance.
790 client = baserlib.GetClient()
791 instance_name = self.items[0]
793 result = baserlib.HandleItemQueryErrors(client.QueryInstances,
794 names=[instance_name],
796 use_locking=self.useLocking())
798 return baserlib.MapFields(I_FIELDS, result[0])
801 """Delete an instance.
804 op = opcodes.OpInstanceRemove(instance_name=self.items[0],
805 ignore_failures=False,
806 dry_run=bool(self.dryRun()))
807 return baserlib.SubmitJob([op])
810 class R_2_instances_name_info(baserlib.R_Generic):
811 """/2/instances/[instance_name]/info resource.
815 """Request detailed instance information.
818 instance_name = self.items[0]
819 static = bool(self._checkIntVariable("static", default=0))
821 op = opcodes.OpInstanceQueryData(instances=[instance_name],
823 return baserlib.SubmitJob([op])
826 class R_2_instances_name_reboot(baserlib.R_Generic):
827 """/2/instances/[instance_name]/reboot resource.
829 Implements an instance reboot.
833 """Reboot an instance.
835 The URI takes type=[hard|soft|full] and
836 ignore_secondaries=[False|True] parameters.
839 instance_name = self.items[0]
840 reboot_type = self.queryargs.get('type',
841 [constants.INSTANCE_REBOOT_HARD])[0]
842 ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
843 op = opcodes.OpInstanceReboot(instance_name=instance_name,
844 reboot_type=reboot_type,
845 ignore_secondaries=ignore_secondaries,
846 dry_run=bool(self.dryRun()))
848 return baserlib.SubmitJob([op])
851 class R_2_instances_name_startup(baserlib.R_Generic):
852 """/2/instances/[instance_name]/startup resource.
854 Implements an instance startup.
858 """Startup an instance.
860 The URI takes force=[False|True] parameter to start the instance
861 if even if secondary disks are failing.
864 instance_name = self.items[0]
865 force_startup = bool(self._checkIntVariable('force'))
866 no_remember = bool(self._checkIntVariable('no_remember'))
867 op = opcodes.OpInstanceStartup(instance_name=instance_name,
869 dry_run=bool(self.dryRun()),
870 no_remember=no_remember)
872 return baserlib.SubmitJob([op])
def _ParseShutdownInstanceRequest(name, data, dry_run, no_remember):
  """Parses a request for an instance shutdown.

  @type name: string
  @param name: name of the instance to shut down
  @type data: dict
  @param data: the body received by the shutdown request
  @type dry_run: bool
  @param dry_run: whether to perform a dry run
  @type no_remember: bool
  @param no_remember: whether to not remember the state change

  @rtype: L{opcodes.OpInstanceShutdown}
  @return: Instance shutdown opcode

  """
  # NOTE(review): the "dry_run" entry and closing bracket were dropped from
  # this listing; restored from the function signature.
  return baserlib.FillOpcode(opcodes.OpInstanceShutdown, data, {
    "instance_name": name,
    "dry_run": dry_run,
    "no_remember": no_remember,
    })
889 class R_2_instances_name_shutdown(baserlib.R_Generic):
890 """/2/instances/[instance_name]/shutdown resource.
892 Implements an instance shutdown.
896 """Shutdown an instance.
901 baserlib.CheckType(self.request_body, dict, "Body contents")
903 no_remember = bool(self._checkIntVariable('no_remember'))
904 op = _ParseShutdownInstanceRequest(self.items[0], self.request_body,
905 bool(self.dryRun()), no_remember)
907 return baserlib.SubmitJob([op])
910 def _ParseInstanceReinstallRequest(name, data):
911 """Parses a request for reinstalling an instance.
914 if not isinstance(data, dict):
915 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
917 ostype = baserlib.CheckParameter(data, "os", default=None)
918 start = baserlib.CheckParameter(data, "start", exptype=bool,
920 osparams = baserlib.CheckParameter(data, "osparams", default=None)
923 opcodes.OpInstanceShutdown(instance_name=name),
924 opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
929 ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))
934 class R_2_instances_name_reinstall(baserlib.R_Generic):
935 """/2/instances/[instance_name]/reinstall resource.
937 Implements an instance reinstall.
941 """Reinstall an instance.
943 The URI takes os=name and nostartup=[0|1] optional
944 parameters. By default, the instance will be started
948 if self.request_body:
950 raise http.HttpBadRequest("Can't combine query and body parameters")
952 body = self.request_body
954 # Legacy interface, do not modify/extend
956 "os": self._checkStringVariable("os"),
957 "start": not self._checkIntVariable("nostartup"),
962 ops = _ParseInstanceReinstallRequest(self.items[0], body)
964 return baserlib.SubmitJob(ops)
967 def _ParseInstanceReplaceDisksRequest(name, data):
968 """Parses a request for an instance export.
970 @rtype: L{opcodes.OpInstanceReplaceDisks}
971 @return: Instance export opcode
975 "instance_name": name,
980 raw_disks = data["disks"]
984 if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102
985 # Backwards compatibility for strings of the format "1, 2, 3"
987 data["disks"] = [int(part) for part in raw_disks.split(",")]
988 except (TypeError, ValueError), err:
989 raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
991 return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)
994 class R_2_instances_name_replace_disks(baserlib.R_Generic):
995 """/2/instances/[instance_name]/replace-disks resource.
999 """Replaces disks on an instance.
1002 op = _ParseInstanceReplaceDisksRequest(self.items[0], self.request_body)
1004 return baserlib.SubmitJob([op])
1007 class R_2_instances_name_activate_disks(baserlib.R_Generic):
1008 """/2/instances/[instance_name]/activate-disks resource.
1012 """Activate disks for an instance.
1014 The URI might contain ignore_size to ignore current recorded size.
1017 instance_name = self.items[0]
1018 ignore_size = bool(self._checkIntVariable('ignore_size'))
1020 op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
1021 ignore_size=ignore_size)
1023 return baserlib.SubmitJob([op])
1026 class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
1027 """/2/instances/[instance_name]/deactivate-disks resource.
1031 """Deactivate disks for an instance.
1034 instance_name = self.items[0]
1036 op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)
1038 return baserlib.SubmitJob([op])
1041 class R_2_instances_name_prepare_export(baserlib.R_Generic):
1042 """/2/instances/[instance_name]/prepare-export resource.
1046 """Prepares an export for an instance.
1051 instance_name = self.items[0]
1052 mode = self._checkStringVariable("mode")
1054 op = opcodes.OpBackupPrepare(instance_name=instance_name,
1057 return baserlib.SubmitJob([op])
1060 def _ParseExportInstanceRequest(name, data):
1061 """Parses a request for an instance export.
1063 @rtype: L{opcodes.OpBackupExport}
1064 @return: Instance export opcode
1067 # Rename "destination" to "target_node"
1069 data["target_node"] = data.pop("destination")
1073 return baserlib.FillOpcode(opcodes.OpBackupExport, data, {
1074 "instance_name": name,
1078 class R_2_instances_name_export(baserlib.R_Generic):
1079 """/2/instances/[instance_name]/export resource.
1083 """Exports an instance.
1088 if not isinstance(self.request_body, dict):
1089 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
1091 op = _ParseExportInstanceRequest(self.items[0], self.request_body)
1093 return baserlib.SubmitJob([op])
def _ParseMigrateInstanceRequest(name, data):
  """Parses a request for an instance migration.

  @type name: string
  @param name: name of the instance to migrate
  @type data: dict
  @param data: the body received by the migrate request

  @rtype: L{opcodes.OpInstanceMigrate}
  @return: Instance migration opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceMigrate, data, {
    "instance_name": name,
    })
1108 class R_2_instances_name_migrate(baserlib.R_Generic):
1109 """/2/instances/[instance_name]/migrate resource.
1113 """Migrates an instance.
1118 baserlib.CheckType(self.request_body, dict, "Body contents")
1120 op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)
1122 return baserlib.SubmitJob([op])
1125 class R_2_instances_name_failover(baserlib.R_Generic):
1126 """/2/instances/[instance_name]/failover resource.
1130 """Does a failover of an instance.
1135 baserlib.CheckType(self.request_body, dict, "Body contents")
1137 op = baserlib.FillOpcode(opcodes.OpInstanceFailover, self.request_body, {
1138 "instance_name": self.items[0],
1141 return baserlib.SubmitJob([op])
def _ParseRenameInstanceRequest(name, data):
  """Parses a request for renaming an instance.

  @type name: string
  @param name: name of the instance to rename
  @type data: dict
  @param data: the body received by the rename request

  @rtype: L{opcodes.OpInstanceRename}
  @return: Instance rename opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceRename, data, {
    "instance_name": name,
    })
1156 class R_2_instances_name_rename(baserlib.R_Generic):
1157 """/2/instances/[instance_name]/rename resource.
1161 """Changes the name of an instance.
1166 baserlib.CheckType(self.request_body, dict, "Body contents")
1168 op = _ParseRenameInstanceRequest(self.items[0], self.request_body)
1170 return baserlib.SubmitJob([op])
def _ParseModifyInstanceRequest(name, data):
  """Parses a request for modifying an instance.

  @type name: string
  @param name: name of the instance to modify
  @type data: dict
  @param data: the body received by the modify request

  @rtype: L{opcodes.OpInstanceSetParams}
  @return: Instance modify opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceSetParams, data, {
    "instance_name": name,
    })
1185 class R_2_instances_name_modify(baserlib.R_Generic):
1186 """/2/instances/[instance_name]/modify resource.
1190 """Changes some parameters of an instance.
1195 baserlib.CheckType(self.request_body, dict, "Body contents")
1197 op = _ParseModifyInstanceRequest(self.items[0], self.request_body)
1199 return baserlib.SubmitJob([op])
1202 class R_2_instances_name_disk_grow(baserlib.R_Generic):
1203 """/2/instances/[instance_name]/disk/[disk_index]/grow resource.
1207 """Increases the size of an instance disk.
1212 op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
1213 "instance_name": self.items[0],
1214 "disk": int(self.items[1]),
1217 return baserlib.SubmitJob([op])
1220 class R_2_instances_name_console(baserlib.R_Generic):
1221 """/2/instances/[instance_name]/console resource.
1224 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
1227 """Request information for connecting to instance's console.
1229 @return: Serialized instance console description, see
1230 L{objects.InstanceConsole}
1233 client = baserlib.GetClient()
1235 ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)
1238 raise http.HttpServiceUnavailable("Instance console unavailable")
1240 assert isinstance(console, dict)
1244 def _GetQueryFields(args):
1249 fields = args["fields"]
1251 raise http.HttpBadRequest("Missing 'fields' query argument")
1253 return _SplitQueryFields(fields[0])
1256 def _SplitQueryFields(fields):
1260 return [i.strip() for i in fields.split(",")]
1263 class R_2_query(baserlib.R_Generic):
1264 """/2/query/[resource] resource.
1267 # Results might contain sensitive information
1268 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
1270 def _Query(self, fields, filter_):
1271 return baserlib.GetClient().Query(self.items[0], fields, filter_).ToDict()
1274 """Returns resource information.
1276 @return: Query result, see L{objects.QueryResponse}
1279 return self._Query(_GetQueryFields(self.queryargs), None)
1282 """Submits job querying for resources.
1284 @return: Query result, see L{objects.QueryResponse}
1287 body = self.request_body
1289 baserlib.CheckType(body, dict, "Body contents")
1292 fields = body["fields"]
1294 fields = _GetQueryFields(self.queryargs)
1296 return self._Query(fields, self.request_body.get("filter", None))
1299 class R_2_query_fields(baserlib.R_Generic):
1300 """/2/query/[resource]/fields resource.
1304 """Retrieves list of available fields for a resource.
1306 @return: List of serialized L{objects.QueryFieldDefinition}
1310 raw_fields = self.queryargs["fields"]
1314 fields = _SplitQueryFields(raw_fields[0])
1316 return baserlib.GetClient().QueryFields(self.items[0], fields).ToDict()
1319 class _R_Tags(baserlib.R_Generic):
1320 """ Quasiclass for tagging resources
1322 Manages tags. When inheriting this class you must define the
1328 def __init__(self, items, queryargs, req):
1329 """A tag resource constructor.
1331 We have to override the default to sort out cluster naming case.
1334 baserlib.R_Generic.__init__(self, items, queryargs, req)
1336 if self.TAG_LEVEL == constants.TAG_CLUSTER:
1339 self.name = items[0]
1342 """Returns a list of tags.
1344 Example: ["tag1", "tag2", "tag3"]
1347 # pylint: disable-msg=W0212
1348 return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)
1351 """Add a set of tags.
1353 The request as a list of strings should be PUT to this URI. And
1354 you'll have back a job id.
1357 # pylint: disable-msg=W0212
1358 if 'tag' not in self.queryargs:
1359 raise http.HttpBadRequest("Please specify tag(s) to add using the"
1360 " the 'tag' parameter")
1361 return baserlib._Tags_PUT(self.TAG_LEVEL,
1362 self.queryargs['tag'], name=self.name,
1363 dry_run=bool(self.dryRun()))
1368 In order to delete a set of tags, the DELETE
1369 request should be addressed to URI like:
1370 /tags?tag=[tag]&tag=[tag]
1373 # pylint: disable-msg=W0212
1374 if 'tag' not in self.queryargs:
1375 # no we not gonna delete all tags
1376 raise http.HttpBadRequest("Cannot delete all tags - please specify"
1377 " tag(s) using the 'tag' parameter")
1378 return baserlib._Tags_DELETE(self.TAG_LEVEL,
1379 self.queryargs['tag'],
1381 dry_run=bool(self.dryRun()))
class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE
class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE
class R_2_groups_name_tags(_R_Tags):
  """ /2/groups/[group_name]/tags resource.

  Manages per-nodegroup tags.

  """
  TAG_LEVEL = constants.TAG_NODEGROUP
class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER