4 # Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Remote API version 2 baserlib.library.
27 According to RFC2616 the main difference between PUT and POST is that
28 POST can create new resources but PUT can only create the resource the
29 URI was pointing to on the PUT request.
# In the context of this module, for instance creation a POST on
# /2/instances is legitimate while a PUT would not be, since PUT creates
# a new entity rather than just replacing /2/instances with it.
# So when adding new methods, if they operate on the URI entity itself,
# PUT should be preferred over POST.
40 # pylint: disable-msg=C0103
42 # C0103: Invalid name, since the R_* names are not conforming
44 from ganeti import opcodes
45 from ganeti import http
46 from ganeti import constants
47 from ganeti import cli
48 from ganeti import utils
49 from ganeti import rapi
50 from ganeti.rapi import baserlib
53 _COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
54 I_FIELDS = ["name", "admin_state", "os",
57 "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
59 "disk.sizes", "disk_usage",
60 "beparams", "hvparams",
61 "oper_state", "oper_ram", "oper_vcpus", "status",
64 N_FIELDS = ["name", "offline", "master_candidate", "drained",
66 "mtotal", "mnode", "mfree",
67 "pinst_cnt", "sinst_cnt",
68 "ctotal", "cnodes", "csockets",
70 "pinst_list", "sinst_list",
73 _NR_DRAINED = "drained"
74 _NR_MASTER_CANDIATE = "master-candidate"
76 _NR_OFFLINE = "offline"
77 _NR_REGULAR = "regular"
81 "C": _NR_MASTER_CANDIATE,
87 # Request data version field
88 _REQ_DATA_VERSION = "__version__"
90 # Feature string for instance creation request data version 1
91 _INST_CREATE_REQV1 = "instance-create-reqv1"
93 # Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  def GET(self):
    """Returns the remote API version.

    @rtype: int
    @return: the RAPI protocol version number

    """
    return constants.RAPI_VERSION
class R_2_info(baserlib.R_Generic):
  """/2/info resource.

  """
  def GET(self):
    """Returns cluster information.

    @return: dictionary of cluster parameters, as returned by
        luxi QueryClusterInfo

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()
class R_2_features(baserlib.R_Generic):
  """/2/features resource.

  """
  def GET(self):
    """Returns list of optional RAPI features implemented.

    @rtype: list of strings
    @return: feature identifiers clients can test for

    """
    return [_INST_CREATE_REQV1]
class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  def GET(self):
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpDiagnoseOS(output_fields=["name", "valid", "variants"],
                              names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # we use custom feedback function, instead of print we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, valid, variants) in diagnose_data:
      # Only expose OSes that the cluster reports as valid
      if valid:
        os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names
class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  def PUT(self):
    """Redistribute configuration to all nodes.

    @return: a job id

    """
    return baserlib.SubmitJob([opcodes.OpRedistributeConfig()])
class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  def GET(self):
    """Returns a dictionary of jobs.

    @return: a dictionary with jobs id and uri.

    """
    fields = ["id"]
    cl = baserlib.GetClient()
    # Convert the list of lists to the list of ids
    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
    return baserlib.BuildUriList(result, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))
class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries for each
              opcodes in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_id = self.items[0]
    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
    # QueryJobs returns None for unknown job IDs
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, result)

  def DELETE(self):
    """Cancel not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
    return result
class R_2_jobs_id_wait(baserlib.R_Generic):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    @return: C{None} if the job did not change within the timeout,
        otherwise a dict with the job info and new log entries

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = baserlib.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    # An empty result means the job ID is unknown
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes within the timeout window
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }
class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    With the "bulk" query argument, full node details are returned;
    otherwise only a URI list.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))
class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resources.

  """
  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[node_name], fields=N_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])
class R_2_nodes_name_role(baserlib.R_Generic):
  """/2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    # Map the single-letter role code to its external name
    return _NR_MAP[result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.request_body

    # Translate the external role name into the three node flags;
    # None means "leave this flag unchanged".
    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpSetNodeParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])
class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all secondary instances off a node.

    @return: a list of (job id, instance name, new secondary node) tuples

    """
    node_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)
    early_r = bool(self._checkIntVariable("early_release", default=0))
    dry_run = bool(self.dryRun())

    cl = baserlib.GetClient()

    # First, compute the evacuation strategy (which instance moves where)
    op = opcodes.OpNodeEvacuationStrategy(nodes=[node_name],
                                          iallocator=iallocator,
                                          remote_node=remote_node)

    job_id = baserlib.SubmitJob([op], cl)
    # we use custom feedback function, instead of print we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)

    # Then submit one replace-disks job per instance to move
    jobs = []
    for iname, node in result:
      if dry_run:
        jid = None
      else:
        op = opcodes.OpReplaceDisks(instance_name=iname,
                                    remote_node=node, disks=[],
                                    mode=constants.REPLACE_DISK_CHG,
                                    early_release=early_r)
        jid = baserlib.SubmitJob([op])
      jobs.append((jid, iname, node))

    return jobs
class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    @return: a job id

    """
    node_name = self.items[0]

    if "live" in self.queryargs and "mode" in self.queryargs:
      # "live" is the deprecated boolean form of "mode"; they conflict
      raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                " be passed")
    elif "live" in self.queryargs:
      if self._checkIntVariable("live", default=1):
        mode = constants.HT_MIGRATION_LIVE
      else:
        mode = constants.HT_MIGRATION_NONLIVE
    else:
      mode = self._checkStringVariable("mode", default=None)

    op = opcodes.OpMigrateNode(node_name=node_name, mode=mode)

    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUQueryNodeStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Returns storage units for the node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpQueryNodeStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  def PUT(self):
    """Modifies a storage unit on the node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpModifyNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  def PUT(self):
    """Repairs a storage unit on the node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])
def _ParseInstanceCreateRequestVersion1(data, dry_run):
  """Parses an instance creation request version 1.

  @param data: request body as a dict
  @param dry_run: whether the resulting opcode should be a dry run
  @rtype: L{opcodes.OpCreateInstance}
  @return: Instance creation opcode

  NOTE(review): several lines of this function were garbled in the source;
  the disk/NIC loop bodies were reconstructed — confirm against the master
  copy of this module.

  """
  # Disks
  disks_input = baserlib.CheckParameter(data, "disks", exptype=list)

  disks = []
  for idx, i in enumerate(disks_input):
    baserlib.CheckType(i, dict, "Disk %d specification" % idx)

    # Size is mandatory
    try:
      size = i[constants.IDISK_SIZE]
    except KeyError:
      raise http.HttpBadRequest("Disk %d specification wrong: missing disk"
                                " size" % idx)

    disk = {
      constants.IDISK_SIZE: size,
      }

    # Optional disk access mode
    try:
      disk_access = i[constants.IDISK_MODE]
    except KeyError:
      pass
    else:
      disk[constants.IDISK_MODE] = disk_access

    disks.append(disk)

  assert len(disks_input) == len(disks)

  # Network interfaces
  nics_input = baserlib.CheckParameter(data, "nics", exptype=list)

  nics = []
  for idx, i in enumerate(nics_input):
    baserlib.CheckType(i, dict, "NIC %d specification" % idx)

    nic = {}

    for field in constants.INIC_PARAMS:
      try:
        value = i[field]
      except KeyError:
        continue

      nic[field] = value

    nics.append(nic)

  assert len(nics_input) == len(nics)

  # HV/BE parameters
  hvparams = baserlib.CheckParameter(data, "hvparams", default={})
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  beparams = baserlib.CheckParameter(data, "beparams", default={})
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)

  return opcodes.OpCreateInstance(
    mode=baserlib.CheckParameter(data, "mode"),
    instance_name=baserlib.CheckParameter(data, "name"),
    os_type=baserlib.CheckParameter(data, "os"),
    osparams=baserlib.CheckParameter(data, "osparams", default={}),
    force_variant=baserlib.CheckParameter(data, "force_variant",
                                          default=False),
    pnode=baserlib.CheckParameter(data, "pnode", default=None),
    snode=baserlib.CheckParameter(data, "snode", default=None),
    disk_template=baserlib.CheckParameter(data, "disk_template"),
    disks=disks,
    nics=nics,
    src_node=baserlib.CheckParameter(data, "src_node", default=None),
    src_path=baserlib.CheckParameter(data, "src_path", default=None),
    start=baserlib.CheckParameter(data, "start", default=True),
    ip_check=baserlib.CheckParameter(data, "ip_check", default=True),
    name_check=baserlib.CheckParameter(data, "name_check", default=True),
    file_storage_dir=baserlib.CheckParameter(data, "file_storage_dir",
                                             default=None),
    file_driver=baserlib.CheckParameter(data, "file_driver",
                                        default=constants.FD_LOOP),
    source_handshake=baserlib.CheckParameter(data, "source_handshake",
                                             default=None),
    source_x509_ca=baserlib.CheckParameter(data, "source_x509_ca",
                                           default=None),
    source_instance_name=baserlib.CheckParameter(data, "source_instance_name",
                                                 default=None),
    iallocator=baserlib.CheckParameter(data, "iallocator", default=None),
    hypervisor=baserlib.CheckParameter(data, "hypervisor", default=None),
    hvparams=hvparams,
    beparams=beparams,
    dry_run=dry_run,
    )
class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    With the "bulk" query argument, full instance details are returned;
    otherwise only a URI list.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def _ParseVersion0CreateRequest(self):
    """Parses an instance creation request version 0.

    Request data version 0 is deprecated and should not be used anymore.

    @rtype: L{opcodes.OpCreateInstance}
    @return: Instance creation opcode

    """
    # Do not modify anymore, request data version 0 is deprecated
    beparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.BES_PARAMETERS)
    hvparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.HVS_PARAMETERS)
    fn = self.getBodyParameter

    # disk processing
    disk_data = fn('disks')
    if not isinstance(disk_data, list):
      raise http.HttpBadRequest("The 'disks' parameter should be a list")
    disks = []
    for idx, d in enumerate(disk_data):
      if not isinstance(d, int):
        raise http.HttpBadRequest("Disk %d specification wrong: should"
                                  " be an integer" % idx)
      disks.append({"size": d})

    # nic processing (one nic only)
    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
    if fn("ip", None) is not None:
      nics[0]["ip"] = fn("ip")
    if fn("mode", None) is not None:
      nics[0]["mode"] = fn("mode")
    if fn("link", None) is not None:
      nics[0]["link"] = fn("link")
    if fn("bridge", None) is not None:
      nics[0]["bridge"] = fn("bridge")

    # Do not modify anymore, request data version 0 is deprecated
    return opcodes.OpCreateInstance(
      mode=constants.INSTANCE_CREATE,
      instance_name=fn('name'),
      disks=disks,
      disk_template=fn('disk_template'),
      os_type=fn('os'),
      pnode=fn('pnode', None),
      snode=fn('snode', None),
      iallocator=fn('iallocator', None),
      nics=nics,
      start=fn('start', True),
      ip_check=fn('ip_check', True),
      name_check=fn('name_check', True),
      hypervisor=fn('hypervisor', None),
      hvparams=hvparams,
      beparams=beparams,
      file_storage_dir=fn('file_storage_dir', None),
      file_driver=fn('file_driver', constants.FD_LOOP),
      dry_run=bool(self.dryRun()),
      )

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      op = self._ParseVersion0CreateRequest()
    elif data_version == 1:
      op = _ParseInstanceCreateRequestVersion1(self.request_body,
                                               self.dryRun())
    else:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    return baserlib.SubmitJob([op])
class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resources.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    instance_name = self.items[0]

    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[instance_name],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])

  def DELETE(self):
    """Delete an instance.

    @return: a job id

    """
    op = opcodes.OpRemoveInstance(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])
class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    @return: a job id

    """
    instance_name = self.items[0]
    # static=1 skips querying the hypervisor for live data
    static = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpQueryInstanceData(instances=[instance_name],
                                     static=static)
    return baserlib.SubmitJob([op])
class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    @return: a job id

    """
    instance_name = self.items[0]
    reboot_type = self.queryargs.get('type',
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
    op = opcodes.OpRebootInstance(instance_name=instance_name,
                                  reboot_type=reboot_type,
                                  ignore_secondaries=ignore_secondaries,
                                  dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    @return: a job id

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable('force'))
    op = opcodes.OpStartupInstance(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    @return: a job id

    """
    instance_name = self.items[0]
    op = opcodes.OpShutdownInstance(instance_name=instance_name,
                                    dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    @return: a job id

    """
    instance_name = self.items[0]
    ostype = self._checkStringVariable('os')
    nostartup = self._checkIntVariable('nostartup')
    # A reinstall is a shutdown + reinstall (+ optional startup) sequence
    ops = [
      opcodes.OpShutdownInstance(instance_name=instance_name),
      opcodes.OpReinstallInstance(instance_name=instance_name,
                                  os_type=ostype),
      ]
    if not nostartup:
      ops.append(opcodes.OpStartupInstance(instance_name=instance_name,
                                           force=False))
    return baserlib.SubmitJob(ops)
866 class R_2_instances_name_replace_disks(baserlib.R_Generic):
867 """/2/instances/[instance_name]/replace-disks resource.
871 """Replaces disks on an instance.
874 instance_name = self.items[0]
875 remote_node = self._checkStringVariable("remote_node", default=None)
876 mode = self._checkStringVariable("mode", default=None)
877 raw_disks = self._checkStringVariable("disks", default=None)
878 iallocator = self._checkStringVariable("iallocator", default=None)
882 disks = [int(part) for part in raw_disks.split(",")]
883 except ValueError, err:
884 raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
888 op = opcodes.OpReplaceDisks(instance_name=instance_name,
889 remote_node=remote_node,
892 iallocator=iallocator)
894 return baserlib.SubmitJob([op])
class R_2_instances_name_activate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  def PUT(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    @return: a job id

    """
    instance_name = self.items[0]
    ignore_size = bool(self._checkIntVariable('ignore_size'))

    op = opcodes.OpActivateInstanceDisks(instance_name=instance_name,
                                         ignore_size=ignore_size)

    return baserlib.SubmitJob([op])
class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  def PUT(self):
    """Deactivate disks for an instance.

    @return: a job id

    """
    instance_name = self.items[0]

    op = opcodes.OpDeactivateInstanceDisks(instance_name=instance_name)

    return baserlib.SubmitJob([op])
class R_2_instances_name_prepare_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  def PUT(self):
    """Prepares an export for an instance.

    @return: a job id

    """
    instance_name = self.items[0]
    mode = self._checkStringVariable("mode")

    op = opcodes.OpPrepareExport(instance_name=instance_name,
                                 mode=mode)

    return baserlib.SubmitJob([op])
def _ParseExportInstanceRequest(name, data):
  """Parses a request for an instance export.

  @param name: instance name
  @param data: request body as a dict
  @rtype: L{opcodes.OpExportInstance}
  @return: Instance export opcode

  """
  mode = baserlib.CheckParameter(data, "mode",
                                 default=constants.EXPORT_MODE_LOCAL)
  # "destination" is the only mandatory parameter
  target_node = baserlib.CheckParameter(data, "destination")
  shutdown = baserlib.CheckParameter(data, "shutdown", exptype=bool)
  remove_instance = baserlib.CheckParameter(data, "remove_instance",
                                            exptype=bool, default=False)
  x509_key_name = baserlib.CheckParameter(data, "x509_key_name", default=None)
  destination_x509_ca = baserlib.CheckParameter(data, "destination_x509_ca",
                                                default=None)

  return opcodes.OpExportInstance(instance_name=name,
                                  mode=mode,
                                  target_node=target_node,
                                  shutdown=shutdown,
                                  remove_instance=remove_instance,
                                  x509_key_name=x509_key_name,
                                  destination_x509_ca=destination_x509_ca)
class R_2_instances_name_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/export resource.

  """
  def PUT(self):
    """Exports an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    op = _ParseExportInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
def _ParseMigrateInstanceRequest(name, data):
  """Parses a request for an instance migration.

  @param name: instance name
  @param data: request body as a dict
  @rtype: L{opcodes.OpMigrateInstance}
  @return: Instance migration opcode

  """
  mode = baserlib.CheckParameter(data, "mode", default=None)
  cleanup = baserlib.CheckParameter(data, "cleanup", exptype=bool,
                                    default=False)

  return opcodes.OpMigrateInstance(instance_name=name, mode=mode,
                                   cleanup=cleanup)
class R_2_instances_name_migrate(baserlib.R_Generic):
  """/2/instances/[instance_name]/migrate resource.

  """
  def PUT(self):
    """Migrates an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
def _ParseRenameInstanceRequest(name, data):
  """Parses a request for renaming an instance.

  @param name: current instance name
  @param data: request body as a dict
  @rtype: L{opcodes.OpRenameInstance}
  @return: Instance rename opcode

  """
  new_name = baserlib.CheckParameter(data, "new_name")
  ip_check = baserlib.CheckParameter(data, "ip_check", default=True)
  name_check = baserlib.CheckParameter(data, "name_check", default=True)

  return opcodes.OpRenameInstance(instance_name=name, new_name=new_name,
                                  name_check=name_check, ip_check=ip_check)
class R_2_instances_name_rename(baserlib.R_Generic):
  """/2/instances/[instance_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseRenameInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
def _ParseModifyInstanceRequest(name, data):
  """Parses a request for modifying an instance.

  @param name: instance name
  @param data: request body as a dict
  @rtype: L{opcodes.OpSetInstanceParams}
  @return: Instance modify opcode

  """
  osparams = baserlib.CheckParameter(data, "osparams", default={})
  force = baserlib.CheckParameter(data, "force", default=False)
  nics = baserlib.CheckParameter(data, "nics", default=[])
  disks = baserlib.CheckParameter(data, "disks", default=[])
  disk_template = baserlib.CheckParameter(data, "disk_template", default=None)
  remote_node = baserlib.CheckParameter(data, "remote_node", default=None)
  os_name = baserlib.CheckParameter(data, "os_name", default=None)
  force_variant = baserlib.CheckParameter(data, "force_variant", default=False)

  # HV/BE parameters; VALUE_DEFAULT is allowed to reset a value to the
  # cluster default
  hvparams = baserlib.CheckParameter(data, "hvparams", default={})
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES,
                      allowed_values=[constants.VALUE_DEFAULT])

  beparams = baserlib.CheckParameter(data, "beparams", default={})
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES,
                      allowed_values=[constants.VALUE_DEFAULT])

  return opcodes.OpSetInstanceParams(instance_name=name, hvparams=hvparams,
                                     beparams=beparams, osparams=osparams,
                                     force=force, nics=nics, disks=disks,
                                     disk_template=disk_template,
                                     remote_node=remote_node, os_name=os_name,
                                     force_variant=force_variant)
class R_2_instances_name_modify(baserlib.R_Generic):
  """/2/instances/[instance_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseModifyInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
class _R_Tags(baserlib.R_Generic):
  """Quasiclass for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    # Cluster-level tags have no item name in the URI
    if self.TAG_LEVEL != constants.TAG_CLUSTER:
      self.name = items[0]
    else:
      self.name = None

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable-msg=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The request as a list of strings should be PUT to this URI. And
    you'll have back a job id.

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # Fixed duplicated word in error message ("the the")
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # We are not going to delete all tags; require explicit selection
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))
class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE
class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE
class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER