4 # Copyright (C) 2006, 2007, 2008 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Remote API version 2 baserlib.library.
27 According to RFC2616 the main difference between PUT and POST is that
28 POST can create new resources but PUT can only create the resource the
29 URI was pointing to on the PUT request.
In the context of this module, for example, instance creation via POST on
/2/instances is legitimate while PUT would not be, since it creates a
new entity rather than just replacing /2/instances with it.
So when adding new methods, if they are operating on the URI entity itself,
PUT should be preferred over POST.
40 # pylint: disable-msg=C0103
42 # C0103: Invalid name, since the R_* names are not conforming
44 from ganeti import opcodes
45 from ganeti import http
46 from ganeti import constants
47 from ganeti import cli
48 from ganeti import rapi
49 from ganeti.rapi import baserlib
52 _COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
53 I_FIELDS = ["name", "admin_state", "os",
56 "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
58 "disk.sizes", "disk_usage",
59 "beparams", "hvparams",
60 "oper_state", "oper_ram", "status",
63 N_FIELDS = ["name", "offline", "master_candidate", "drained",
65 "mtotal", "mnode", "mfree",
66 "pinst_cnt", "sinst_cnt",
67 "ctotal", "cnodes", "csockets",
69 "pinst_list", "sinst_list",
72 _NR_DRAINED = "drained"
73 _NR_MASTER_CANDIATE = "master-candidate"
75 _NR_OFFLINE = "offline"
76 _NR_REGULAR = "regular"
80 "C": _NR_MASTER_CANDIATE,
class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  def GET(self):
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION
class R_2_info(baserlib.R_Generic):
  """/2/info resource.

  """
  def GET(self):
    """Returns cluster information.

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()
class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  def GET(self):
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpDiagnoseOS(output_fields=["name", "valid", "variants"],
                              names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # we use custom feedback function, instead of print we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, valid, variants) in diagnose_data:
      # only expose OSes that are valid on all nodes
      if valid:
        os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names
class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  def PUT(self):
    """Redistribute configuration to all nodes.

    @return: a job id

    """
    return baserlib.SubmitJob([opcodes.OpRedistributeConfig()])
class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  def GET(self):
    """Returns a dictionary of jobs.

    @return: a dictionary with jobs id and uri.

    """
    fields = ["id"]
    cl = baserlib.GetClient()
    # Convert the list of lists to the list of ids
    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
    return baserlib.BuildUriList(result, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))
class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries for each
              opcode in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_id = self.items[0]
    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, result)

  def DELETE(self):
    """Cancel not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
    return result
class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      # bulk mode: return full field data for every node
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      # plain mode: return a list of name/URI pairs
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))
class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resources.

  """
  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=N_FIELDS,
                               use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])
class R_2_nodes_name_role(baserlib.R_Generic):
  """ /2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    # map the single-letter role code to its textual name
    return _NR_MAP[result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.req.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.req.request_body

    # Translate the textual role into the three OpSetNodeParams flags;
    # None means "leave this flag unchanged".
    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpSetNodeParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])
class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all secondary instances off a node.

    """
    node_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)

    op = opcodes.OpEvacuateNode(node_name=node_name,
                                remote_node=remote_node,
                                iallocator=iallocator)

    return baserlib.SubmitJob([op])
class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    """
    node_name = self.items[0]
    # "live" defaults to 1, i.e. live migration unless explicitly disabled
    live = bool(self._checkIntVariable("live", default=1))

    op = opcodes.OpMigrateNode(node_name=node_name, live=live)

    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage ressource.

  """
  # LUQueryNodeStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Query storage units on a node.

    Requires the 'storage_type' and 'output_fields' query parameters.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpQueryNodeStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify ressource.

  """
  def PUT(self):
    """Modify a storage unit on a node.

    Requires the 'storage_type' and 'name' query parameters; the
    optional 'allocatable' parameter (default 1) toggles the
    allocatable flag.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpModifyNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair ressource.

  """
  def PUT(self):
    """Repair a storage unit on a node.

    Requires the 'storage_type' and 'name' query parameters.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])
class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      # bulk mode: return full field data for every instance
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      # plain mode: return a list of name/URI pairs
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.req.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    beparams = baserlib.MakeParamsDict(self.req.request_body,
                                       constants.BES_PARAMETERS)
    hvparams = baserlib.MakeParamsDict(self.req.request_body,
                                       constants.HVS_PARAMETERS)
    fn = self.getBodyParameter

    # disk processing: a list of integer sizes in the body becomes a
    # list of {"size": ...} dicts for the opcode
    disk_data = fn('disks')
    if not isinstance(disk_data, list):
      raise http.HttpBadRequest("The 'disks' parameter should be a list")
    disks = []
    for idx, d in enumerate(disk_data):
      if not isinstance(d, int):
        raise http.HttpBadRequest("Disk %d specification wrong: should"
                                  " be an integer" % idx)
      disks.append({"size": d})
    # nic processing (one nic only)
    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
    if fn("ip", None) is not None:
      nics[0]["ip"] = fn("ip")
    if fn("mode", None) is not None:
      nics[0]["mode"] = fn("mode")
    if fn("link", None) is not None:
      nics[0]["link"] = fn("link")
    if fn("bridge", None) is not None:
      nics[0]["bridge"] = fn("bridge")

    op = opcodes.OpCreateInstance(
      mode=constants.INSTANCE_CREATE,
      instance_name=fn('name'),
      disks=disks,
      disk_template=fn('disk_template'),
      os_type=fn('os'),
      pnode=fn('pnode', None),
      snode=fn('snode', None),
      iallocator=fn('iallocator', None),
      nics=nics,
      start=fn('start', True),
      ip_check=fn('ip_check', True),
      name_check=fn('name_check', True),
      wait_for_sync=True,
      hypervisor=fn('hypervisor', None),
      hvparams=hvparams,
      beparams=beparams,
      file_storage_dir=fn('file_storage_dir', None),
      file_driver=fn('file_driver', 'loop'),
      dry_run=bool(self.dryRun()),
      )

    return baserlib.SubmitJob([op])
class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resources.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    instance_name = self.items[0]
    result = client.QueryInstances(names=[instance_name], fields=I_FIELDS,
                                   use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])

  def DELETE(self):
    """Delete an instance.

    @return: a job id

    """
    op = opcodes.OpRemoveInstance(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])
class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    The URI takes static=[0|1]; static data does not require the
    instance's node to be queried.

    @return: a job id

    """
    instance_name = self.items[0]
    static = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpQueryInstanceData(instances=[instance_name],
                                     static=static)
    return baserlib.SubmitJob([op])
class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    @return: a job id

    """
    instance_name = self.items[0]
    reboot_type = self.queryargs.get('type',
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
    op = opcodes.OpRebootInstance(instance_name=instance_name,
                                  reboot_type=reboot_type,
                                  ignore_secondaries=ignore_secondaries,
                                  dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    @return: a job id

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable('force'))
    op = opcodes.OpStartupInstance(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    @return: a job id

    """
    instance_name = self.items[0]
    op = opcodes.OpShutdownInstance(instance_name=instance_name,
                                    dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    @return: a job id

    """
    instance_name = self.items[0]
    ostype = self._checkStringVariable('os')
    nostartup = self._checkIntVariable('nostartup')
    # shutdown, reinstall and (optionally) restart as one job
    ops = [
      opcodes.OpShutdownInstance(instance_name=instance_name),
      opcodes.OpReinstallInstance(instance_name=instance_name, os_type=ostype),
      ]
    if not nostartup:
      ops.append(opcodes.OpStartupInstance(instance_name=instance_name,
                                           force=False))
    return baserlib.SubmitJob(ops)
635 class R_2_instances_name_replace_disks(baserlib.R_Generic):
636 """/2/instances/[instance_name]/replace-disks resource.
640 """Replaces disks on an instance.
643 instance_name = self.items[0]
644 remote_node = self._checkStringVariable("remote_node", default=None)
645 mode = self._checkStringVariable("mode", default=None)
646 raw_disks = self._checkStringVariable("disks", default=None)
647 iallocator = self._checkStringVariable("iallocator", default=None)
651 disks = [int(part) for part in raw_disks.split(",")]
652 except ValueError, err:
653 raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
657 op = opcodes.OpReplaceDisks(instance_name=instance_name,
658 remote_node=remote_node,
661 iallocator=iallocator)
663 return baserlib.SubmitJob([op])
class R_2_instances_name_activate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  def PUT(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    @return: a job id

    """
    instance_name = self.items[0]
    ignore_size = bool(self._checkIntVariable('ignore_size'))

    op = opcodes.OpActivateInstanceDisks(instance_name=instance_name,
                                         ignore_size=ignore_size)

    return baserlib.SubmitJob([op])
class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  def PUT(self):
    """Deactivate disks for an instance.

    @return: a job id

    """
    instance_name = self.items[0]

    op = opcodes.OpDeactivateInstanceDisks(instance_name=instance_name)

    return baserlib.SubmitJob([op])
class _R_Tags(baserlib.R_Generic):
  """ Quasiclass for tagging resources

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  # Subclasses must override this with one of the constants.TAG_* levels
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    # Cluster tags are not tied to a named object
    if self.TAG_LEVEL != constants.TAG_CLUSTER:
      self.name = items[0]
    else:
      self.name = ""

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable-msg=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The request as a list of strings should be PUT to this URI. And
    you'll have back a job id.

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # NOTE: fixed the duplicated "the the" in this message
      raise http.HttpBadRequest("Please specify tag(s) to add using"
                                " the 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # no we not gonna delete all tags
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))
class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE
class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE
class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER