4 # Copyright (C) 2006, 2007, 2008 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Remote API version 2 baserlib.library.
26 from ganeti import opcodes
27 from ganeti import http
28 from ganeti import constants
29 from ganeti import cli
30 from ganeti import rapi
31 from ganeti.rapi import baserlib
34 I_FIELDS = ["name", "admin_state", "os",
37 "nic.ips", "nic.macs", "nic.modes", "nic.links",
39 "disk.sizes", "disk_usage",
40 "beparams", "hvparams",
41 "oper_state", "oper_ram", "status",
44 N_FIELDS = ["name", "offline", "master_candidate", "drained",
46 "mtotal", "mnode", "mfree",
47 "pinst_cnt", "sinst_cnt", "tags",
48 "ctotal", "cnodes", "csockets",
49 "pip", "sip", "serial_no", "role",
50 "pinst_list", "sinst_list",
53 _NR_DRAINED = "drained"
54 _NR_MASTER_CANDIATE = "master-candidate"
56 _NR_OFFLINE = "offline"
57 _NR_REGULAR = "regular"
61 "C": _NR_MASTER_CANDIATE,
class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  @staticmethod
  def GET():
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION
class R_2_info(baserlib.R_Generic):
  """/2/info resource.

  """
  @staticmethod
  def GET():
    """Returns cluster information.

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()
class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  @staticmethod
  def GET():
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpDiagnoseOS(output_fields=["name", "valid"], names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # we use custom feedback function, instead of print we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    # only return the names of OSes flagged as valid
    return [row[0] for row in diagnose_data if row[1]]
class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  @staticmethod
  def PUT():
    """Redistribute configuration to all nodes.

    @return: a job id

    """
    return baserlib.SubmitJob([opcodes.OpRedistributeConfig()])
class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  @staticmethod
  def GET():
    """Returns a dictionary of jobs.

    @return: a dictionary with jobs id and uri.

    """
    fields = ["id"]
    cl = baserlib.GetClient()
    # Convert the list of lists to the list of ids
    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
    return baserlib.BuildUriList(result, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))
class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries for each
              opcodes in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_id = self.items[0]
    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
    # QueryJobs returns None for unknown job IDs
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, result)

  def DELETE(self):
    """Cancel not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
    return result
class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      # full objects with all N_FIELDS for ?bulk=1
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      # otherwise just a list of name/uri references
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))
class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resources.

  """
  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=N_FIELDS,
                               use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])
class R_2_nodes_name_role(baserlib.R_Generic):
  """ /2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    # translate the single-character role flag to its symbolic name
    return _NR_MAP[result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.req.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.req.request_body

    # For each target role set exactly the flags that define it and
    # leave the others as None so OpSetNodeParams doesn't touch them.
    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpSetNodeParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])
class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all secondary instances off a node.

    @return: a job id

    """
    node_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)

    op = opcodes.OpEvacuateNode(node_name=node_name,
                                remote_node=remote_node,
                                iallocator=iallocator)

    return baserlib.SubmitJob([op])
class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    @return: a job id

    """
    node_name = self.items[0]
    # live migration is the default (live=1)
    live = bool(self._checkIntVariable("live", default=1))

    op = opcodes.OpMigrateNode(node_name=node_name, live=live)

    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage ressource.

  """
  # LUQueryNodeStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Request storage units on a node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpQueryNodeStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify ressource.

  """
  def PUT(self):
    """Modify a storage unit on a node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpModifyNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair ressource.

  """
  def PUT(self):
    """Repair a storage unit on a node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])
class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      # full objects with all I_FIELDS for ?bulk=1
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      # otherwise just a list of name/uri references
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.req.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    beparams = baserlib.MakeParamsDict(self.req.request_body,
                                       constants.BES_PARAMETERS)
    hvparams = baserlib.MakeParamsDict(self.req.request_body,
                                       constants.HVS_PARAMETERS)
    fn = self.getBodyParameter

    # disk processing
    disk_data = fn('disks')
    if not isinstance(disk_data, list):
      raise http.HttpBadRequest("The 'disks' parameter should be a list")
    disks = []
    for idx, d in enumerate(disk_data):
      if not isinstance(d, int):
        raise http.HttpBadRequest("Disk %d specification wrong: should"
                                  " be an integer" % idx)
      disks.append({"size": d})
    # nic processing (one nic only)
    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
    if fn("ip", None) is not None:
      nics[0]["ip"] = fn("ip")
    if fn("mode", None) is not None:
      nics[0]["mode"] = fn("mode")
    if fn("link", None) is not None:
      nics[0]["link"] = fn("link")
    if fn("bridge", None) is not None:
      nics[0]["bridge"] = fn("bridge")

    op = opcodes.OpCreateInstance(
      mode=constants.INSTANCE_CREATE,
      instance_name=fn('name'),
      disks=disks,
      disk_template=fn('disk_template'),
      os_type=fn('os'),
      pnode=fn('pnode', None),
      snode=fn('snode', None),
      iallocator=fn('iallocator', None),
      nics=nics,
      start=fn('start', True),
      ip_check=fn('ip_check', True),
      wait_for_sync=True,
      hypervisor=fn('hypervisor', None),
      hvparams=hvparams,
      beparams=beparams,
      file_storage_dir=fn('file_storage_dir', None),
      file_driver=fn('file_driver', 'loop'),
      dry_run=bool(self.dryRun()),
      )

    return baserlib.SubmitJob([op])
class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resources.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    instance_name = self.items[0]
    result = client.QueryInstances(names=[instance_name], fields=I_FIELDS,
                                   use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])

  def DELETE(self):
    """Delete an instance.

    @return: a job id

    """
    op = opcodes.OpRemoveInstance(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])
class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    @return: a job id

    """
    instance_name = self.items[0]
    # static=1 skips querying the hypervisor for live data
    static = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpQueryInstanceData(instances=[instance_name],
                                     static=static)
    return baserlib.SubmitJob([op])
class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    @return: a job id

    """
    instance_name = self.items[0]
    reboot_type = self.queryargs.get('type',
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
    op = opcodes.OpRebootInstance(instance_name=instance_name,
                                  reboot_type=reboot_type,
                                  ignore_secondaries=ignore_secondaries,
                                  dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    @return: a job id

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable('force'))
    op = opcodes.OpStartupInstance(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    @return: a job id

    """
    instance_name = self.items[0]
    op = opcodes.OpShutdownInstance(instance_name=instance_name,
                                    dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    @return: a job id

    """
    instance_name = self.items[0]
    ostype = self._checkStringVariable('os')
    nostartup = self._checkIntVariable('nostartup')
    # shutdown + reinstall (+ optional startup) as one job
    ops = [
      opcodes.OpShutdownInstance(instance_name=instance_name),
      opcodes.OpReinstallInstance(instance_name=instance_name, os_type=ostype),
      ]
    if not nostartup:
      ops.append(opcodes.OpStartupInstance(instance_name=instance_name,
                                           force=False))
    return baserlib.SubmitJob(ops)
class R_2_instances_name_replace_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  def POST(self):
    """Replaces disks on an instance.

    @return: a job id

    """
    instance_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    mode = self._checkStringVariable("mode", default=None)
    raw_disks = self._checkStringVariable("disks", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)

    if raw_disks:
      try:
        disks = [int(part) for part in raw_disks.split(",")]
      except ValueError as err:
        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
    else:
      # no explicit disk list means all disks
      disks = []

    op = opcodes.OpReplaceDisks(instance_name=instance_name,
                                remote_node=remote_node,
                                mode=mode,
                                disks=disks,
                                iallocator=iallocator)

    return baserlib.SubmitJob([op])
class _R_Tags(baserlib.R_Generic):
  """ Quasiclass for tagging resources

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    # cluster-level tags have no object name in the URI
    if self.TAG_LEVEL != constants.TAG_CLUSTER:
      self.name = items[0]
    else:
      self.name = None

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The request as a list of strings should be PUT to this URI. And
    you'll have back a job id.

    """
    if 'tag' not in self.queryargs:
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " the 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    if 'tag' not in self.queryargs:
      # we're not going to delete all tags implicitly
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))
class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE
class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE
class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER