lib/rapi/rlib2.py @ d8260842

#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Remote API version 2 resources library.

"""

from ganeti import opcodes
from ganeti import http
from ganeti import constants
from ganeti import cli
from ganeti import rapi
from ganeti.rapi import baserlib


I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.links",
            "network_port",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "status",
            "tags"]

N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt", "tags",
            "ctotal", "cnodes", "csockets",
            ]

_NR_DRAINED = "drained"
_NR_MASTER_CANDIATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

_NR_MAP = {
  "M": _NR_MASTER,
  "C": _NR_MASTER_CANDIATE,
  "D": _NR_DRAINED,
  "O": _NR_OFFLINE,
  "R": _NR_REGULAR,
  }


class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  def GET(self):
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION


class R_2_info(baserlib.R_Generic):
  """Cluster info.

  """
  def GET(self):
    """Returns cluster information.

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()


class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  def GET(self):
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpDiagnoseOS(output_fields=["name", "valid"], names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # we use a custom feedback function, which logs the status instead of
    # printing it
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    return [row[0] for row in diagnose_data if row[1]]


class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  def GET(self):
    """Returns a list of jobs.

    @return: a list of dictionaries, each with a job id and uri.

    """
    fields = ["id"]
    cl = baserlib.GetClient()
    # Convert the list of lists to a list of ids
    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
    return baserlib.BuildUriList(result, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))


class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries, one for
              each opcode in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_id = self.items[0]
    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, result)

  def DELETE(self):
    """Cancel a not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
    return result

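# Illustrative only, not part of the module: a GET on /2/jobs/[job_id] maps
# the fields listed above onto a dictionary, so a (purely hypothetical)
# response could look roughly like
#   {"id": 42, "status": "success", "summary": [...], "opstatus": ["success"],
#    "opresult": [[...]], "received_ts": [...], "start_ts": [...], "end_ts": [...]}
# where the job id and all field values are invented for illustration.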

    
class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))


class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resources.

  """
  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=N_FIELDS,
                               use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])


class R_2_nodes_name_role(baserlib.R_Generic):
  """/2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.req.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.req.request_body

    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpSetNodeParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])

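# Illustrative only, not part of the module: the role PUT expects the new
# role as a string in the request body (JSON-encoded on the wire, hence the
# quotes), one of the _NR_* values defined above.  With a hypothetical node
# name, a client might send:
#   PUT /2/nodes/node1.example.com/role
#   "master-candidate"
# optionally adding ?force=1 to the URI (see useForce()) to force the change.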

    
class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all secondary instances off a node.

    """
    node_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)

    op = opcodes.OpEvacuateNode(node_name=node_name,
                                remote_node=remote_node,
                                iallocator=iallocator)

    return baserlib.SubmitJob([op])

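# Illustrative only, not part of the module: evacuation is requested with a
# POST, naming either a target node or an instance allocator via query
# arguments (all names hypothetical):
#   POST /2/nodes/node1.example.com/evacuate?remote_node=node2.example.com
#   POST /2/nodes/node1.example.com/evacuate?iallocator=my-allocator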

    
class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    """
    node_name = self.items[0]
    live = bool(self._checkIntVariable("live", default=1))

    op = opcodes.OpMigrateNode(node_name=node_name, live=live)

    return baserlib.SubmitJob([op])

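# Illustrative only, not part of the module: migration defaults to live
# migration (live=1); a non-live request against a hypothetical node would be:
#   POST /2/nodes/node1.example.com/migrate?live=0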

    
class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUQueryNodeStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Request a list of storage units on the node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpQueryNodeStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])

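# Illustrative only, not part of the module: both query arguments above are
# required; the storage type and field names below are assumptions and depend
# on the values known to ganeti.constants:
#   GET /2/nodes/node1.example.com/storage?storage_type=lvm-vg&output_fields=name,size,free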

    
class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  def PUT(self):
    """Modify a storage unit on the node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpModifyNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])

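# Illustrative only, not part of the module: marking a (hypothetical) LVM
# physical volume as non-allocatable might look like this, assuming "lvm-pv"
# is a storage type known to the cluster:
#   PUT /2/nodes/node1.example.com/storage/modify?storage_type=lvm-pv&name=/dev/sdb1&allocatable=0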

    
class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  def PUT(self):
    """Repair a storage unit on the node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])


class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.req.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    beparams = baserlib.MakeParamsDict(self.req.request_body,
                                       constants.BES_PARAMETERS)
    hvparams = baserlib.MakeParamsDict(self.req.request_body,
                                       constants.HVS_PARAMETERS)
    fn = self.getBodyParameter

    # disk processing
    disk_data = fn('disks')
    if not isinstance(disk_data, list):
      raise http.HttpBadRequest("The 'disks' parameter should be a list")
    disks = []
    for idx, d in enumerate(disk_data):
      if not isinstance(d, int):
        raise http.HttpBadRequest("Disk %d specification wrong: should"
                                  " be an integer" % idx)
      disks.append({"size": d})
    # nic processing (one nic only)
    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
    if fn("ip", None) is not None:
      nics[0]["ip"] = fn("ip")
    if fn("mode", None) is not None:
      nics[0]["mode"] = fn("mode")
    if fn("link", None) is not None:
      nics[0]["link"] = fn("link")
    if fn("bridge", None) is not None:
      nics[0]["bridge"] = fn("bridge")

    op = opcodes.OpCreateInstance(
      mode=constants.INSTANCE_CREATE,
      instance_name=fn('name'),
      disks=disks,
      disk_template=fn('disk_template'),
      os_type=fn('os'),
      pnode=fn('pnode', None),
      snode=fn('snode', None),
      iallocator=fn('iallocator', None),
      nics=nics,
      start=fn('start', True),
      ip_check=fn('ip_check', True),
      wait_for_sync=True,
      hypervisor=fn('hypervisor', None),
      hvparams=hvparams,
      beparams=beparams,
      file_storage_dir=fn('file_storage_dir', None),
      file_driver=fn('file_driver', 'loop'),
      dry_run=bool(self.dryRun()),
      )

    return baserlib.SubmitJob([op])

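# Illustrative only, not part of the module: instance creation takes a JSON
# dictionary in the POST body whose keys match the fn() lookups above; all
# names and values below are hypothetical:
#   POST /2/instances
#   {"name": "inst1.example.com",
#    "disk_template": "drbd",
#    "disks": [10240],
#    "os": "debian-etch",
#    "pnode": "node1.example.com",
#    "snode": "node2.example.com",
#    "memory": 512}
# Backend and hypervisor parameters (e.g. "memory" above) are picked out of
# the same dictionary via MakeParamsDict.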

    
class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resources.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    instance_name = self.items[0]
    result = client.QueryInstances(names=[instance_name], fields=I_FIELDS,
                                   use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])

  def DELETE(self):
    """Delete an instance.

    """
    op = opcodes.OpRemoveInstance(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])


class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    """
    instance_name = self.items[0]
    static = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpQueryInstanceData(instances=[instance_name],
                                     static=static)
    return baserlib.SubmitJob([op])


class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    instance_name = self.items[0]
    reboot_type = self.queryargs.get('type',
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self.queryargs.get('ignore_secondaries',
                                                 [False])[0])
    op = opcodes.OpRebootInstance(instance_name=instance_name,
                                  reboot_type=reboot_type,
                                  ignore_secondaries=ignore_secondaries,
                                  dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])

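# Illustrative only, not part of the module: a soft reboot of a hypothetical
# instance that also ignores secondary-node errors:
#   POST /2/instances/inst1.example.com/reboot?type=soft&ignore_secondaries=1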

    
class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the
    instance even if secondary disks are failing.

    """
    instance_name = self.items[0]
    force_startup = bool(self.queryargs.get('force', [False])[0])
    op = opcodes.OpStartupInstance(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])

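# Illustrative only, not part of the module: forcing a start despite failing
# secondary disks (hypothetical instance name):
#   PUT /2/instances/inst1.example.com/startup?force=1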

    
class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    """
    instance_name = self.items[0]
    op = opcodes.OpShutdownInstance(instance_name=instance_name,
                                    dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes the optional parameters os=name and
    nostartup=[0|1]. By default, the instance will be started
    automatically.

    """
    instance_name = self.items[0]
    ostype = self._checkStringVariable('os')
    nostartup = self._checkIntVariable('nostartup')
    ops = [
      opcodes.OpShutdownInstance(instance_name=instance_name),
      opcodes.OpReinstallInstance(instance_name=instance_name, os_type=ostype),
      ]
    if not nostartup:
      ops.append(opcodes.OpStartupInstance(instance_name=instance_name,
                                           force=False))
    return baserlib.SubmitJob(ops)

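# Illustrative only, not part of the module: reinstalling onto a different OS
# without starting the instance afterwards (hypothetical values):
#   POST /2/instances/inst1.example.com/reinstall?os=debian-etch&nostartup=1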

    
class R_2_instances_name_replace_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  def POST(self):
    """Replaces disks on an instance.

    """
    instance_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    mode = self._checkStringVariable("mode", default=None)
    raw_disks = self._checkStringVariable("disks", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)

    if raw_disks:
      try:
        disks = [int(part) for part in raw_disks.split(",")]
      except ValueError, err:
        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
    else:
      disks = []

    op = opcodes.OpReplaceDisks(instance_name=instance_name,
                                remote_node=remote_node,
                                mode=mode,
                                disks=disks,
                                iallocator=iallocator)

    return baserlib.SubmitJob([op])

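# Illustrative only, not part of the module: mode, disks, remote_node and
# iallocator are passed straight through to OpReplaceDisks, so the accepted
# mode strings come from ganeti.constants; a hypothetical request replacing
# disks 0 and 1 could look like:
#   POST /2/instances/inst1.example.com/replace-disks?mode=replace_auto&disks=0,1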

    
class _R_Tags(baserlib.R_Generic):
  """Quasiclass for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    if self.TAG_LEVEL != constants.TAG_CLUSTER:
      self.name = items[0]
    else:
      self.name = ""

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The tags to add must be given as 'tag' query arguments on the PUT
    request; a job id is returned.

    """
    if 'tag' not in self.queryargs:
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE request should be
    addressed to a URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    if 'tag' not in self.queryargs:
      # we don't allow deleting all tags at once
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))

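# Illustrative only, not part of the module: for all tag resources the tags
# are passed as repeated 'tag' query arguments (names below are hypothetical):
#   PUT    /2/instances/inst1.example.com/tags?tag=web&tag=production
#   DELETE /2/instances/inst1.example.com/tags?tag=web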

    
class R_2_instances_name_tags(_R_Tags):
  """/2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE


class R_2_nodes_name_tags(_R_Tags):
  """/2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE


class R_2_tags(_R_Tags):
  """/2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER