#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Remote API version 2 resources library.

  PUT or POST?
  ============

  According to RFC2616 the main difference between PUT and POST is that
  POST can create new resources but PUT can only create the resource the
  URI was pointing to on the PUT request.

  In the context of this module, a POST on /2/instances is legitimate for
  instance creation while a PUT would not be, since it creates a new entity
  rather than just replacing /2/instances with it.

  So when adding new methods, if they are operating on the URI entity itself,
  PUT should be preferred over POST.

"""

# pylint: disable-msg=C0103

# C0103: Invalid name, since the R_* names are not conforming
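
# Illustration of the convention described in the module docstring above
# (the URIs are handled by the R_* classes below; the instance name is made
# up for the example):
#
#   POST /2/instances                             creates a new instance
#   PUT  /2/instances/inst1.example.com/shutdown  operates on the named entity
#
# Both kinds of calls return a job id that can be polled via /2/jobs/[job_id].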

from ganeti import opcodes
from ganeti import http
from ganeti import constants
from ganeti import cli
from ganeti import utils
from ganeti import rapi
from ganeti.rapi import baserlib


_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
            "network_port",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "status",
            ] + _COMMON_FIELDS

N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            ] + _COMMON_FIELDS

_NR_DRAINED = "drained"
_NR_MASTER_CANDIATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

_NR_MAP = {
  "M": _NR_MASTER,
  "C": _NR_MASTER_CANDIATE,
  "D": _NR_DRAINED,
  "O": _NR_OFFLINE,
  "R": _NR_REGULAR,
  }

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"
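
# A client can use the feature list to decide which request data version to
# send. A sketch of the expected exchange (values are illustrative only):
#
#   GET  /2/features     -> ["instance-create-reqv1"]
#   POST /2/instances    body includes {"__version__": 1, ...}
#
# If the feature string is absent, the client should fall back to the
# deprecated version 0 request format handled below.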

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10


class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  @staticmethod
  def GET():
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION


class R_2_info(baserlib.R_Generic):
  """Cluster info.

  """
  @staticmethod
  def GET():
    """Returns cluster information.

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()


class R_2_features(baserlib.R_Generic):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Returns a list of optional RAPI features implemented.

    """
    return [_INST_CREATE_REQV1]


class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  @staticmethod
  def GET():
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpDiagnoseOS(output_fields=["name", "valid", "variants"],
                              names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # we use a custom feedback function, logging the status instead of printing
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, valid, variants) in diagnose_data:
      if valid:
        os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names


class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  @staticmethod
  def PUT():
    """Redistribute configuration to all nodes.

    """
    return baserlib.SubmitJob([opcodes.OpRedistributeConfig()])


class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  @staticmethod
  def GET():
    """Returns a list of all jobs.

    @return: a list of dictionaries with job id and uri.

    """
    fields = ["id"]
    cl = baserlib.GetClient()
    # Convert the list of lists to the list of ids
    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
    return baserlib.BuildUriList(result, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))

class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries for each
              opcode in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_id = self.items[0]
    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, result)

  def DELETE(self):
    """Cancel a not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
    return result


class R_2_jobs_id_wait(baserlib.R_Generic):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = baserlib.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }
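
# Example exchange for the wait resource above (field names follow the checks
# in GET(); the job id and values are illustrative only):
#
#   GET /2/jobs/1234/wait
#   body: {"fields": ["status"],
#          "previous_job_info": null,
#          "previous_log_serial": null}
#
# returns {"job_info": [...], "log_entries": [...]}, or None (serialized as
# null) if nothing changed within _WFJC_TIMEOUT seconds.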


class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))


class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resources.

  """
  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[node_name], fields=N_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])


class R_2_nodes_name_role(baserlib.R_Generic):
  """ /2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.request_body

    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpSetNodeParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])
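
# Example for the role resource above (node name is illustrative only):
#
#   GET /2/nodes/node1.example.com/role   -> "master-candidate"
#   PUT /2/nodes/node1.example.com/role   body: "drained"
#
# The PUT body is a bare JSON string with one of the _NR_* values defined at
# the top of this module ("master" cannot be set this way); the call returns
# a job id.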


class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all secondary instances off a node.

    """
    node_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)

    op = opcodes.OpEvacuateNode(node_name=node_name,
                                remote_node=remote_node,
                                iallocator=iallocator)

    return baserlib.SubmitJob([op])


class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    """
    node_name = self.items[0]
    live = bool(self._checkIntVariable("live", default=1))

    op = opcodes.OpMigrateNode(node_name=node_name, live=live)

    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUQueryNodeStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpQueryNodeStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])
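
# Example query for the storage resource above (node name, storage type and
# field list are illustrative; valid values depend on the cluster's storage
# configuration):
#
#   GET /2/nodes/node1.example.com/storage?storage_type=lvm-vg&
#       output_fields=name,size,free
#
# Both parameters are required; the call returns a job id whose result holds
# the requested storage information.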


class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  def PUT(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpModifyNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  def PUT(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])


def _ParseInstanceCreateRequestVersion1(data, dry_run):
  """Parses an instance creation request version 1.

  @rtype: L{opcodes.OpCreateInstance}
  @return: Instance creation opcode

  """
  # Disks
  disks_input = baserlib.CheckParameter(data, "disks", exptype=list)

  disks = []
  for idx, i in enumerate(disks_input):
    baserlib.CheckType(i, dict, "Disk %d specification" % idx)

    # Size is mandatory
    try:
      size = i["size"]
    except KeyError:
      raise http.HttpBadRequest("Disk %d specification wrong: missing disk"
                                " size" % idx)

    disk = {
      "size": size,
      }

    # Optional disk access mode
    try:
      disk_access = i["mode"]
    except KeyError:
      pass
    else:
      disk["mode"] = disk_access

    disks.append(disk)

  assert len(disks_input) == len(disks)

  # Network interfaces
  nics_input = baserlib.CheckParameter(data, "nics", exptype=list)

  nics = []
  for idx, i in enumerate(nics_input):
    baserlib.CheckType(i, dict, "NIC %d specification" % idx)

    nic = {}

    for field in ["mode", "ip", "link", "bridge"]:
      try:
        value = i[field]
      except KeyError:
        continue

      nic[field] = value

    nics.append(nic)

  assert len(nics_input) == len(nics)

  # HV/BE parameters
  hvparams = baserlib.CheckParameter(data, "hvparams", default={})
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  beparams = baserlib.CheckParameter(data, "beparams", default={})
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)

  return opcodes.OpCreateInstance(
    mode=baserlib.CheckParameter(data, "mode"),
    instance_name=baserlib.CheckParameter(data, "name"),
    os_type=baserlib.CheckParameter(data, "os", default=None),
    force_variant=baserlib.CheckParameter(data, "force_variant",
                                          default=False),
    pnode=baserlib.CheckParameter(data, "pnode", default=None),
    snode=baserlib.CheckParameter(data, "snode", default=None),
    disk_template=baserlib.CheckParameter(data, "disk_template"),
    disks=disks,
    nics=nics,
    src_node=baserlib.CheckParameter(data, "src_node", default=None),
    src_path=baserlib.CheckParameter(data, "src_path", default=None),
    start=baserlib.CheckParameter(data, "start", default=True),
    wait_for_sync=True,
    ip_check=baserlib.CheckParameter(data, "ip_check", default=True),
    name_check=baserlib.CheckParameter(data, "name_check", default=True),
    file_storage_dir=baserlib.CheckParameter(data, "file_storage_dir",
                                             default=None),
    file_driver=baserlib.CheckParameter(data, "file_driver",
                                        default=constants.FD_LOOP),
    source_handshake=baserlib.CheckParameter(data, "source_handshake",
                                             default=None),
    source_x509_ca=baserlib.CheckParameter(data, "source_x509_ca",
                                           default=None),
    source_instance_name=baserlib.CheckParameter(data, "source_instance_name",
                                                 default=None),
    iallocator=baserlib.CheckParameter(data, "iallocator", default=None),
    hypervisor=baserlib.CheckParameter(data, "hypervisor", default=None),
    hvparams=hvparams,
    beparams=beparams,
    dry_run=dry_run,
    )
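
# A minimal sketch of a version 1 creation request body accepted by the parser
# above (names and values are illustrative; "mode", "name", "disk_template",
# "disks" and "nics" are the parameters checked without a default and are
# therefore required):
#
#   {"__version__": 1,
#    "mode": "create",
#    "name": "inst1.example.com",
#    "disk_template": "plain",
#    "disks": [{"size": 1024, "mode": "rw"}],
#    "nics": [{"mode": "bridged", "link": "xen-br0"}],
#    "os": "debian-etch",
#    "beparams": {"memory": 512}}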


class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def _ParseVersion0CreateRequest(self):
    """Parses an instance creation request version 0.

    Request data version 0 is deprecated and should not be used anymore.

    @rtype: L{opcodes.OpCreateInstance}
    @return: Instance creation opcode

    """
    # Do not modify anymore, request data version 0 is deprecated
    beparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.BES_PARAMETERS)
    hvparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.HVS_PARAMETERS)
    fn = self.getBodyParameter

    # disk processing
    disk_data = fn('disks')
    if not isinstance(disk_data, list):
      raise http.HttpBadRequest("The 'disks' parameter should be a list")
    disks = []
    for idx, d in enumerate(disk_data):
      if not isinstance(d, int):
        raise http.HttpBadRequest("Disk %d specification wrong: should"
                                  " be an integer" % idx)
      disks.append({"size": d})

    # nic processing (one nic only)
    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
    if fn("ip", None) is not None:
      nics[0]["ip"] = fn("ip")
    if fn("mode", None) is not None:
      nics[0]["mode"] = fn("mode")
    if fn("link", None) is not None:
      nics[0]["link"] = fn("link")
    if fn("bridge", None) is not None:
      nics[0]["bridge"] = fn("bridge")

    # Do not modify anymore, request data version 0 is deprecated
    return opcodes.OpCreateInstance(
      mode=constants.INSTANCE_CREATE,
      instance_name=fn('name'),
      disks=disks,
      disk_template=fn('disk_template'),
      os_type=fn('os'),
      pnode=fn('pnode', None),
      snode=fn('snode', None),
      iallocator=fn('iallocator', None),
      nics=nics,
      start=fn('start', True),
      ip_check=fn('ip_check', True),
      name_check=fn('name_check', True),
      wait_for_sync=True,
      hypervisor=fn('hypervisor', None),
      hvparams=hvparams,
      beparams=beparams,
      file_storage_dir=fn('file_storage_dir', None),
      file_driver=fn('file_driver', constants.FD_LOOP),
      dry_run=bool(self.dryRun()),
      )

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      op = self._ParseVersion0CreateRequest()
    elif data_version == 1:
      op = _ParseInstanceCreateRequestVersion1(self.request_body,
                                               self.dryRun())
    else:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    return baserlib.SubmitJob([op])


class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resources.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    instance_name = self.items[0]

    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[instance_name],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])

  def DELETE(self):
    """Delete an instance.

    """
    op = opcodes.OpRemoveInstance(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])


class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    """
    instance_name = self.items[0]
    static = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpQueryInstanceData(instances=[instance_name],
                                     static=static)
    return baserlib.SubmitJob([op])


class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    instance_name = self.items[0]
    reboot_type = self.queryargs.get('type',
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
    op = opcodes.OpRebootInstance(instance_name=instance_name,
                                  reboot_type=reboot_type,
                                  ignore_secondaries=ignore_secondaries,
                                  dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
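
# Example call for the reboot resource above (instance name is illustrative):
#
#   POST /2/instances/inst1.example.com/reboot?type=soft&ignore_secondaries=0
#
# Omitting "type" falls back to a hard reboot; the call returns a job id.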


class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable('force'))
    op = opcodes.OpStartupInstance(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    """
    instance_name = self.items[0]
    op = opcodes.OpShutdownInstance(instance_name=instance_name,
                                    dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    instance_name = self.items[0]
    ostype = self._checkStringVariable('os')
    nostartup = self._checkIntVariable('nostartup')
    ops = [
      opcodes.OpShutdownInstance(instance_name=instance_name),
      opcodes.OpReinstallInstance(instance_name=instance_name, os_type=ostype),
      ]
    if not nostartup:
      ops.append(opcodes.OpStartupInstance(instance_name=instance_name,
                                           force=False))
    return baserlib.SubmitJob(ops)


class R_2_instances_name_replace_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  def POST(self):
    """Replaces disks on an instance.

    """
    instance_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    mode = self._checkStringVariable("mode", default=None)
    raw_disks = self._checkStringVariable("disks", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)

    if raw_disks:
      try:
        disks = [int(part) for part in raw_disks.split(",")]
      except ValueError, err:
        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
    else:
      disks = []

    op = opcodes.OpReplaceDisks(instance_name=instance_name,
                                remote_node=remote_node,
                                mode=mode,
                                disks=disks,
                                iallocator=iallocator)

    return baserlib.SubmitJob([op])


class R_2_instances_name_activate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  def PUT(self):
    """Activate disks for an instance.

    The URI might contain the ignore_size parameter to ignore the currently
    recorded size.

    """
    instance_name = self.items[0]
    ignore_size = bool(self._checkIntVariable('ignore_size'))

    op = opcodes.OpActivateInstanceDisks(instance_name=instance_name,
                                         ignore_size=ignore_size)

    return baserlib.SubmitJob([op])


class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  def PUT(self):
    """Deactivate disks for an instance.

    """
    instance_name = self.items[0]

    op = opcodes.OpDeactivateInstanceDisks(instance_name=instance_name)

    return baserlib.SubmitJob([op])


class R_2_instances_name_prepare_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  def PUT(self):
    """Prepares an export for an instance.

    @return: a job id

    """
    instance_name = self.items[0]
    mode = self._checkStringVariable("mode")

    op = opcodes.OpPrepareExport(instance_name=instance_name,
                                 mode=mode)

    return baserlib.SubmitJob([op])


def _ParseExportInstanceRequest(name, data):
  """Parses a request for an instance export.

  @rtype: L{opcodes.OpExportInstance}
  @return: Instance export opcode

  """
  mode = baserlib.CheckParameter(data, "mode",
                                 default=constants.EXPORT_MODE_LOCAL)
  target_node = baserlib.CheckParameter(data, "destination")
  shutdown = baserlib.CheckParameter(data, "shutdown", exptype=bool)
  remove_instance = baserlib.CheckParameter(data, "remove_instance",
                                            exptype=bool, default=False)
  x509_key_name = baserlib.CheckParameter(data, "x509_key_name", default=None)
  destination_x509_ca = baserlib.CheckParameter(data, "destination_x509_ca",
                                                default=None)

  return opcodes.OpExportInstance(instance_name=name,
                                  mode=mode,
                                  target_node=target_node,
                                  shutdown=shutdown,
                                  remove_instance=remove_instance,
                                  x509_key_name=x509_key_name,
                                  destination_x509_ca=destination_x509_ca)
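
# A minimal sketch of an export request body accepted by the parser above
# ("destination" and "shutdown" have no default and are therefore required;
# the node name is illustrative):
#
#   {"destination": "node2.example.com",
#    "shutdown": true,
#    "remove_instance": false}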


class R_2_instances_name_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/export resource.

  """
  def PUT(self):
    """Exports an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    op = _ParseExportInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


class _R_Tags(baserlib.R_Generic):
  """ Quasiclass for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    if self.TAG_LEVEL != constants.TAG_CLUSTER:
      self.name = items[0]
    else:
      self.name = ""

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable-msg=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The request body, a list of strings, should be PUT to this URI. The
    call returns a job id.

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to a URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # we do not want to delete all tags
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))
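
# Example calls for the tag resources (this class and its subclasses below);
# names and tags are illustrative only:
#
#   GET    /2/instances/inst1.example.com/tags
#   PUT    /2/instances/inst1.example.com/tags?tag=web&tag=production
#   DELETE /2/nodes/node1.example.com/tags?tag=web
#
# PUT and DELETE return a job id; GET returns the tag list directly.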


class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE


class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE


class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER