#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Remote API version 2 resources library.

  PUT or POST?
  ============

  According to RFC2616 the main difference between PUT and POST is that
  POST can create new resources but PUT can only create the resource the
  URI was pointing to on the PUT request.

  In the context of this module, instance creation via POST on
  /2/instances is legitimate while PUT would not be, because it creates a
  new entity rather than just replacing /2/instances with it.

  So when adding new methods, if they operate on the URI entity itself,
  PUT should be preferred over POST.

"""

# pylint: disable-msg=C0103

# C0103: Invalid name, since the R_* names are not conforming

from ganeti import opcodes
from ganeti import http
from ganeti import constants
from ganeti import cli
from ganeti import utils
from ganeti import rapi
from ganeti.rapi import baserlib


_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
            "network_port",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "status",
            ] + _COMMON_FIELDS

N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            ] + _COMMON_FIELDS

_NR_DRAINED = "drained"
_NR_MASTER_CANDIATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

_NR_MAP = {
  "M": _NR_MASTER,
  "C": _NR_MASTER_CANDIATE,
  "D": _NR_DRAINED,
  "O": _NR_OFFLINE,
  "R": _NR_REGULAR,
  }

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10
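

# Illustrative example (not part of the original module): a client-side
# sketch of the PUT-or-POST convention described in the module docstring,
# using the standard library's httplib. The host, port and instance name
# are placeholders; request bodies and authentication are simplified.
def _ExamplePutVsPost(host, port):
  """Sketch of the PUT/POST convention as seen from a RAPI client.

  """
  import httplib
  headers = {"Content-Type": "application/json"}
  conn = httplib.HTTPSConnection(host, port)
  # Creating an instance adds a new entity below /2/instances, hence POST
  conn.request("POST", "/2/instances", "{}", headers)
  conn.getresponse().read()
  # Starting an existing instance acts on that instance's own URI, hence PUT
  conn.request("PUT", "/2/instances/inst1.example.com/startup", "", headers)
  conn.getresponse().read()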


class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  @staticmethod
  def GET():
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION


class R_2_info(baserlib.R_Generic):
  """Cluster info.

  """
  @staticmethod
  def GET():
    """Returns cluster information.

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()


class R_2_features(baserlib.R_Generic):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Returns list of optional RAPI features implemented.

    """
    return [_INST_CREATE_REQV1]
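

# Illustrative example (not part of the original module): how a client can
# use the feature list above to pick the instance creation request data
# version it should send to /2/instances.
def _ExamplePickCreateVersion(features):
  """Return the request data version to use, given the /2/features reply.

  """
  if _INST_CREATE_REQV1 in features:
    return 1
  # Fall back to the deprecated version 0 format
  return 0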


class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  @staticmethod
  def GET():
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpDiagnoseOS(output_fields=["name", "valid", "variants"],
                              names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # we use a custom feedback function; instead of printing we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, valid, variants) in diagnose_data:
      if valid:
        os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names


class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  @staticmethod
  def PUT():
    """Redistribute configuration to all nodes.

    """
    return baserlib.SubmitJob([opcodes.OpRedistributeConfig()])


class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  @staticmethod
  def GET():
    """Returns a list of all jobs.

    @return: a list of dictionaries with job id and uri.

    """
    fields = ["id"]
    cl = baserlib.GetClient()
    # Convert the list of lists to the list of ids
    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
    return baserlib.BuildUriList(result, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))


class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries for each
              opcode in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_id = self.items[0]
    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, result)

  def DELETE(self):
    """Cancel a not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
    return result


class R_2_jobs_id_wait(baserlib.R_Generic):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = baserlib.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }
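

# Illustrative example (not part of the original module): the request body a
# client could send to /2/jobs/[job_id]/wait. The field names follow the
# checks in R_2_jobs_id_wait.GET above; the concrete values are made up.
def _ExampleJobWaitRequestBody():
  """Return a sample request body for the job wait resource.

  """
  return {
    # Job fields to watch, cf. the fields queried in R_2_jobs_id.GET
    "fields": ["status", "opstatus"],
    # Job state returned by a previous call, or None on the first call
    "previous_job_info": None,
    # Serial of the last log entry already seen, or None
    "previous_log_serial": None,
    }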


class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))
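

# Illustrative example (not part of the original module): passing bulk=1 in
# the query string makes R_2_nodes.GET above return full node objects
# instead of a list of name/URI pairs.
_EXAMPLE_NODES_BULK_URI = "/2/nodes?bulk=1"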


class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resources.

  """
  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[node_name], fields=N_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])


class R_2_nodes_name_role(baserlib.R_Generic):
  """ /2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.req.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.req.request_body

    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpSetNodeParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])
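

# Illustrative example (not part of the original module): the body of a PUT
# to /2/nodes/[node_name]/role is a single JSON-encoded string, one of the
# _NR_* values defined at the top of this module.
def _ExampleNodeRoleBody():
  """Return a sample body value for the node role PUT resource.

  On the wire this must be JSON-encoded, i.e. sent as "drained" including
  the quotes.

  """
  return _NR_DRAINED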


class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all secondary instances off a node.

    """
    node_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)

    op = opcodes.OpEvacuateNode(node_name=node_name,
                                remote_node=remote_node,
                                iallocator=iallocator)

    return baserlib.SubmitJob([op])


class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    """
    node_name = self.items[0]
    live = bool(self._checkIntVariable("live", default=1))

    op = opcodes.OpMigrateNode(node_name=node_name, live=live)

    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUQueryNodeStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpQueryNodeStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])
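

# Illustrative example (not part of the original module): both query
# parameters required by R_2_nodes_name_storage.GET above, built with the
# standard library's urllib. The node name is a placeholder and the chosen
# storage type and output fields are just one possible combination.
def _ExampleNodeStorageUri():
  """Return a sample URI for the node storage resource.

  """
  import urllib
  query = urllib.urlencode({
    "storage_type": constants.ST_LVM_VG,
    "output_fields": ",".join([constants.SF_NAME, constants.SF_SIZE,
                               constants.SF_FREE]),
    })
  return "/2/nodes/node1.example.com/storage?%s" % query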


class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  def PUT(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpModifyNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  def PUT(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])


def _ParseInstanceCreateRequestVersion1(data, dry_run):
  """Parses an instance creation request version 1.

  @rtype: L{opcodes.OpCreateInstance}
  @return: Instance creation opcode

  """
  # Disks
  disks_input = baserlib.CheckParameter(data, "disks", exptype=list)

  disks = []
  for idx, i in enumerate(disks_input):
    baserlib.CheckType(i, dict, "Disk %d specification" % idx)

    # Size is mandatory
    try:
      size = i[constants.IDISK_SIZE]
    except KeyError:
      raise http.HttpBadRequest("Disk %d specification wrong: missing disk"
                                " size" % idx)

    disk = {
      constants.IDISK_SIZE: size,
      }

    # Optional disk access mode
    try:
      disk_access = i[constants.IDISK_MODE]
    except KeyError:
      pass
    else:
      disk[constants.IDISK_MODE] = disk_access

    disks.append(disk)

  assert len(disks_input) == len(disks)

  # Network interfaces
  nics_input = baserlib.CheckParameter(data, "nics", exptype=list)

  nics = []
  for idx, i in enumerate(nics_input):
    baserlib.CheckType(i, dict, "NIC %d specification" % idx)

    nic = {}

    for field in constants.INIC_PARAMS:
      try:
        value = i[field]
      except KeyError:
        continue

      nic[field] = value

    nics.append(nic)

  assert len(nics_input) == len(nics)

  # HV/BE parameters
  hvparams = baserlib.CheckParameter(data, "hvparams", default={})
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  beparams = baserlib.CheckParameter(data, "beparams", default={})
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)

  return opcodes.OpCreateInstance(
    mode=baserlib.CheckParameter(data, "mode"),
    instance_name=baserlib.CheckParameter(data, "name"),
    os_type=baserlib.CheckParameter(data, "os", default=None),
    force_variant=baserlib.CheckParameter(data, "force_variant",
                                          default=False),
    pnode=baserlib.CheckParameter(data, "pnode", default=None),
    snode=baserlib.CheckParameter(data, "snode", default=None),
    disk_template=baserlib.CheckParameter(data, "disk_template"),
    disks=disks,
    nics=nics,
    src_node=baserlib.CheckParameter(data, "src_node", default=None),
    src_path=baserlib.CheckParameter(data, "src_path", default=None),
    start=baserlib.CheckParameter(data, "start", default=True),
    wait_for_sync=True,
    ip_check=baserlib.CheckParameter(data, "ip_check", default=True),
    name_check=baserlib.CheckParameter(data, "name_check", default=True),
    file_storage_dir=baserlib.CheckParameter(data, "file_storage_dir",
                                             default=None),
    file_driver=baserlib.CheckParameter(data, "file_driver",
                                        default=constants.FD_LOOP),
    iallocator=baserlib.CheckParameter(data, "iallocator", default=None),
    hypervisor=baserlib.CheckParameter(data, "hypervisor", default=None),
    hvparams=hvparams,
    beparams=beparams,
    dry_run=dry_run,
    )
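

# Illustrative example (not part of the original module): a request body
# accepted by _ParseInstanceCreateRequestVersion1 above. Only "mode", "name",
# "disk_template", "disks" and "nics" are mandatory; the names and sizes
# below are made-up values.
def _ExampleInstanceCreateV1Body():
  """Return a sample version 1 instance creation request body.

  """
  return {
    _REQ_DATA_VERSION: 1,
    "mode": constants.INSTANCE_CREATE,
    "name": "inst1.example.com",
    "disk_template": constants.DT_PLAIN,
    "pnode": "node1.example.com",
    # One 10 GiB disk; the access mode key is optional
    "disks": [{constants.IDISK_SIZE: 10240}],
    # One NIC with all parameters left at their defaults
    "nics": [{}],
    "os": "debian-etch",
    }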


class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def _ParseVersion0CreateRequest(self):
    """Parses an instance creation request version 0.

    Request data version 0 is deprecated and should not be used anymore.

    @rtype: L{opcodes.OpCreateInstance}
    @return: Instance creation opcode

    """
    # Do not modify anymore, request data version 0 is deprecated
    beparams = baserlib.MakeParamsDict(self.req.request_body,
                                       constants.BES_PARAMETERS)
    hvparams = baserlib.MakeParamsDict(self.req.request_body,
                                       constants.HVS_PARAMETERS)
    fn = self.getBodyParameter

    # disk processing
    disk_data = fn('disks')
    if not isinstance(disk_data, list):
      raise http.HttpBadRequest("The 'disks' parameter should be a list")
    disks = []
    for idx, d in enumerate(disk_data):
      if not isinstance(d, int):
        raise http.HttpBadRequest("Disk %d specification wrong: should"
                                  " be an integer" % idx)
      disks.append({"size": d})

    # nic processing (one nic only)
    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
    if fn("ip", None) is not None:
      nics[0]["ip"] = fn("ip")
    if fn("mode", None) is not None:
      nics[0]["mode"] = fn("mode")
    if fn("link", None) is not None:
      nics[0]["link"] = fn("link")
    if fn("bridge", None) is not None:
      nics[0]["bridge"] = fn("bridge")

    # Do not modify anymore, request data version 0 is deprecated
    return opcodes.OpCreateInstance(
      mode=constants.INSTANCE_CREATE,
      instance_name=fn('name'),
      disks=disks,
      disk_template=fn('disk_template'),
      os_type=fn('os'),
      pnode=fn('pnode', None),
      snode=fn('snode', None),
      iallocator=fn('iallocator', None),
      nics=nics,
      start=fn('start', True),
      ip_check=fn('ip_check', True),
      name_check=fn('name_check', True),
      wait_for_sync=True,
      hypervisor=fn('hypervisor', None),
      hvparams=hvparams,
      beparams=beparams,
      file_storage_dir=fn('file_storage_dir', None),
      file_driver=fn('file_driver', constants.FD_LOOP),
      dry_run=bool(self.dryRun()),
      )

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.req.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      op = self._ParseVersion0CreateRequest()
    elif data_version == 1:
      op = _ParseInstanceCreateRequestVersion1(self.req.request_body,
                                               self.dryRun())
    else:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    return baserlib.SubmitJob([op])


class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resources.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    instance_name = self.items[0]

    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[instance_name],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])

  def DELETE(self):
    """Delete an instance.

    """
    op = opcodes.OpRemoveInstance(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])


class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    """
    instance_name = self.items[0]
    static = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpQueryInstanceData(instances=[instance_name],
                                     static=static)
    return baserlib.SubmitJob([op])


class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    instance_name = self.items[0]
    reboot_type = self.queryargs.get('type',
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
    op = opcodes.OpRebootInstance(instance_name=instance_name,
                                  reboot_type=reboot_type,
                                  ignore_secondaries=ignore_secondaries,
                                  dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
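

# Illustrative example (not part of the original module): reboot parameters
# go in the query string, e.g. built with the standard library's urllib.
# The instance name is a placeholder.
def _ExampleRebootUri():
  """Return a sample URI for the instance reboot resource.

  """
  import urllib
  query = urllib.urlencode({
    "type": constants.INSTANCE_REBOOT_SOFT,
    "ignore_secondaries": 0,
    })
  return "/2/instances/inst1.example.com/reboot?%s" % query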


class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable('force'))
    op = opcodes.OpStartupInstance(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    """
    instance_name = self.items[0]
    op = opcodes.OpShutdownInstance(instance_name=instance_name,
                                    dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    instance_name = self.items[0]
    ostype = self._checkStringVariable('os')
    nostartup = self._checkIntVariable('nostartup')
    ops = [
      opcodes.OpShutdownInstance(instance_name=instance_name),
      opcodes.OpReinstallInstance(instance_name=instance_name, os_type=ostype),
      ]
    if not nostartup:
      ops.append(opcodes.OpStartupInstance(instance_name=instance_name,
                                           force=False))
    return baserlib.SubmitJob(ops)


class R_2_instances_name_replace_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  def POST(self):
    """Replaces disks on an instance.

    """
    instance_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    mode = self._checkStringVariable("mode", default=None)
    raw_disks = self._checkStringVariable("disks", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)

    if raw_disks:
      try:
        disks = [int(part) for part in raw_disks.split(",")]
      except ValueError, err:
        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
    else:
      disks = []

    op = opcodes.OpReplaceDisks(instance_name=instance_name,
                                remote_node=remote_node,
                                mode=mode,
                                disks=disks,
                                iallocator=iallocator)

    return baserlib.SubmitJob([op])


class R_2_instances_name_activate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  def PUT(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    instance_name = self.items[0]
    ignore_size = bool(self._checkIntVariable('ignore_size'))

    op = opcodes.OpActivateInstanceDisks(instance_name=instance_name,
                                         ignore_size=ignore_size)

    return baserlib.SubmitJob([op])


class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  def PUT(self):
    """Deactivate disks for an instance.

    """
    instance_name = self.items[0]

    op = opcodes.OpDeactivateInstanceDisks(instance_name=instance_name)

    return baserlib.SubmitJob([op])


class _R_Tags(baserlib.R_Generic):
  """ Quasi-class for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    if self.TAG_LEVEL != constants.TAG_CLUSTER:
      self.name = items[0]
    else:
      self.name = ""

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable-msg=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The request, as a list of strings, should be PUT to this URI. The
    reply will be a job id.

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to a URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # we do not want to delete all tags
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))
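

# Illustrative example (not part of the original module): tags are passed as
# repeated 'tag' query arguments, for both PUT and DELETE. The instance name
# and tag values are made up.
_EXAMPLE_INSTANCE_TAGS_URI = ("/2/instances/inst1.example.com/tags"
                              "?tag=db&tag=web")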


class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE


class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE


class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER