#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Remote API version 2 resources library.

  PUT or POST?
  ============

  According to RFC2616 the main difference between PUT and POST is that
  POST can create new resources but PUT can only create the resource the
  URI was pointing to on the PUT request.

  In the context of this module, for instance creation POST on
  /2/instances is legitimate while PUT would not be, because it creates a
  new entity rather than just replacing /2/instances with it.

  So when adding new methods, if they operate on the URI entity itself,
  PUT should be preferred over POST.

"""

# pylint: disable-msg=C0103

# C0103: Invalid name, since the R_* names are not conforming

from ganeti import opcodes
from ganeti import http
from ganeti import constants
from ganeti import cli
from ganeti import utils
from ganeti import rapi
from ganeti.rapi import baserlib


_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
            "network_port",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            ] + _COMMON_FIELDS

N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            ] + _COMMON_FIELDS

_NR_DRAINED = "drained"
_NR_MASTER_CANDIATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

_NR_MAP = {
  "M": _NR_MASTER,
  "C": _NR_MASTER_CANDIATE,
  "D": _NR_DRAINED,
  "O": _NR_OFFLINE,
  "R": _NR_REGULAR,
  }
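# Note: _NR_MAP translates the single-letter node "role" field returned by
# QueryNodes into the symbolic role names served by the
# /2/nodes/[node_name]/role resource (see R_2_nodes_name_role below).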

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10


class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  @staticmethod
  def GET():
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION


class R_2_info(baserlib.R_Generic):
  """Cluster info.

  """
  @staticmethod
  def GET():
    """Returns cluster information.

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()


class R_2_features(baserlib.R_Generic):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Returns list of optional RAPI features implemented.

    """
    return [_INST_CREATE_REQV1]


class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  @staticmethod
  def GET():
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpDiagnoseOS(output_fields=["name", "valid", "variants"],
                              names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # use a custom feedback function: instead of printing, log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, valid, variants) in diagnose_data:
      if valid:
        os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names


class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  @staticmethod
  def PUT():
    """Redistribute configuration to all nodes.

    """
    return baserlib.SubmitJob([opcodes.OpRedistributeConfig()])


class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  @staticmethod
  def GET():
    """Returns a list of all jobs.

    @return: a list of dictionaries with job id and URI.

    """
    fields = ["id"]
    cl = baserlib.GetClient()
    # Convert the list of lists to the list of ids
    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
    return baserlib.BuildUriList(result, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))


class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries, one for
              each opcode in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_id = self.items[0]
    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, result)

  def DELETE(self):
    """Cancel a not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
    return result


class R_2_jobs_id_wait(baserlib.R_Generic):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = baserlib.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }


class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))


class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resources.

  """
  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[node_name], fields=N_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])


class R_2_nodes_name_role(baserlib.R_Generic):
  """ /2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.request_body

    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpSetNodeParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])


class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all secondary instances off a node.

    """
    node_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)
    early_r = bool(self._checkIntVariable("early_release", default=0))
    dry_run = bool(self.dryRun())

    cl = baserlib.GetClient()

    op = opcodes.OpNodeEvacuationStrategy(nodes=[node_name],
                                          iallocator=iallocator,
                                          remote_node=remote_node)

    job_id = baserlib.SubmitJob([op], cl)
    # use a custom feedback function: instead of printing, log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)

    jobs = []
    for iname, node in result:
      if dry_run:
        jid = None
      else:
        op = opcodes.OpReplaceDisks(instance_name=iname,
                                    remote_node=node, disks=[],
                                    mode=constants.REPLACE_DISK_CHG,
                                    early_release=early_r)
        jid = baserlib.SubmitJob([op])
      jobs.append((jid, iname, node))

    return jobs


class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    """
    node_name = self.items[0]
    live = bool(self._checkIntVariable("live", default=1))

    op = opcodes.OpMigrateNode(node_name=node_name, live=live)

    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUQueryNodeStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
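    # Illustrative query (parameter values are examples only):
    #   GET /2/nodes/node1.example.com/storage?storage_type=lvm-vg
    #       &output_fields=name,size,free
    # Both storage_type and output_fields are required.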
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpQueryNodeStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  def PUT(self):
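    # Illustrative query (parameter values are examples only):
    #   PUT /2/nodes/node1.example.com/storage/modify
    #       ?storage_type=lvm-vg&name=xenvg&allocatable=0
    # storage_type and name are required; allocatable is optional.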
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpModifyNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  def PUT(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])


def _ParseInstanceCreateRequestVersion1(data, dry_run):
  """Parses an instance creation request version 1.

  @rtype: L{opcodes.OpCreateInstance}
  @return: Instance creation opcode

  """
  # Disks
  disks_input = baserlib.CheckParameter(data, "disks", exptype=list)

  disks = []
  for idx, i in enumerate(disks_input):
    baserlib.CheckType(i, dict, "Disk %d specification" % idx)

    # Size is mandatory
    try:
      size = i["size"]
    except KeyError:
      raise http.HttpBadRequest("Disk %d specification wrong: missing disk"
                                " size" % idx)

    disk = {
      "size": size,
      }

    # Optional disk access mode
    try:
      disk_access = i["mode"]
    except KeyError:
      pass
    else:
      disk["mode"] = disk_access

    disks.append(disk)

  assert len(disks_input) == len(disks)

  # Network interfaces
  nics_input = baserlib.CheckParameter(data, "nics", exptype=list)

  nics = []
  for idx, i in enumerate(nics_input):
    baserlib.CheckType(i, dict, "NIC %d specification" % idx)

    nic = {}

    for field in ["mode", "ip", "link", "bridge"]:
      try:
        value = i[field]
      except KeyError:
        continue

      nic[field] = value

    nics.append(nic)

  assert len(nics_input) == len(nics)

  # HV/BE parameters
  hvparams = baserlib.CheckParameter(data, "hvparams", default={})
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  beparams = baserlib.CheckParameter(data, "beparams", default={})
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)

  return opcodes.OpCreateInstance(
    mode=baserlib.CheckParameter(data, "mode"),
    instance_name=baserlib.CheckParameter(data, "name"),
    os_type=baserlib.CheckParameter(data, "os", default=None),
    force_variant=baserlib.CheckParameter(data, "force_variant",
                                          default=False),
    pnode=baserlib.CheckParameter(data, "pnode", default=None),
    snode=baserlib.CheckParameter(data, "snode", default=None),
    disk_template=baserlib.CheckParameter(data, "disk_template"),
    disks=disks,
    nics=nics,
    src_node=baserlib.CheckParameter(data, "src_node", default=None),
    src_path=baserlib.CheckParameter(data, "src_path", default=None),
    start=baserlib.CheckParameter(data, "start", default=True),
    wait_for_sync=True,
    ip_check=baserlib.CheckParameter(data, "ip_check", default=True),
    name_check=baserlib.CheckParameter(data, "name_check", default=True),
    file_storage_dir=baserlib.CheckParameter(data, "file_storage_dir",
                                             default=None),
    file_driver=baserlib.CheckParameter(data, "file_driver",
                                        default=constants.FD_LOOP),
    source_handshake=baserlib.CheckParameter(data, "source_handshake",
                                             default=None),
    source_x509_ca=baserlib.CheckParameter(data, "source_x509_ca",
                                           default=None),
    source_instance_name=baserlib.CheckParameter(data, "source_instance_name",
                                                 default=None),
    iallocator=baserlib.CheckParameter(data, "iallocator", default=None),
    hypervisor=baserlib.CheckParameter(data, "hypervisor", default=None),
    hvparams=hvparams,
    beparams=beparams,
    dry_run=dry_run,
    )


class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def _ParseVersion0CreateRequest(self):
    """Parses an instance creation request version 0.

    Request data version 0 is deprecated and should not be used anymore.

    @rtype: L{opcodes.OpCreateInstance}
    @return: Instance creation opcode

    """
    # Do not modify anymore, request data version 0 is deprecated
    beparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.BES_PARAMETERS)
    hvparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.HVS_PARAMETERS)
    fn = self.getBodyParameter

    # disk processing
    disk_data = fn('disks')
    if not isinstance(disk_data, list):
      raise http.HttpBadRequest("The 'disks' parameter should be a list")
    disks = []
    for idx, d in enumerate(disk_data):
      if not isinstance(d, int):
        raise http.HttpBadRequest("Disk %d specification wrong: should"
                                  " be an integer" % idx)
      disks.append({"size": d})

    # nic processing (one nic only)
    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
    if fn("ip", None) is not None:
      nics[0]["ip"] = fn("ip")
    if fn("mode", None) is not None:
      nics[0]["mode"] = fn("mode")
    if fn("link", None) is not None:
      nics[0]["link"] = fn("link")
    if fn("bridge", None) is not None:
      nics[0]["bridge"] = fn("bridge")

    # Do not modify anymore, request data version 0 is deprecated
    return opcodes.OpCreateInstance(
      mode=constants.INSTANCE_CREATE,
      instance_name=fn('name'),
      disks=disks,
      disk_template=fn('disk_template'),
      os_type=fn('os'),
      pnode=fn('pnode', None),
      snode=fn('snode', None),
      iallocator=fn('iallocator', None),
      nics=nics,
      start=fn('start', True),
      ip_check=fn('ip_check', True),
      name_check=fn('name_check', True),
      wait_for_sync=True,
      hypervisor=fn('hypervisor', None),
      hvparams=hvparams,
      beparams=beparams,
      file_storage_dir=fn('file_storage_dir', None),
      file_driver=fn('file_driver', constants.FD_LOOP),
      dry_run=bool(self.dryRun()),
      )

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      op = self._ParseVersion0CreateRequest()
    elif data_version == 1:
      op = _ParseInstanceCreateRequestVersion1(self.request_body,
                                               self.dryRun())
    else:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    return baserlib.SubmitJob([op])


class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resources.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    instance_name = self.items[0]

    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[instance_name],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])

  def DELETE(self):
    """Delete an instance.

    """
    op = opcodes.OpRemoveInstance(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])


class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    """
    instance_name = self.items[0]
    static = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpQueryInstanceData(instances=[instance_name],
                                     static=static)
    return baserlib.SubmitJob([op])


class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    instance_name = self.items[0]
    reboot_type = self.queryargs.get('type',
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
    op = opcodes.OpRebootInstance(instance_name=instance_name,
                                  reboot_type=reboot_type,
                                  ignore_secondaries=ignore_secondaries,
                                  dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable('force'))
    op = opcodes.OpStartupInstance(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    """
    instance_name = self.items[0]
    op = opcodes.OpShutdownInstance(instance_name=instance_name,
                                    dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    instance_name = self.items[0]
    ostype = self._checkStringVariable('os')
    nostartup = self._checkIntVariable('nostartup')
    ops = [
      opcodes.OpShutdownInstance(instance_name=instance_name),
      opcodes.OpReinstallInstance(instance_name=instance_name, os_type=ostype),
      ]
    if not nostartup:
      ops.append(opcodes.OpStartupInstance(instance_name=instance_name,
                                           force=False))
    return baserlib.SubmitJob(ops)


class R_2_instances_name_replace_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  def POST(self):
    """Replaces disks on an instance.

    """
    instance_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    mode = self._checkStringVariable("mode", default=None)
    raw_disks = self._checkStringVariable("disks", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)

    if raw_disks:
      try:
        disks = [int(part) for part in raw_disks.split(",")]
      except ValueError, err:
        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
    else:
      disks = []

    op = opcodes.OpReplaceDisks(instance_name=instance_name,
                                remote_node=remote_node,
                                mode=mode,
                                disks=disks,
                                iallocator=iallocator)

    return baserlib.SubmitJob([op])


class R_2_instances_name_activate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  def PUT(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    instance_name = self.items[0]
    ignore_size = bool(self._checkIntVariable('ignore_size'))

    op = opcodes.OpActivateInstanceDisks(instance_name=instance_name,
                                         ignore_size=ignore_size)

    return baserlib.SubmitJob([op])


class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  def PUT(self):
    """Deactivate disks for an instance.

    """
    instance_name = self.items[0]

    op = opcodes.OpDeactivateInstanceDisks(instance_name=instance_name)

    return baserlib.SubmitJob([op])


class R_2_instances_name_prepare_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  def PUT(self):
    """Prepares an export for an instance.

    @return: a job id

    """
    instance_name = self.items[0]
    mode = self._checkStringVariable("mode")

    op = opcodes.OpPrepareExport(instance_name=instance_name,
                                 mode=mode)

    return baserlib.SubmitJob([op])


def _ParseExportInstanceRequest(name, data):
  """Parses a request for an instance export.

  @rtype: L{opcodes.OpExportInstance}
  @return: Instance export opcode

  """
  mode = baserlib.CheckParameter(data, "mode",
                                 default=constants.EXPORT_MODE_LOCAL)
  target_node = baserlib.CheckParameter(data, "destination")
  shutdown = baserlib.CheckParameter(data, "shutdown", exptype=bool)
  remove_instance = baserlib.CheckParameter(data, "remove_instance",
                                            exptype=bool, default=False)
  x509_key_name = baserlib.CheckParameter(data, "x509_key_name", default=None)
  destination_x509_ca = baserlib.CheckParameter(data, "destination_x509_ca",
                                                default=None)

  return opcodes.OpExportInstance(instance_name=name,
                                  mode=mode,
                                  target_node=target_node,
                                  shutdown=shutdown,
                                  remove_instance=remove_instance,
                                  x509_key_name=x509_key_name,
                                  destination_x509_ca=destination_x509_ca)


class R_2_instances_name_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/export resource.

  """
  def PUT(self):
    """Exports an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    op = _ParseExportInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


class _R_Tags(baserlib.R_Generic):
  """ Quasiclass for tagging resources

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    if self.TAG_LEVEL != constants.TAG_CLUSTER:
      self.name = items[0]
    else:
      self.name = ""

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable-msg=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The tags to add must be given via the 'tag' query parameter (which can
    be repeated); a job id is returned.

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to a URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # no, we are not going to delete all tags
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))


class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE


class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE


class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER