#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Remote API version 2 resource library.

  PUT or POST?
  ============

  According to RFC2616 the main difference between PUT and POST is that
  POST can create new resources but PUT can only create the resource the
  URI was pointing to on the PUT request.

  In the context of this module, instance creation via POST on /2/instances
  is legitimate, while PUT would not be, since it creates a new entity
  rather than just replacing /2/instances with it.

  So when adding new methods, if they are operating on the URI entity itself,
  PUT should be preferred over POST.

"""
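
# Purely illustrative examples of the convention described above (both URIs
# are handled by resources defined later in this module):
#   POST /2/instances                           creates a new instance
#   PUT  /2/instances/[instance_name]/startup   operates on an existing one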

    
# pylint: disable-msg=C0103

# C0103: Invalid name, since the R_* names are not conforming

from ganeti import opcodes
from ganeti import http
from ganeti import constants
from ganeti import cli
from ganeti import rapi
from ganeti import ht
from ganeti.rapi import baserlib


_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
            "network_port",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
            ] + _COMMON_FIELDS

N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",
            "group.uuid",
            ] + _COMMON_FIELDS

G_FIELDS = ["name", "uuid",
            "alloc_policy",
            "node_cnt", "node_list",
            "ctime", "mtime", "serial_no",
            ]  # "tags" is missing, so _COMMON_FIELDS cannot be used here.

_NR_DRAINED = "drained"
_NR_MASTER_CANDIATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

_NR_MAP = {
  constants.NR_MASTER: _NR_MASTER,
  constants.NR_MCANDIDATE: _NR_MASTER_CANDIATE,
  constants.NR_DRAINED: _NR_DRAINED,
  constants.NR_OFFLINE: _NR_OFFLINE,
  constants.NR_REGULAR: _NR_REGULAR,
  }

assert frozenset(_NR_MAP.keys()) == constants.NR_ALL

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"

# Feature string for node migration version 1
_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"

# Feature string for node evacuation with LU-generated jobs
_NODE_EVAC_RES1 = "node-evac-res1"

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10
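
# Illustrative only: GET on the /2/features resource (R_2_features below)
# simply returns the feature strings defined above, i.e. a list such as
#   ["instance-create-reqv1", "instance-reinstall-reqv1",
#    "node-migrate-reqv1", "node-evac-res1"]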

    

class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  @staticmethod
  def GET():
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION


class R_2_info(baserlib.R_Generic):
  """/2/info resource.

  """
  @staticmethod
  def GET():
    """Returns cluster information.

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()


class R_2_features(baserlib.R_Generic):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Returns list of optional RAPI features implemented.

    """
    return [_INST_CREATE_REQV1, _INST_REINSTALL_REQV1, _NODE_MIGRATE_REQV1,
            _NODE_EVAC_RES1]


class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  @staticmethod
  def GET():
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # we use a custom feedback function, instead of printing we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, variants) in diagnose_data:
      os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names


class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  @staticmethod
  def PUT():
    """Redistribute configuration to all nodes.

    """
    return baserlib.SubmitJob([opcodes.OpClusterRedistConf()])


class R_2_cluster_modify(baserlib.R_Generic):
  """/2/modify resource.

  """
  def PUT(self):
    """Modifies cluster parameters.

    @return: a job id

    """
    op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body,
                             None)

    return baserlib.SubmitJob([op])


class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  @staticmethod
  def GET():
    """Returns a dictionary of jobs.

    @return: a dictionary with job id and uri.

    """
    fields = ["id"]
    cl = baserlib.GetClient()
    # Convert the list of lists to the list of ids
    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
    return baserlib.BuildUriList(result, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))


class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries, one for
              each opcode in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_id = self.items[0]
    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, result)

  def DELETE(self):
    """Cancel a not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
    return result


class R_2_jobs_id_wait(baserlib.R_Generic):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = baserlib.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }

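# Illustrative request body for the /2/jobs/[job_id]/wait resource above
# (field values are made up): the handler reads "fields", "previous_job_info"
# and "previous_log_serial" from the body, e.g.
#   {"fields": ["status"], "previous_job_info": null,
#    "previous_log_serial": null}
# and answers with {"job_info": ..., "log_entries": ...}, or null if the job
# did not change within _WFJC_TIMEOUT seconds.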

    
class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))


class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resource.

  """
  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[node_name], fields=N_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])


class R_2_nodes_name_role(baserlib.R_Generic):
  """/2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.request_body

    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpNodeSetParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])

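# Illustrative only (the node name is made up): the role PUT above expects
# the request body to be a bare JSON string naming one of the roles defined
# at the top of this module, e.g.
#   PUT /2/nodes/node1.example.com/role        body: "master-candidate"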

    
class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all instances off a node.

    """
    op = baserlib.FillOpcode(opcodes.OpNodeEvacuate, self.request_body, {
      "node_name": self.items[0],
      "dry_run": self.dryRun(),
      })

    return baserlib.SubmitJob([op])


class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    """
    node_name = self.items[0]

    if self.queryargs:
      # Support old-style requests
      if "live" in self.queryargs and "mode" in self.queryargs:
        raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                  " be passed")

      if "live" in self.queryargs:
        if self._checkIntVariable("live", default=1):
          mode = constants.HT_MIGRATION_LIVE
        else:
          mode = constants.HT_MIGRATION_NONLIVE
      else:
        mode = self._checkStringVariable("mode", default=None)

      data = {
        "mode": mode,
        }
    else:
      data = self.request_body

    op = baserlib.FillOpcode(opcodes.OpNodeMigrate, data, {
      "node_name": node_name,
      })

    return baserlib.SubmitJob([op])

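# Illustrative only (the node name is made up): the migrate resource above
# accepts either the legacy query-argument form, e.g.
#   POST /2/nodes/node1.example.com/migrate?live=0
# which is translated into {"mode": constants.HT_MIGRATION_NONLIVE}, or a
# request body passing OpNodeMigrate parameters directly, e.g. {"mode": ...}.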

    
class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUNodeQueryStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Requests a list of storage units on a node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpNodeQueryStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  def PUT(self):
    """Modifies a storage unit on a node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpNodeModifyStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  def PUT(self):
    """Repairs a storage unit on a node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])


def _ParseCreateGroupRequest(data, dry_run):
  """Parses a request for creating a node group.

  @rtype: L{opcodes.OpGroupAdd}
  @return: Group creation opcode

  """
  override = {
    "dry_run": dry_run,
    }

  rename = {
    "name": "group_name",
    }

  return baserlib.FillOpcode(opcodes.OpGroupAdd, data, override,
                             rename=rename)


class R_2_groups(baserlib.R_Generic):
  """/2/groups resource.

  """
  def GET(self):
    """Returns a list of all node groups.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryGroups([], G_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, G_FIELDS)
    else:
      data = client.QueryGroups([], ["name"], False)
      groupnames = [row[0] for row in data]
      return baserlib.BuildUriList(groupnames, "/2/groups/%s",
                                   uri_fields=("name", "uri"))

  def POST(self):
    """Create a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
    return baserlib.SubmitJob([op])


class R_2_groups_name(baserlib.R_Generic):
  """/2/groups/[group_name] resource.

  """
  def GET(self):
    """Send information about a node group.

    """
    group_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryGroups,
                                            names=[group_name], fields=G_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(G_FIELDS, result[0])

  def DELETE(self):
    """Delete a node group.

    """
    op = opcodes.OpGroupRemove(group_name=self.items[0],
                               dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


def _ParseModifyGroupRequest(name, data):
  """Parses a request for modifying a node group.

  @rtype: L{opcodes.OpGroupSetParams}
  @return: Group modify opcode

  """
  return baserlib.FillOpcode(opcodes.OpGroupSetParams, data, {
    "group_name": name,
    })



class R_2_groups_name_modify(baserlib.R_Generic):
638
  """/2/groups/[group_name]/modify resource.
639

640
  """
641
  def PUT(self):
642
    """Changes some parameters of node group.
643

644
    @return: a job id
645

646
    """
647
    baserlib.CheckType(self.request_body, dict, "Body contents")
648

    
649
    op = _ParseModifyGroupRequest(self.items[0], self.request_body)
650

    
651
    return baserlib.SubmitJob([op])
652

    
653

    
def _ParseRenameGroupRequest(name, data, dry_run):
  """Parses a request for renaming a node group.

  @type name: string
  @param name: name of the node group to rename
  @type data: dict
  @param data: the body received by the rename request
  @type dry_run: bool
  @param dry_run: whether to perform a dry run

  @rtype: L{opcodes.OpGroupRename}
  @return: Node group rename opcode

  """
  return baserlib.FillOpcode(opcodes.OpGroupRename, data, {
    "group_name": name,
    "dry_run": dry_run,
    })


class R_2_groups_name_rename(baserlib.R_Generic):
  """/2/groups/[group_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    op = _ParseRenameGroupRequest(self.items[0], self.request_body,
                                  self.dryRun())
    return baserlib.SubmitJob([op])


class R_2_groups_name_assign_nodes(baserlib.R_Generic):
  """/2/groups/[group_name]/assign-nodes resource.

  """
  def PUT(self):
    """Assigns nodes to a group.

    @return: a job id

    """
    op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      "force": self.useForce(),
      })

    return baserlib.SubmitJob([op])


def _ParseInstanceCreateRequestVersion1(data, dry_run):
  """Parses an instance creation request version 1.

  @rtype: L{opcodes.OpInstanceCreate}
  @return: Instance creation opcode

  """
  override = {
    "dry_run": dry_run,
    }

  rename = {
    "os": "os_type",
    "name": "instance_name",
    }

  return baserlib.FillOpcode(opcodes.OpInstanceCreate, data, override,
                             rename=rename)


class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      raise http.HttpBadRequest("Instance creation request version 0 is no"
                                " longer supported")
    elif data_version == 1:
      data = self.request_body.copy()
      # Remove "__version__"
      data.pop(_REQ_DATA_VERSION, None)
      op = _ParseInstanceCreateRequestVersion1(data, self.dryRun())
    else:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    return baserlib.SubmitJob([op])

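# Illustrative version 1 creation request for the POST handler above (all
# values are made up; only "__version__", "name" and "os" are handled
# explicitly here, the remaining keys are OpInstanceCreate parameters):
#   {"__version__": 1, "name": "inst1.example.com", "os": "debian-etch", ...}
# _ParseInstanceCreateRequestVersion1 renames "name" to "instance_name" and
# "os" to "os_type" before filling the opcode.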

    
class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resource.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    instance_name = self.items[0]

    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[instance_name],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])

  def DELETE(self):
    """Delete an instance.

    """
    op = opcodes.OpInstanceRemove(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])


class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    """
    instance_name = self.items[0]
    static = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpInstanceQueryData(instances=[instance_name],
                                     static=static)
    return baserlib.SubmitJob([op])


class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    instance_name = self.items[0]
    reboot_type = self.queryargs.get('type',
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
    op = opcodes.OpInstanceReboot(instance_name=instance_name,
                                  reboot_type=reboot_type,
                                  ignore_secondaries=ignore_secondaries,
                                  dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable('force'))
    no_remember = bool(self._checkIntVariable('no_remember'))
    op = opcodes.OpInstanceStartup(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()),
                                   no_remember=no_remember)

    return baserlib.SubmitJob([op])


def _ParseShutdownInstanceRequest(name, data, dry_run, no_remember):
  """Parses a request for an instance shutdown.

  @rtype: L{opcodes.OpInstanceShutdown}
  @return: Instance shutdown opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceShutdown, data, {
    "instance_name": name,
    "dry_run": dry_run,
    "no_remember": no_remember,
    })


class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    no_remember = bool(self._checkIntVariable('no_remember'))
    op = _ParseShutdownInstanceRequest(self.items[0], self.request_body,
                                       bool(self.dryRun()), no_remember)

    return baserlib.SubmitJob([op])


def _ParseInstanceReinstallRequest(name, data):
  """Parses a request for reinstalling an instance.

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  ostype = baserlib.CheckParameter(data, "os", default=None)
  start = baserlib.CheckParameter(data, "start", exptype=bool,
                                  default=True)
  osparams = baserlib.CheckParameter(data, "osparams", default=None)

  ops = [
    opcodes.OpInstanceShutdown(instance_name=name),
    opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
                                osparams=osparams),
    ]

  if start:
    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))

  return ops


class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    if self.request_body:
      if self.queryargs:
        raise http.HttpBadRequest("Can't combine query and body parameters")

      body = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      body = {
        "os": self._checkStringVariable("os"),
        "start": not self._checkIntVariable("nostartup"),
        }
    else:
      body = {}

    ops = _ParseInstanceReinstallRequest(self.items[0], body)

    return baserlib.SubmitJob(ops)

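# Illustrative only (values made up): a reinstall body as parsed above may
# look like {"os": "debian-etch", "start": true, "osparams": null}, while the
# legacy query form is ?os=debian-etch&nostartup=1.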

    
def _ParseInstanceReplaceDisksRequest(name, data):
  """Parses a request for replacing an instance's disks.

  @rtype: L{opcodes.OpInstanceReplaceDisks}
  @return: Disk replacement opcode

  """
  override = {
    "instance_name": name,
    }

  # Parse disks
  try:
    raw_disks = data["disks"]
  except KeyError:
    pass
  else:
    if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102
      # Backwards compatibility for strings of the format "1, 2, 3"
      try:
        data["disks"] = [int(part) for part in raw_disks.split(",")]
      except (TypeError, ValueError), err:
        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))

  return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)

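# Illustrative only (indices made up): the "disks" entry handled above may be
# a list of indices, e.g. {"disks": [0, 2]}, or, for backwards compatibility,
# a string such as {"disks": "0,2"} which is split into integers.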

    
class R_2_instances_name_replace_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  def POST(self):
    """Replaces disks on an instance.

    """
    op = _ParseInstanceReplaceDisksRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


class R_2_instances_name_activate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  def PUT(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    instance_name = self.items[0]
    ignore_size = bool(self._checkIntVariable('ignore_size'))

    op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
                                         ignore_size=ignore_size)

    return baserlib.SubmitJob([op])


class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  def PUT(self):
    """Deactivate disks for an instance.

    """
    instance_name = self.items[0]

    op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)

    return baserlib.SubmitJob([op])


class R_2_instances_name_prepare_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  def PUT(self):
    """Prepares an export for an instance.

    @return: a job id

    """
    instance_name = self.items[0]
    mode = self._checkStringVariable("mode")

    op = opcodes.OpBackupPrepare(instance_name=instance_name,
                                 mode=mode)

    return baserlib.SubmitJob([op])


def _ParseExportInstanceRequest(name, data):
  """Parses a request for an instance export.

  @rtype: L{opcodes.OpBackupExport}
  @return: Instance export opcode

  """
  # Rename "destination" to "target_node"
  try:
    data["target_node"] = data.pop("destination")
  except KeyError:
    pass

  return baserlib.FillOpcode(opcodes.OpBackupExport, data, {
    "instance_name": name,
    })


class R_2_instances_name_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/export resource.

  """
  def PUT(self):
    """Exports an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    op = _ParseExportInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseMigrateInstanceRequest(name, data):
  """Parses a request for an instance migration.

  @rtype: L{opcodes.OpInstanceMigrate}
  @return: Instance migration opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceMigrate, data, {
    "instance_name": name,
    })


class R_2_instances_name_migrate(baserlib.R_Generic):
  """/2/instances/[instance_name]/migrate resource.

  """
  def PUT(self):
    """Migrates an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseRenameInstanceRequest(name, data):
  """Parses a request for renaming an instance.

  @rtype: L{opcodes.OpInstanceRename}
  @return: Instance rename opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceRename, data, {
    "instance_name": name,
    })


class R_2_instances_name_rename(baserlib.R_Generic):
  """/2/instances/[instance_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseRenameInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseModifyInstanceRequest(name, data):
  """Parses a request for modifying an instance.

  @rtype: L{opcodes.OpInstanceSetParams}
  @return: Instance modify opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceSetParams, data, {
    "instance_name": name,
    })


class R_2_instances_name_modify(baserlib.R_Generic):
  """/2/instances/[instance_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseModifyInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


class R_2_instances_name_disk_grow(baserlib.R_Generic):
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.

  """
  def POST(self):
    """Increases the size of an instance disk.

    @return: a job id

    """
    op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
      "instance_name": self.items[0],
      "disk": int(self.items[1]),
      })

    return baserlib.SubmitJob([op])


class R_2_instances_name_console(baserlib.R_Generic):
  """/2/instances/[instance_name]/console resource.

  """
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Request information for connecting to instance's console.

    @return: Serialized instance console description, see
             L{objects.InstanceConsole}

    """
    client = baserlib.GetClient()

    ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)

    if console is None:
      raise http.HttpServiceUnavailable("Instance console unavailable")

    assert isinstance(console, dict)
    return console


def _GetQueryFields(args):
  """Returns the field names given in the "fields" query argument.

  """
  try:
    fields = args["fields"]
  except KeyError:
    raise http.HttpBadRequest("Missing 'fields' query argument")

  return _SplitQueryFields(fields[0])


def _SplitQueryFields(fields):
  """Splits a comma-separated list of field names.

  """
  return [i.strip() for i in fields.split(",")]


class R_2_query(baserlib.R_Generic):
  """/2/query/[resource] resource.

  """
  # Results might contain sensitive information
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def _Query(self, fields, filter_):
    return baserlib.GetClient().Query(self.items[0], fields, filter_).ToDict()

  def GET(self):
    """Returns resource information.

    @return: Query result, see L{objects.QueryResponse}

    """
    return self._Query(_GetQueryFields(self.queryargs), None)

  def PUT(self):
    """Submits a query for resources.

    @return: Query result, see L{objects.QueryResponse}

    """
    body = self.request_body

    baserlib.CheckType(body, dict, "Body contents")

    try:
      fields = body["fields"]
    except KeyError:
      fields = _GetQueryFields(self.queryargs)

    return self._Query(fields, self.request_body.get("filter", None))

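# Illustrative only (resource and field names are examples, not a complete
# reference): GET passes the fields as a query argument, e.g.
#   GET /2/query/node?fields=name,drained
# while PUT reads "fields" and an optional "filter" from the request body,
# e.g. {"fields": ["name", "drained"], "filter": null}.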

    
class R_2_query_fields(baserlib.R_Generic):
  """/2/query/[resource]/fields resource.

  """
  def GET(self):
    """Retrieves list of available fields for a resource.

    @return: List of serialized L{objects.QueryFieldDefinition}

    """
    try:
      raw_fields = self.queryargs["fields"]
    except KeyError:
      fields = None
    else:
      fields = _SplitQueryFields(raw_fields[0])

    return baserlib.GetClient().QueryFields(self.items[0], fields).ToDict()


class _R_Tags(baserlib.R_Generic):
  """Quasiclass for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out the cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      self.name = None
    else:
      self.name = items[0]

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable-msg=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    A list of strings should be PUT to this URI, and a job id is
    returned.

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to a URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # we refuse to delete all tags at once
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))

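# Illustrative only (tag values and instance name made up): tags are always
# passed as repeated 'tag' query arguments, mirroring the checks in _R_Tags
# above, e.g.
#   PUT    /2/instances/inst1.example.com/tags?tag=foo&tag=bar
#   DELETE /2/instances/inst1.example.com/tags?tag=foo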

    
class R_2_instances_name_tags(_R_Tags):
  """/2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE


class R_2_nodes_name_tags(_R_Tags):
  """/2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE


class R_2_groups_name_tags(_R_Tags):
  """/2/groups/[group_name]/tags resource.

  Manages per-nodegroup tags.

  """
  TAG_LEVEL = constants.TAG_NODEGROUP


class R_2_tags(_R_Tags):
  """/2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER