#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Remote API version 2 resource library.

  PUT or POST?
  ============

  According to RFC2616, the main difference between PUT and POST is that
  POST can create new resources, whereas PUT can only create the resource
  the URI was pointing to on the PUT request.

  In the context of this module, for instance creation a POST on
  /2/instances is legitimate while a PUT would not be, because it creates
  a new entity rather than just replacing /2/instances with it.

  So when adding new methods, if they operate on the URI entity itself,
  PUT should be preferred over POST.

"""

# pylint: disable-msg=C0103

# C0103: Invalid name, since the R_* names are not conforming

from ganeti import opcodes
from ganeti import http
from ganeti import constants
from ganeti import cli
from ganeti import rapi
from ganeti import ht
from ganeti.rapi import baserlib


_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
            "network_port",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
            ] + _COMMON_FIELDS

N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",
            "group.uuid",
            ] + _COMMON_FIELDS

G_FIELDS = ["name", "uuid",
            "alloc_policy",
            "node_cnt", "node_list",
            "ctime", "mtime", "serial_no",
            ]  # "tags" is missing, which prevents using _COMMON_FIELDS here.

_NR_DRAINED = "drained"
_NR_MASTER_CANDIATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

_NR_MAP = {
  constants.NR_MASTER: _NR_MASTER,
  constants.NR_MCANDIDATE: _NR_MASTER_CANDIATE,
  constants.NR_DRAINED: _NR_DRAINED,
  constants.NR_OFFLINE: _NR_OFFLINE,
  constants.NR_REGULAR: _NR_REGULAR,
  }

assert frozenset(_NR_MAP.keys()) == constants.NR_ALL

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"

# Feature string for node migration version 1
_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10


class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  @staticmethod
  def GET():
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION
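
# Illustrative note (not part of the original module): a client determines the
# remote API version with a plain GET on /version; the reply is the integer
# constants.RAPI_VERSION (2 for this generation of the API), e.g.:
#
#   GET /version        -> 2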


class R_2_info(baserlib.R_Generic):
  """/2/info resource.

  """
  @staticmethod
  def GET():
    """Returns cluster information.

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()


class R_2_features(baserlib.R_Generic):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Returns list of optional RAPI features implemented.

    """
    return [_INST_CREATE_REQV1, _INST_REINSTALL_REQV1, _NODE_MIGRATE_REQV1]


class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  @staticmethod
  def GET():
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # use a custom feedback function that logs the status instead of printing
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, variants) in diagnose_data:
      os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names


class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  @staticmethod
  def PUT():
    """Redistribute configuration to all nodes.

    """
    return baserlib.SubmitJob([opcodes.OpClusterRedistConf()])


class R_2_cluster_modify(baserlib.R_Generic):
  """/2/modify resource.

  """
  def PUT(self):
    """Modifies cluster parameters.

    @return: a job id

    """
    op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body,
                             None)

    return baserlib.SubmitJob([op])


class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  @staticmethod
  def GET():
    """Returns a dictionary of jobs.

    @return: a dictionary with job ids and uris.

    """
    fields = ["id"]
    cl = baserlib.GetClient()
    # Convert the list of lists to the list of ids
    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
    return baserlib.BuildUriList(result, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))


class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries, one for each
              opcode in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_id = self.items[0]
    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, result)

  def DELETE(self):
    """Cancel not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
    return result


class R_2_jobs_id_wait(baserlib.R_Generic):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = baserlib.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }
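
# Illustrative example (not part of the original module): a client waiting for
# job changes sends a JSON body such as the following to
# /2/jobs/[job_id]/wait; "fields" must be a list, while the other two values
# echo what the previous call returned (or null on the first call):
#
#   {
#     "fields": ["status", "opstatus"],
#     "previous_job_info": null,
#     "previous_log_serial": null
#   }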


class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))


class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resource.

  """
  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[node_name], fields=N_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])


class R_2_nodes_name_role(baserlib.R_Generic):
  """ /2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.request_body

    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpNodeSetParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])
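
# Illustrative example (not part of the original module): the role resource
# accepts a bare JSON string as the request body, one of the _NR_* values
# defined above, e.g.:
#
#   PUT /2/nodes/node1.example.com/role        body: "master-candidate"
#
# which is translated into an OpNodeSetParams job as shown in PUT() above.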


class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all secondary instances off a node.

    """
    node_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)
    early_r = bool(self._checkIntVariable("early_release", default=0))
    dry_run = bool(self.dryRun())

    cl = baserlib.GetClient()

    op = opcodes.OpNodeEvacStrategy(nodes=[node_name],
                                    iallocator=iallocator,
                                    remote_node=remote_node)

    job_id = baserlib.SubmitJob([op], cl)
    # use a custom feedback function that logs the status instead of printing
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)

    jobs = []
    for iname, node in result[0]:
      if dry_run:
        jid = None
      else:
        op = opcodes.OpInstanceReplaceDisks(instance_name=iname,
                                            remote_node=node, disks=[],
                                            mode=constants.REPLACE_DISK_CHG,
                                            early_release=early_r)
        jid = baserlib.SubmitJob([op])
      jobs.append((jid, iname, node))

    return jobs


class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    """
    node_name = self.items[0]

    if self.queryargs:
      # Support old-style requests
      if "live" in self.queryargs and "mode" in self.queryargs:
        raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                  " be passed")

      if "live" in self.queryargs:
        if self._checkIntVariable("live", default=1):
          mode = constants.HT_MIGRATION_LIVE
        else:
          mode = constants.HT_MIGRATION_NONLIVE
      else:
        mode = self._checkStringVariable("mode", default=None)

      data = {
        "mode": mode,
        }
    else:
      data = self.request_body

    op = baserlib.FillOpcode(opcodes.OpNodeMigrate, data, {
      "node_name": node_name,
      })

    return baserlib.SubmitJob([op])
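
# Illustrative example (not part of the original module): old-style clients
# select the migration mode with a "live" query argument, newer clients send a
# body whose "mode" value is one of the constants.HT_MIGRATION_* strings:
#
#   POST /2/nodes/node1.example.com/migrate?live=0          (legacy form)
#   POST /2/nodes/node1.example.com/migrate   body: {"mode": "live"}
#
# ("live" as a literal mode string is an assumption about the value of
# constants.HT_MIGRATION_LIVE.)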


class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUNodeQueryStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpNodeQueryStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  def PUT(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpNodeModifyStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  def PUT(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])


def _ParseCreateGroupRequest(data, dry_run):
  """Parses a request for creating a node group.

  @rtype: L{opcodes.OpGroupAdd}
  @return: Group creation opcode

  """
  override = {
    "dry_run": dry_run,
    }

  rename = {
    "name": "group_name",
    }

  return baserlib.FillOpcode(opcodes.OpGroupAdd, data, override,
                             rename=rename)


class R_2_groups(baserlib.R_Generic):
  """/2/groups resource.

  """
  def GET(self):
    """Returns a list of all node groups.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryGroups([], G_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, G_FIELDS)
    else:
      data = client.QueryGroups([], ["name"], False)
      groupnames = [row[0] for row in data]
      return baserlib.BuildUriList(groupnames, "/2/groups/%s",
                                   uri_fields=("name", "uri"))

  def POST(self):
    """Create a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
    return baserlib.SubmitJob([op])
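
# Illustrative example (not part of the original module): the group creation
# body uses "name", which _ParseCreateGroupRequest() renames to the
# "group_name" field of OpGroupAdd; any other keys are passed through
# FillOpcode unchanged:
#
#   POST /2/groups   body: {"name": "group1"}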


class R_2_groups_name(baserlib.R_Generic):
  """/2/groups/[group_name] resource.

  """
  def GET(self):
    """Send information about a node group.

    """
    group_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryGroups,
                                            names=[group_name], fields=G_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(G_FIELDS, result[0])

  def DELETE(self):
    """Delete a node group.

    """
    op = opcodes.OpGroupRemove(group_name=self.items[0],
                               dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


def _ParseModifyGroupRequest(name, data):
  """Parses a request for modifying a node group.

  @rtype: L{opcodes.OpGroupSetParams}
  @return: Group modify opcode

  """
  return baserlib.FillOpcode(opcodes.OpGroupSetParams, data, {
    "group_name": name,
    })



class R_2_groups_name_modify(baserlib.R_Generic):
  """/2/groups/[group_name]/modify resource.

  """
  def PUT(self):
666
    baserlib.CheckType(self.request_body, dict, "Body contents")
667

    
668
    op = _ParseModifyGroupRequest(self.items[0], self.request_body)
669

    
670
    return baserlib.SubmitJob([op])
671

    
672

    
673
def _ParseRenameGroupRequest(name, data, dry_run):
674
  """Parses a request for renaming a node group.
675

676
  @type name: string
677
  @param name: name of the node group to rename
678
  @type data: dict
679
  @param data: the body received by the rename request
680
  @type dry_run: bool
681
  @param dry_run: whether to perform a dry run
682

683
  @rtype: L{opcodes.OpGroupRename}
684
  @return: Node group rename opcode
685

686
  """
687
  return baserlib.FillOpcode(opcodes.OpGroupRename, data, {
688
    "group_name": name,
689
    "dry_run": dry_run,
690
    })
691

    
692

    
693
class R_2_groups_name_rename(baserlib.R_Generic):
694
  """/2/groups/[group_name]/rename resource.
695

696
  """
697
  def PUT(self):
698
    """Changes the name of a node group.
699

700
    @return: a job id
701

702
    """
703
    baserlib.CheckType(self.request_body, dict, "Body contents")
704
    op = _ParseRenameGroupRequest(self.items[0], self.request_body,
705
                                  self.dryRun())
706
    return baserlib.SubmitJob([op])
707

    
708

    
709
class R_2_groups_name_assign_nodes(baserlib.R_Generic):
710
  """/2/groups/[group_name]/assign-nodes resource.
711

712
  """
713
  def PUT(self):
714
    """Assigns nodes to a group.
715

716
    @return: a job id
717

718
    """
719
    op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body, {
720
      "group_name": self.items[0],
721
      "dry_run": self.dryRun(),
722
      "force": self.useForce(),
723
      })
724

    
725
    return baserlib.SubmitJob([op])


def _ParseInstanceCreateRequestVersion1(data, dry_run):
  """Parses an instance creation request version 1.

  @rtype: L{opcodes.OpInstanceCreate}
  @return: Instance creation opcode

  """
  override = {
    "dry_run": dry_run,
    }

  rename = {
    "os": "os_type",
    "name": "instance_name",
    }

  return baserlib.FillOpcode(opcodes.OpInstanceCreate, data, override,
                             rename=rename)


class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      raise http.HttpBadRequest("Instance creation request version 0 is no"
                                " longer supported")
    elif data_version == 1:
      data = self.request_body.copy()
      # Remove "__version__"
      data.pop(_REQ_DATA_VERSION, None)
      op = _ParseInstanceCreateRequestVersion1(data, self.dryRun())
    else:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    return baserlib.SubmitJob([op])
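
# Illustrative example (not part of the original module): a version 1 creation
# request must carry "__version__": 1; "name" and "os" are renamed to the
# "instance_name" and "os_type" opcode fields, and all remaining keys are
# handed to OpInstanceCreate via FillOpcode, so they must match that opcode's
# parameters. Shortened sketch (a real request needs the full parameter set):
#
#   POST /2/instances   body:
#   {
#     "__version__": 1,
#     "name": "inst1.example.com",
#     "os": "debian-etch",
#     "disk_template": "drbd"
#   }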


class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resource.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    instance_name = self.items[0]

    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[instance_name],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])

  def DELETE(self):
    """Delete an instance.

    """
    op = opcodes.OpInstanceRemove(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])


class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    """
    instance_name = self.items[0]
    static = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpInstanceQueryData(instances=[instance_name],
                                     static=static)
    return baserlib.SubmitJob([op])


class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    instance_name = self.items[0]
    reboot_type = self.queryargs.get('type',
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
    op = opcodes.OpInstanceReboot(instance_name=instance_name,
                                  reboot_type=reboot_type,
                                  ignore_secondaries=ignore_secondaries,
                                  dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
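
# Illustrative example (not part of the original module): both reboot
# parameters are plain query arguments; "type" defaults to a hard reboot and
# "ignore_secondaries" is interpreted as an integer flag, e.g.:
#
#   POST /2/instances/inst1.example.com/reboot?type=soft&ignore_secondaries=1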


class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

873
    The URI takes force=[False|True] parameter to start the instance
874
    if even if secondary disks are failing.
875

876
    """
877
    instance_name = self.items[0]
878
    force_startup = bool(self._checkIntVariable('force'))
879
    no_remember = bool(self._checkIntVariable('no_remember'))
880
    op = opcodes.OpInstanceStartup(instance_name=instance_name,
881
                                   force=force_startup,
882
                                   dry_run=bool(self.dryRun()),
883
                                   no_remember=no_remember)
884

    
885
    return baserlib.SubmitJob([op])
886

    
887

    
888
def _ParseShutdownInstanceRequest(name, data, dry_run, no_remember):
889
  """Parses a request for an instance shutdown.
890

891
  @rtype: L{opcodes.OpInstanceShutdown}
892
  @return: Instance shutdown opcode
893

894
  """
895
  return baserlib.FillOpcode(opcodes.OpInstanceShutdown, data, {
896
    "instance_name": name,
897
    "dry_run": dry_run,
898
    "no_remember": no_remember,
899
    })
900

    
901

    
902
class R_2_instances_name_shutdown(baserlib.R_Generic):
903
  """/2/instances/[instance_name]/shutdown resource.
904

905
  Implements an instance shutdown.
906

907
  """
908
  def PUT(self):
909
    """Shutdown an instance.
910

911
    @return: a job id
912

913
    """
914
    baserlib.CheckType(self.request_body, dict, "Body contents")
915

    
916
    no_remember = bool(self._checkIntVariable('no_remember'))
917
    op = _ParseShutdownInstanceRequest(self.items[0], self.request_body,
918
                                       bool(self.dryRun()), no_remember)
919

    
920
    return baserlib.SubmitJob([op])
921

    
922

    
923
def _ParseInstanceReinstallRequest(name, data):
924
  """Parses a request for reinstalling an instance.
925

926
  """
927
  if not isinstance(data, dict):
928
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")
929

    
930
  ostype = baserlib.CheckParameter(data, "os", default=None)
931
  start = baserlib.CheckParameter(data, "start", exptype=bool,
932
                                  default=True)
933
  osparams = baserlib.CheckParameter(data, "osparams", default=None)
934

    
935
  ops = [
936
    opcodes.OpInstanceShutdown(instance_name=name),
937
    opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
938
                                osparams=osparams),
939
    ]
940

    
941
  if start:
942
    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))
943

    
944
  return ops
945

    
946

    
947
class R_2_instances_name_reinstall(baserlib.R_Generic):
948
  """/2/instances/[instance_name]/reinstall resource.
949

950
  Implements an instance reinstall.
951

952
  """
953
  def POST(self):
954
    """Reinstall an instance.
955

956
    The URI takes os=name and nostartup=[0|1] optional
957
    parameters. By default, the instance will be started
958
    automatically.
959

960
    """
961
    if self.request_body:
962
      if self.queryargs:
963
        raise http.HttpBadRequest("Can't combine query and body parameters")
964

    
965
      body = self.request_body
966
    elif self.queryargs:
967
      # Legacy interface, do not modify/extend
968
      body = {
969
        "os": self._checkStringVariable("os"),
970
        "start": not self._checkIntVariable("nostartup"),
971
        }
972
    else:
973
      body = {}
974

    
975
    ops = _ParseInstanceReinstallRequest(self.items[0], body)
976

    
977
    return baserlib.SubmitJob(ops)
978

    
979

    
def _ParseInstanceReplaceDisksRequest(name, data):
  """Parses a request for replacing the disks of an instance.

  @rtype: L{opcodes.OpInstanceReplaceDisks}
  @return: Instance replace-disks opcode

  """
  override = {
    "instance_name": name,
    }

  # Parse disks
  try:
    raw_disks = data["disks"]
  except KeyError:
    pass
  else:
    if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102
      # Backwards compatibility for strings of the format "1, 2, 3"
      try:
        data["disks"] = [int(part) for part in raw_disks.split(",")]
      except (TypeError, ValueError), err:
        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))

  return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)
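
# Illustrative example (not part of the original module): "disks" may be given
# either as a list of integer indices or, for backwards compatibility, as a
# comma-separated string, which the parser above converts itself:
#
#   {"disks": [0, 2], "mode": "replace_auto"}
#   {"disks": "0,2", "mode": "replace_auto"}     (equivalent legacy form)
#
# ("replace_auto" is an assumption about the string value of the replacement
# mode constant; the accepted values are those of OpInstanceReplaceDisks.)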


class R_2_instances_name_replace_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  def POST(self):
    """Replaces disks on an instance.

    """
    op = _ParseInstanceReplaceDisksRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


class R_2_instances_name_activate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  def PUT(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    instance_name = self.items[0]
    ignore_size = bool(self._checkIntVariable('ignore_size'))

    op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
                                         ignore_size=ignore_size)

    return baserlib.SubmitJob([op])


class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  def PUT(self):
    """Deactivate disks for an instance.

    """
    instance_name = self.items[0]

    op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)

    return baserlib.SubmitJob([op])


class R_2_instances_name_prepare_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  def PUT(self):
    """Prepares an export for an instance.

    @return: a job id

    """
    instance_name = self.items[0]
    mode = self._checkStringVariable("mode")

    op = opcodes.OpBackupPrepare(instance_name=instance_name,
                                 mode=mode)

    return baserlib.SubmitJob([op])


def _ParseExportInstanceRequest(name, data):
  """Parses a request for an instance export.

  @rtype: L{opcodes.OpBackupExport}
  @return: Instance export opcode

  """
  # Rename "destination" to "target_node"
  try:
    data["target_node"] = data.pop("destination")
  except KeyError:
    pass

  return baserlib.FillOpcode(opcodes.OpBackupExport, data, {
    "instance_name": name,
    })


class R_2_instances_name_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/export resource.

  """
  def PUT(self):
    """Exports an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    op = _ParseExportInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
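
# Illustrative example (not part of the original module): the export body uses
# "destination", which _ParseExportInstanceRequest() renames to the
# "target_node" field of OpBackupExport before FillOpcode is applied:
#
#   PUT /2/instances/inst1.example.com/export
#   body: {"destination": "node2.example.com", "shutdown": true}
#
# ("shutdown" is an assumption about an OpBackupExport parameter; any other
# keys are passed through unchanged.)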


def _ParseMigrateInstanceRequest(name, data):
  """Parses a request for an instance migration.

  @rtype: L{opcodes.OpInstanceMigrate}
  @return: Instance migration opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceMigrate, data, {
    "instance_name": name,
    })


class R_2_instances_name_migrate(baserlib.R_Generic):
  """/2/instances/[instance_name]/migrate resource.

  """
  def PUT(self):
    """Migrates an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseRenameInstanceRequest(name, data):
  """Parses a request for renaming an instance.

  @rtype: L{opcodes.OpInstanceRename}
  @return: Instance rename opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceRename, data, {
    "instance_name": name,
    })


class R_2_instances_name_rename(baserlib.R_Generic):
  """/2/instances/[instance_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseRenameInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseModifyInstanceRequest(name, data):
  """Parses a request for modifying an instance.

  @rtype: L{opcodes.OpInstanceSetParams}
  @return: Instance modify opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceSetParams, data, {
    "instance_name": name,
    })


class R_2_instances_name_modify(baserlib.R_Generic):
  """/2/instances/[instance_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseModifyInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


class R_2_instances_name_disk_grow(baserlib.R_Generic):
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.

  """
  def POST(self):
    """Increases the size of an instance disk.

    @return: a job id

    """
    op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
      "instance_name": self.items[0],
      "disk": int(self.items[1]),
      })

    return baserlib.SubmitJob([op])


class R_2_instances_name_console(baserlib.R_Generic):
  """/2/instances/[instance_name]/console resource.

  """
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Request information for connecting to instance's console.

    @return: Serialized instance console description, see
             L{objects.InstanceConsole}

    """
    client = baserlib.GetClient()

    ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)

    if console is None:
      raise http.HttpServiceUnavailable("Instance console unavailable")

    assert isinstance(console, dict)
    return console


def _GetQueryFields(args):
  """Extracts and splits the "fields" query argument.

  """
  try:
    fields = args["fields"]
  except KeyError:
    raise http.HttpBadRequest("Missing 'fields' query argument")

  return _SplitQueryFields(fields[0])


def _SplitQueryFields(fields):
  """Splits a comma-separated list of field names into a list.

  """
  return [i.strip() for i in fields.split(",")]


class R_2_query(baserlib.R_Generic):
  """/2/query/[resource] resource.

  """
  # Results might contain sensitive information
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def _Query(self, fields, filter_):
    return baserlib.GetClient().Query(self.items[0], fields, filter_).ToDict()

  def GET(self):
    """Returns resource information.

    @return: Query result, see L{objects.QueryResponse}

    """
    return self._Query(_GetQueryFields(self.queryargs), None)

  def PUT(self):
    """Submits job querying for resources.

    @return: Query result, see L{objects.QueryResponse}

    """
    body = self.request_body

    baserlib.CheckType(body, dict, "Body contents")

    try:
      fields = body["fields"]
    except KeyError:
      fields = _GetQueryFields(self.queryargs)

    return self._Query(fields, self.request_body.get("filter", None))
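
# Illustrative example (not part of the original module): a PUT on
# /2/query/[resource] takes the field list and an optional filter in the body,
# while GET only takes a "fields" query argument:
#
#   GET /2/query/instances?fields=name,status
#   PUT /2/query/instances
#   body: {"fields": ["name", "status"],
#          "filter": ["=", "name", "inst1.example.com"]}
#
# (The list-based filter shown is an assumption about the query filter syntax;
# the value is passed through unmodified to the client's Query() call.)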


class R_2_query_fields(baserlib.R_Generic):
  """/2/query/[resource]/fields resource.

  """
  def GET(self):
    """Retrieves list of available fields for a resource.

    @return: List of serialized L{objects.QueryFieldDefinition}

    """
    try:
      raw_fields = self.queryargs["fields"]
    except KeyError:
      fields = None
    else:
      fields = _SplitQueryFields(raw_fields[0])

    return baserlib.GetClient().QueryFields(self.items[0], fields).ToDict()


class _R_Tags(baserlib.R_Generic):
  """Quasiclass for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      self.name = None
    else:
      self.name = items[0]

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable-msg=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The tags to add are passed via the 'tag' query parameter of the PUT
    request; the response is a job id.

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE request should be
    addressed to a URI like: /tags?tag=[tag]&tag=[tag]

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # we do not want to delete all tags implicitly
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))
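
# Illustrative example (not part of the original module): tags are always
# passed as repeated "tag" query arguments, at every level (cluster, node,
# group, instance), e.g.:
#
#   PUT    /2/instances/inst1.example.com/tags?tag=web&tag=production
#   DELETE /2/tags?tag=staging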


class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE


class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE


class R_2_groups_name_tags(_R_Tags):
  """ /2/groups/[group_name]/tags resource.

  Manages per-nodegroup tags.

  """
  TAG_LEVEL = constants.TAG_NODEGROUP


class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER