#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Remote API version 2 resources library.

  PUT or POST?
  ============

  According to RFC2616 the main difference between PUT and POST is that
  POST can create new resources but PUT can only create the resource the
  URI was pointing to on the PUT request.

  In the context of this module, instance creation via POST on
  /2/instances is legitimate, while a PUT would not be, because it creates
  a new entity instead of just replacing /2/instances with it.

  So when adding new methods, if they operate on the URI entity itself,
  PUT should be preferred over POST.
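
  For example, creating an instance is a POST on /2/instances (see
  R_2_instances.POST below), while starting an existing instance is a PUT on
  /2/instances/[instance_name]/startup, since the latter operates on the
  entity already named by the URI.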

"""

# pylint: disable-msg=C0103

# C0103: Invalid name, since the R_* names are not conforming

from ganeti import opcodes
from ganeti import http
from ganeti import constants
from ganeti import cli
from ganeti import utils
from ganeti import rapi
from ganeti.rapi import baserlib


_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
            "network_port",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
            ] + _COMMON_FIELDS

N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",
            "group.uuid",
            ] + _COMMON_FIELDS

G_FIELDS = ["name", "uuid",
            "alloc_policy",
            "node_cnt", "node_list",
            "ctime", "mtime", "serial_no",
            ]  # "tags" is missing here, which prevents reusing _COMMON_FIELDS.

_NR_DRAINED = "drained"
_NR_MASTER_CANDIATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

_NR_MAP = {
  "M": _NR_MASTER,
  "C": _NR_MASTER_CANDIATE,
  "D": _NR_DRAINED,
  "O": _NR_OFFLINE,
  "R": _NR_REGULAR,
  }
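# The single-character keys are the values of the "role" field returned by a
# node query; R_2_nodes_name_role.GET below uses this map to translate them
# into the symbolic role names above.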

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10


class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  @staticmethod
  def GET():
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION


class R_2_info(baserlib.R_Generic):
  """Cluster info.

  """
  @staticmethod
  def GET():
    """Returns cluster information.

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()


class R_2_features(baserlib.R_Generic):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Returns the list of optional RAPI features implemented.

    """
    return [_INST_CREATE_REQV1, _INST_REINSTALL_REQV1]


class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  @staticmethod
  def GET():
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpDiagnoseOS(output_fields=["name", "variants"], names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # we use a custom feedback function; instead of printing, we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, variants) in diagnose_data:
      os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names


class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  @staticmethod
  def PUT():
    """Redistribute configuration to all nodes.

    """
    return baserlib.SubmitJob([opcodes.OpRedistributeConfig()])


class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  @staticmethod
  def GET():
    """Returns a list of all jobs.

    @return: a list of dictionaries with job id and uri.

    """
    fields = ["id"]
    cl = baserlib.GetClient()
    # Convert the list of lists into a list of ids
    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
    return baserlib.BuildUriList(result, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))


class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries, one for each
              opcode in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_id = self.items[0]
    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, result)

  def DELETE(self):
    """Cancel a not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
    return result


class R_2_jobs_id_wait(baserlib.R_Generic):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence access is restricted.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = baserlib.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }


class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))


class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resources.

  """
  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[node_name], fields=N_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])


class R_2_nodes_name_role(baserlib.R_Generic):
  """ /2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.request_body

    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpSetNodeParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])


class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all secondary instances off a node.

    """
    node_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)
    early_r = bool(self._checkIntVariable("early_release", default=0))
    dry_run = bool(self.dryRun())

    cl = baserlib.GetClient()

    op = opcodes.OpNodeEvacuationStrategy(nodes=[node_name],
                                          iallocator=iallocator,
                                          remote_node=remote_node)

    job_id = baserlib.SubmitJob([op], cl)
    # we use a custom feedback function; instead of printing, we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)

    jobs = []
    for iname, node in result:
      if dry_run:
        jid = None
      else:
        op = opcodes.OpReplaceDisks(instance_name=iname,
                                    remote_node=node, disks=[],
                                    mode=constants.REPLACE_DISK_CHG,
                                    early_release=early_r)
        jid = baserlib.SubmitJob([op])
      jobs.append((jid, iname, node))

    return jobs


class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    """
    node_name = self.items[0]

    if "live" in self.queryargs and "mode" in self.queryargs:
      raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                " be passed")
    elif "live" in self.queryargs:
      if self._checkIntVariable("live", default=1):
        mode = constants.HT_MIGRATION_LIVE
      else:
        mode = constants.HT_MIGRATION_NONLIVE
    else:
      mode = self._checkStringVariable("mode", default=None)

    op = opcodes.OpMigrateNode(node_name=node_name, mode=mode)

    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUQueryNodeStorage acquires locks, hence access to GET is restricted
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
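    # Required query parameters, checked below: "storage_type" and
    # "output_fields" (a comma-separated list of fields).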
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpQueryNodeStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  def PUT(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpModifyNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  def PUT(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])


def _ParseCreateGroupRequest(data, dry_run):
  """Parses a request for creating a node group.

  @rtype: L{opcodes.OpAddGroup}
  @return: Group creation opcode

  """
  group_name = baserlib.CheckParameter(data, "name")
  alloc_policy = baserlib.CheckParameter(data, "alloc_policy", default=None)

  return opcodes.OpAddGroup(group_name=group_name,
                            alloc_policy=alloc_policy,
                            dry_run=dry_run)


class R_2_groups(baserlib.R_Generic):
  """/2/groups resource.

  """
  def GET(self):
    """Returns a list of all node groups.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryGroups([], G_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, G_FIELDS)
    else:
      data = client.QueryGroups([], ["name"], False)
      groupnames = [row[0] for row in data]
      return baserlib.BuildUriList(groupnames, "/2/groups/%s",
                                   uri_fields=("name", "uri"))

  def POST(self):
    """Create a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
    return baserlib.SubmitJob([op])


class R_2_groups_name(baserlib.R_Generic):
  """/2/groups/[group_name] resources.

  """
  def GET(self):
    """Send information about a node group.

    """
    group_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryGroups,
                                            names=[group_name], fields=G_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(G_FIELDS, result[0])

  def DELETE(self):
    """Delete a node group.

    """
    op = opcodes.OpRemoveGroup(group_name=self.items[0],
                               dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


def _ParseModifyGroupRequest(name, data):
  """Parses a request for modifying a node group.

  @rtype: L{opcodes.OpSetGroupParams}
  @return: Group modify opcode

  """
  alloc_policy = baserlib.CheckParameter(data, "alloc_policy", default=None)
  return opcodes.OpSetGroupParams(group_name=name, alloc_policy=alloc_policy)


class R_2_groups_name_modify(baserlib.R_Generic):
  """/2/groups/[group_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseModifyGroupRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseRenameGroupRequest(name, data, dry_run):
  """Parses a request for renaming a node group.

  @type name: string
  @param name: name of the node group to rename
  @type data: dict
  @param data: the body received by the rename request
  @type dry_run: bool
  @param dry_run: whether to perform a dry run

  @rtype: L{opcodes.OpRenameGroup}
  @return: Node group rename opcode

  """
  old_name = name
  new_name = baserlib.CheckParameter(data, "new_name")

  return opcodes.OpRenameGroup(old_name=old_name, new_name=new_name,
                               dry_run=dry_run)


class R_2_groups_name_rename(baserlib.R_Generic):
  """/2/groups/[group_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    op = _ParseRenameGroupRequest(self.items[0], self.request_body,
                                  self.dryRun())
    return baserlib.SubmitJob([op])


def _ParseInstanceCreateRequestVersion1(data, dry_run):
  """Parses an instance creation request version 1.

  @rtype: L{opcodes.OpCreateInstance}
  @return: Instance creation opcode

  """
  # Disks
680
  disks_input = baserlib.CheckParameter(data, "disks", exptype=list)
681

    
682
  disks = []
683
  for idx, i in enumerate(disks_input):
684
    baserlib.CheckType(i, dict, "Disk %d specification" % idx)
685

    
686
    # Size is mandatory
687
    try:
688
      size = i[constants.IDISK_SIZE]
689
    except KeyError:
690
      raise http.HttpBadRequest("Disk %d specification wrong: missing disk"
691
                                " size" % idx)
692

    
693
    disk = {
694
      constants.IDISK_SIZE: size,
695
      }
696

    
697
    # Optional disk access mode
698
    try:
699
      disk_access = i[constants.IDISK_MODE]
700
    except KeyError:
701
      pass
702
    else:
703
      disk[constants.IDISK_MODE] = disk_access
704

    
705
    disks.append(disk)
706

    
707
  assert len(disks_input) == len(disks)
708

    
709
  # Network interfaces
710
  nics_input = baserlib.CheckParameter(data, "nics", exptype=list)
711

    
712
  nics = []
713
  for idx, i in enumerate(nics_input):
714
    baserlib.CheckType(i, dict, "NIC %d specification" % idx)
715

    
716
    nic = {}
717

    
718
    for field in constants.INIC_PARAMS:
719
      try:
720
        value = i[field]
721
      except KeyError:
722
        continue
723

    
724
      nic[field] = value
725

    
726
    nics.append(nic)
727

    
728
  assert len(nics_input) == len(nics)
729

    
730
  # HV/BE parameters
731
  hvparams = baserlib.CheckParameter(data, "hvparams", default={})
732
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
733

    
734
  beparams = baserlib.CheckParameter(data, "beparams", default={})
735
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
736

    
737
  return opcodes.OpCreateInstance(
738
    mode=baserlib.CheckParameter(data, "mode"),
739
    instance_name=baserlib.CheckParameter(data, "name"),
740
    os_type=baserlib.CheckParameter(data, "os"),
741
    osparams=baserlib.CheckParameter(data, "osparams", default={}),
742
    force_variant=baserlib.CheckParameter(data, "force_variant",
743
                                          default=False),
744
    no_install=baserlib.CheckParameter(data, "no_install", default=False),
745
    pnode=baserlib.CheckParameter(data, "pnode", default=None),
746
    snode=baserlib.CheckParameter(data, "snode", default=None),
747
    disk_template=baserlib.CheckParameter(data, "disk_template"),
748
    disks=disks,
749
    nics=nics,
750
    src_node=baserlib.CheckParameter(data, "src_node", default=None),
751
    src_path=baserlib.CheckParameter(data, "src_path", default=None),
752
    start=baserlib.CheckParameter(data, "start", default=True),
753
    wait_for_sync=True,
754
    ip_check=baserlib.CheckParameter(data, "ip_check", default=True),
755
    name_check=baserlib.CheckParameter(data, "name_check", default=True),
756
    file_storage_dir=baserlib.CheckParameter(data, "file_storage_dir",
757
                                             default=None),
758
    file_driver=baserlib.CheckParameter(data, "file_driver",
759
                                        default=constants.FD_LOOP),
760
    source_handshake=baserlib.CheckParameter(data, "source_handshake",
761
                                             default=None),
762
    source_x509_ca=baserlib.CheckParameter(data, "source_x509_ca",
763
                                           default=None),
764
    source_instance_name=baserlib.CheckParameter(data, "source_instance_name",
765
                                                 default=None),
766
    iallocator=baserlib.CheckParameter(data, "iallocator", default=None),
767
    hypervisor=baserlib.CheckParameter(data, "hypervisor", default=None),
768
    hvparams=hvparams,
769
    beparams=beparams,
770
    dry_run=dry_run,
771
    )
772

    
773

    
class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def _ParseVersion0CreateRequest(self):
    """Parses an instance creation request version 0.

    Request data version 0 is deprecated and should not be used anymore.

    @rtype: L{opcodes.OpCreateInstance}
    @return: Instance creation opcode

    """
    # Do not modify anymore, request data version 0 is deprecated
    beparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.BES_PARAMETERS)
    hvparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.HVS_PARAMETERS)
    fn = self.getBodyParameter

    # disk processing
    disk_data = fn('disks')
    if not isinstance(disk_data, list):
      raise http.HttpBadRequest("The 'disks' parameter should be a list")
    disks = []
    for idx, d in enumerate(disk_data):
      if not isinstance(d, int):
        raise http.HttpBadRequest("Disk %d specification wrong: should"
                                  " be an integer" % idx)
      disks.append({"size": d})

    # nic processing (one nic only)
    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
    if fn("ip", None) is not None:
      nics[0]["ip"] = fn("ip")
    if fn("mode", None) is not None:
      nics[0]["mode"] = fn("mode")
    if fn("link", None) is not None:
      nics[0]["link"] = fn("link")
    if fn("bridge", None) is not None:
      nics[0]["bridge"] = fn("bridge")

    # Do not modify anymore, request data version 0 is deprecated
    return opcodes.OpCreateInstance(
      mode=constants.INSTANCE_CREATE,
      instance_name=fn('name'),
      disks=disks,
      disk_template=fn('disk_template'),
      os_type=fn('os'),
      pnode=fn('pnode', None),
      snode=fn('snode', None),
      iallocator=fn('iallocator', None),
      nics=nics,
      start=fn('start', True),
      ip_check=fn('ip_check', True),
      name_check=fn('name_check', True),
      wait_for_sync=True,
      hypervisor=fn('hypervisor', None),
      hvparams=hvparams,
      beparams=beparams,
      file_storage_dir=fn('file_storage_dir', None),
      file_driver=fn('file_driver', constants.FD_LOOP),
      dry_run=bool(self.dryRun()),
      )

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
862
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")
863

    
864
    # Default to request data version 0
865
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
866

    
867
    if data_version == 0:
868
      op = self._ParseVersion0CreateRequest()
869
    elif data_version == 1:
870
      op = _ParseInstanceCreateRequestVersion1(self.request_body,
871
                                               self.dryRun())
872
    else:
873
      raise http.HttpBadRequest("Unsupported request data version %s" %
874
                                data_version)
875

    
876
    return baserlib.SubmitJob([op])
877

    
878

    
879
class R_2_instances_name(baserlib.R_Generic):
880
  """/2/instances/[instance_name] resources.
881

882
  """
883
  def GET(self):
884
    """Send information about an instance.
885

886
    """
887
    client = baserlib.GetClient()
888
    instance_name = self.items[0]
889

    
890
    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
891
                                            names=[instance_name],
892
                                            fields=I_FIELDS,
893
                                            use_locking=self.useLocking())
894

    
895
    return baserlib.MapFields(I_FIELDS, result[0])
896

    
897
  def DELETE(self):
898
    """Delete an instance.
899

900
    """
901
    op = opcodes.OpRemoveInstance(instance_name=self.items[0],
902
                                  ignore_failures=False,
903
                                  dry_run=bool(self.dryRun()))
904
    return baserlib.SubmitJob([op])
905

    
906

    
907
class R_2_instances_name_info(baserlib.R_Generic):
908
  """/2/instances/[instance_name]/info resource.
909

910
  """
911
  def GET(self):
912
    """Request detailed instance information.
913

914
    """
915
    instance_name = self.items[0]
916
    static = bool(self._checkIntVariable("static", default=0))
917

    
918
    op = opcodes.OpQueryInstanceData(instances=[instance_name],
919
                                     static=static)
920
    return baserlib.SubmitJob([op])
921

    
922

    
923
class R_2_instances_name_reboot(baserlib.R_Generic):
924
  """/2/instances/[instance_name]/reboot resource.
925

926
  Implements an instance reboot.
927

928
  """
929
  def POST(self):
930
    """Reboot an instance.
931

932
    The URI takes type=[hard|soft|full] and
933
    ignore_secondaries=[False|True] parameters.
934

935
    """
936
    instance_name = self.items[0]
937
    reboot_type = self.queryargs.get('type',
938
                                     [constants.INSTANCE_REBOOT_HARD])[0]
939
    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
940
    op = opcodes.OpRebootInstance(instance_name=instance_name,
941
                                  reboot_type=reboot_type,
942
                                  ignore_secondaries=ignore_secondaries,
943
                                  dry_run=bool(self.dryRun()))
944

    
945
    return baserlib.SubmitJob([op])
946

    
947

    
class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable('force'))
    op = opcodes.OpStartupInstance(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    """
    instance_name = self.items[0]
    op = opcodes.OpShutdownInstance(instance_name=instance_name,
                                    dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


def _ParseInstanceReinstallRequest(name, data):
  """Parses a request for reinstalling an instance.

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  ostype = baserlib.CheckParameter(data, "os")
  start = baserlib.CheckParameter(data, "start", exptype=bool,
                                  default=True)
  osparams = baserlib.CheckParameter(data, "osparams", default=None)

  ops = [
    opcodes.OpShutdownInstance(instance_name=name),
    opcodes.OpReinstallInstance(instance_name=name, os_type=ostype,
                                osparams=osparams),
    ]

  if start:
    ops.append(opcodes.OpStartupInstance(instance_name=name, force=False))

  return ops


class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    if self.request_body:
      if self.queryargs:
        raise http.HttpBadRequest("Can't combine query and body parameters")

      body = self.request_body
    else:
      if not self.queryargs:
        raise http.HttpBadRequest("Missing query parameters")
      # Legacy interface, do not modify/extend
      body = {
        "os": self._checkStringVariable("os"),
        "start": not self._checkIntVariable("nostartup"),
        }

    ops = _ParseInstanceReinstallRequest(self.items[0], body)

    return baserlib.SubmitJob(ops)


class R_2_instances_name_replace_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  def POST(self):
    """Replaces disks on an instance.

    """
    instance_name = self.items[0]
1053
    remote_node = self._checkStringVariable("remote_node", default=None)
1054
    mode = self._checkStringVariable("mode", default=None)
1055
    raw_disks = self._checkStringVariable("disks", default=None)
1056
    iallocator = self._checkStringVariable("iallocator", default=None)
1057

    
1058
    if raw_disks:
1059
      try:
1060
        disks = [int(part) for part in raw_disks.split(",")]
1061
      except ValueError, err:
1062
        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
1063
    else:
1064
      disks = []
1065

    
1066
    op = opcodes.OpReplaceDisks(instance_name=instance_name,
1067
                                remote_node=remote_node,
1068
                                mode=mode,
1069
                                disks=disks,
1070
                                iallocator=iallocator)
1071

    
1072
    return baserlib.SubmitJob([op])
1073

    
1074

    
1075
class R_2_instances_name_activate_disks(baserlib.R_Generic):
1076
  """/2/instances/[instance_name]/activate-disks resource.
1077

1078
  """
1079
  def PUT(self):
1080
    """Activate disks for an instance.
1081

1082
    The URI might contain ignore_size to ignore current recorded size.
1083

1084
    """
1085
    instance_name = self.items[0]
1086
    ignore_size = bool(self._checkIntVariable('ignore_size'))
1087

    
1088
    op = opcodes.OpActivateInstanceDisks(instance_name=instance_name,
1089
                                         ignore_size=ignore_size)
1090

    
1091
    return baserlib.SubmitJob([op])
1092

    
1093

    
1094
class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
1095
  """/2/instances/[instance_name]/deactivate-disks resource.
1096

1097
  """
1098
  def PUT(self):
1099
    """Deactivate disks for an instance.
1100

1101
    """
1102
    instance_name = self.items[0]
1103

    
1104
    op = opcodes.OpDeactivateInstanceDisks(instance_name=instance_name)
1105

    
1106
    return baserlib.SubmitJob([op])
1107

    
1108

    
1109
class R_2_instances_name_prepare_export(baserlib.R_Generic):
1110
  """/2/instances/[instance_name]/prepare-export resource.
1111

1112
  """
1113
  def PUT(self):
1114
    """Prepares an export for an instance.
1115

1116
    @return: a job id
1117

1118
    """
1119
    instance_name = self.items[0]
1120
    mode = self._checkStringVariable("mode")
1121

    
1122
    op = opcodes.OpPrepareExport(instance_name=instance_name,
1123
                                 mode=mode)
1124

    
1125
    return baserlib.SubmitJob([op])
1126

    
1127

    
1128
def _ParseExportInstanceRequest(name, data):
1129
  """Parses a request for an instance export.
1130

1131
  @rtype: L{opcodes.OpExportInstance}
1132
  @return: Instance export opcode
1133

1134
  """
1135
  mode = baserlib.CheckParameter(data, "mode",
1136
                                 default=constants.EXPORT_MODE_LOCAL)
1137
  target_node = baserlib.CheckParameter(data, "destination")
1138
  shutdown = baserlib.CheckParameter(data, "shutdown", exptype=bool)
1139
  remove_instance = baserlib.CheckParameter(data, "remove_instance",
1140
                                            exptype=bool, default=False)
1141
  x509_key_name = baserlib.CheckParameter(data, "x509_key_name", default=None)
1142
  destination_x509_ca = baserlib.CheckParameter(data, "destination_x509_ca",
1143
                                                default=None)
1144

    
1145
  return opcodes.OpExportInstance(instance_name=name,
1146
                                  mode=mode,
1147
                                  target_node=target_node,
1148
                                  shutdown=shutdown,
1149
                                  remove_instance=remove_instance,
1150
                                  x509_key_name=x509_key_name,
1151
                                  destination_x509_ca=destination_x509_ca)
1152

    
1153

    
1154
class R_2_instances_name_export(baserlib.R_Generic):
1155
  """/2/instances/[instance_name]/export resource.
1156

1157
  """
1158
  def PUT(self):
1159
    """Exports an instance.
1160

1161
    @return: a job id
1162

1163
    """
1164
    if not isinstance(self.request_body, dict):
1165
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")
1166

    
1167
    op = _ParseExportInstanceRequest(self.items[0], self.request_body)
1168

    
1169
    return baserlib.SubmitJob([op])
1170

    
1171

    
def _ParseMigrateInstanceRequest(name, data):
  """Parses a request for an instance migration.

  @rtype: L{opcodes.OpMigrateInstance}
  @return: Instance migration opcode

  """
  mode = baserlib.CheckParameter(data, "mode", default=None)
  cleanup = baserlib.CheckParameter(data, "cleanup", exptype=bool,
                                    default=False)

  return opcodes.OpMigrateInstance(instance_name=name, mode=mode,
                                   cleanup=cleanup)


class R_2_instances_name_migrate(baserlib.R_Generic):
  """/2/instances/[instance_name]/migrate resource.

  """
  def PUT(self):
    """Migrates an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseRenameInstanceRequest(name, data):
  """Parses a request for renaming an instance.

  @rtype: L{opcodes.OpRenameInstance}
  @return: Instance rename opcode

  """
  new_name = baserlib.CheckParameter(data, "new_name")
  ip_check = baserlib.CheckParameter(data, "ip_check", default=True)
  name_check = baserlib.CheckParameter(data, "name_check", default=True)

  return opcodes.OpRenameInstance(instance_name=name, new_name=new_name,
                                  name_check=name_check, ip_check=ip_check)


class R_2_instances_name_rename(baserlib.R_Generic):
  """/2/instances/[instance_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseRenameInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseModifyInstanceRequest(name, data):
  """Parses a request for modifying an instance.

  @rtype: L{opcodes.OpSetInstanceParams}
  @return: Instance modify opcode

  """
  osparams = baserlib.CheckParameter(data, "osparams", default={})
  force = baserlib.CheckParameter(data, "force", default=False)
  nics = baserlib.CheckParameter(data, "nics", default=[])
  disks = baserlib.CheckParameter(data, "disks", default=[])
  disk_template = baserlib.CheckParameter(data, "disk_template", default=None)
  remote_node = baserlib.CheckParameter(data, "remote_node", default=None)
  os_name = baserlib.CheckParameter(data, "os_name", default=None)
  force_variant = baserlib.CheckParameter(data, "force_variant", default=False)

  # HV/BE parameters
  hvparams = baserlib.CheckParameter(data, "hvparams", default={})
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES,
                      allowed_values=[constants.VALUE_DEFAULT])

  beparams = baserlib.CheckParameter(data, "beparams", default={})
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES,
                      allowed_values=[constants.VALUE_DEFAULT])

  return opcodes.OpSetInstanceParams(instance_name=name, hvparams=hvparams,
                                     beparams=beparams, osparams=osparams,
                                     force=force, nics=nics, disks=disks,
                                     disk_template=disk_template,
                                     remote_node=remote_node, os_name=os_name,
                                     force_variant=force_variant)


class R_2_instances_name_modify(baserlib.R_Generic):
  """/2/instances/[instance_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseModifyInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


class _R_Tags(baserlib.R_Generic):
  """ Quasi-class for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out the cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      self.name = None
    else:
      self.name = items[0]

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable-msg=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The tags to be added must be given via the 'tag' query parameter
    (which may be repeated); you'll get back a job id.

    """
    # pylint: disable-msg=W0212
1325
    if 'tag' not in self.queryargs:
1326
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
1327
                                " the 'tag' parameter")
1328
    return baserlib._Tags_PUT(self.TAG_LEVEL,
1329
                              self.queryargs['tag'], name=self.name,
1330
                              dry_run=bool(self.dryRun()))
1331

    
1332
  def DELETE(self):
1333
    """Delete a tag.
1334

1335
    In order to delete a set of tags, the DELETE
1336
    request should be addressed to URI like:
1337
    /tags?tag=[tag]&tag=[tag]
1338

1339
    """
1340
    # pylint: disable-msg=W0212
1341
    if 'tag' not in self.queryargs:
1342
      # no we not gonna delete all tags
1343
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
1344
                                " tag(s) using the 'tag' parameter")
1345
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
1346
                                 self.queryargs['tag'],
1347
                                 name=self.name,
1348
                                 dry_run=bool(self.dryRun()))
1349

    
1350

    
1351
class R_2_instances_name_tags(_R_Tags):
1352
  """ /2/instances/[instance_name]/tags resource.
1353

1354
  Manages per-instance tags.
1355

1356
  """
1357
  TAG_LEVEL = constants.TAG_INSTANCE
1358

    
1359

    
1360
class R_2_nodes_name_tags(_R_Tags):
1361
  """ /2/nodes/[node_name]/tags resource.
1362

1363
  Manages per-node tags.
1364

1365
  """
1366
  TAG_LEVEL = constants.TAG_NODE
1367

    
1368

    
1369
class R_2_tags(_R_Tags):
1370
  """ /2/instances/tags resource.
1371

1372
  Manages cluster tags.
1373

1374
  """
1375
  TAG_LEVEL = constants.TAG_CLUSTER