Statistics
| Branch: | Tag: | Revision:

root / lib / backend.py @ 2b8322f7

History | View | Annotate | Download (148.8 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Functions used by the node daemon
23

24
@var _ALLOWED_UPLOAD_FILES: denotes which files are accepted in
25
     the L{UploadFile} function
26
@var _ALLOWED_CLEAN_DIRS: denotes which directories are accepted
27
     in the L{_CleanDirectory} function
28

29
"""
30

    
31
# pylint: disable=E1103,C0302
32

    
33
# E1103: %s %r has no %r member (but some types could not be
34
# inferred), because the _TryOSFromDisk returns either (True, os_obj)
35
# or (False, "string") which confuses pylint
36

    
37
# C0302: This module has become too big and should be split up
38

    
39

    
40
import base64
41
import errno
42
import logging
43
import os
44
import os.path
45
import pycurl
46
import random
47
import re
48
import shutil
49
import signal
50
import stat
51
import tempfile
52
import time
53
import zlib
54

    
55
from ganeti import errors
56
from ganeti import http
57
from ganeti import utils
58
from ganeti import ssh
59
from ganeti import hypervisor
60
from ganeti import constants
61
from ganeti.storage import bdev
62
from ganeti.storage import drbd
63
from ganeti.storage import filestorage
64
from ganeti import objects
65
from ganeti import ssconf
66
from ganeti import serializer
67
from ganeti import netutils
68
from ganeti import runtime
69
from ganeti import compat
70
from ganeti import pathutils
71
from ganeti import vcluster
72
from ganeti import ht
73
from ganeti.storage.base import BlockDev
74
from ganeti.storage.drbd import DRBD8
75
from ganeti import hooksmaster
76

    
77

    
78
_BOOT_ID_PATH = "/proc/sys/kernel/random/boot_id"
79
_ALLOWED_CLEAN_DIRS = compat.UniqueFrozenset([
80
  pathutils.DATA_DIR,
81
  pathutils.JOB_QUEUE_ARCHIVE_DIR,
82
  pathutils.QUEUE_DIR,
83
  pathutils.CRYPTO_KEYS_DIR,
84
  ])
85
_MAX_SSL_CERT_VALIDITY = 7 * 24 * 60 * 60
86
_X509_KEY_FILE = "key"
87
_X509_CERT_FILE = "cert"
88
_IES_STATUS_FILE = "status"
89
_IES_PID_FILE = "pid"
90
_IES_CA_FILE = "ca"
91

    
92
#: Valid LVS output line regex
93
_LVSLINE_REGEX = re.compile(r"^ *([^|]+)\|([^|]+)\|([0-9.]+)\|([^|]{6,})\|?$")
94

    
95
# Actions for the master setup script
96
_MASTER_START = "start"
97
_MASTER_STOP = "stop"
98

    
99
#: Maximum file permissions for restricted command directory and executables
100
_RCMD_MAX_MODE = (stat.S_IRWXU |
101
                  stat.S_IRGRP | stat.S_IXGRP |
102
                  stat.S_IROTH | stat.S_IXOTH)
103

    
104
#: Delay before returning an error for restricted commands
105
_RCMD_INVALID_DELAY = 10
106

    
107
#: How long to wait to acquire lock for restricted commands (shorter than
108
#: L{_RCMD_INVALID_DELAY}) to reduce blockage of noded forks when many
109
#: command requests arrive
110
_RCMD_LOCK_TIMEOUT = _RCMD_INVALID_DELAY * 0.8
111

    
112

    
113
class RPCFail(Exception):
114
  """Class denoting RPC failure.
115

116
  Its argument is the error message.
117

118
  """
119

    
120

    
121
def _GetInstReasonFilename(instance_name):
122
  """Path of the file containing the reason of the instance status change.
123

124
  @type instance_name: string
125
  @param instance_name: The name of the instance
126
  @rtype: string
127
  @return: The path of the file
128

129
  """
130
  return utils.PathJoin(pathutils.INSTANCE_REASON_DIR, instance_name)
131

    
132

    
133
def _StoreInstReasonTrail(instance_name, trail):
134
  """Serialize a reason trail related to an instance change of state to file.
135

136
  The exact location of the file depends on the name of the instance and on
137
  the configuration of the Ganeti cluster defined at deploy time.
138

139
  @type instance_name: string
140
  @param instance_name: The name of the instance
141

142
  @type trail: list of reasons
143
  @param trail: reason trail
144

145
  @rtype: None
146

147
  """
148
  json = serializer.DumpJson(trail)
149
  filename = _GetInstReasonFilename(instance_name)
150
  utils.WriteFile(filename, data=json)
151

    
152

    
153
def _Fail(msg, *args, **kwargs):
154
  """Log an error and the raise an RPCFail exception.
155

156
  This exception is then handled specially in the ganeti daemon and
157
  turned into a 'failed' return type. As such, this function is a
158
  useful shortcut for logging the error and returning it to the master
159
  daemon.
160

161
  @type msg: string
162
  @param msg: the text of the exception
163
  @raise RPCFail
164

165
  """
166
  if args:
167
    msg = msg % args
168
  if "log" not in kwargs or kwargs["log"]: # if we should log this error
169
    if "exc" in kwargs and kwargs["exc"]:
170
      logging.exception(msg)
171
    else:
172
      logging.error(msg)
173
  raise RPCFail(msg)
174

    
175

    
176
def _GetConfig():
177
  """Simple wrapper to return a SimpleStore.
178

179
  @rtype: L{ssconf.SimpleStore}
180
  @return: a SimpleStore instance
181

182
  """
183
  return ssconf.SimpleStore()
184

    
185

    
186
def _GetSshRunner(cluster_name):
187
  """Simple wrapper to return an SshRunner.
188

189
  @type cluster_name: str
190
  @param cluster_name: the cluster name, which is needed
191
      by the SshRunner constructor
192
  @rtype: L{ssh.SshRunner}
193
  @return: an SshRunner instance
194

195
  """
196
  return ssh.SshRunner(cluster_name)
197

    
198

    
199
def _Decompress(data):
200
  """Unpacks data compressed by the RPC client.
201

202
  @type data: list or tuple
203
  @param data: Data sent by RPC client
204
  @rtype: str
205
  @return: Decompressed data
206

207
  """
208
  assert isinstance(data, (list, tuple))
209
  assert len(data) == 2
210
  (encoding, content) = data
211
  if encoding == constants.RPC_ENCODING_NONE:
212
    return content
213
  elif encoding == constants.RPC_ENCODING_ZLIB_BASE64:
214
    return zlib.decompress(base64.b64decode(content))
215
  else:
216
    raise AssertionError("Unknown data encoding")
217

    
218

    
219
def _CleanDirectory(path, exclude=None):
220
  """Removes all regular files in a directory.
221

222
  @type path: str
223
  @param path: the directory to clean
224
  @type exclude: list
225
  @param exclude: list of files to be excluded, defaults
226
      to the empty list
227

228
  """
229
  if path not in _ALLOWED_CLEAN_DIRS:
230
    _Fail("Path passed to _CleanDirectory not in allowed clean targets: '%s'",
231
          path)
232

    
233
  if not os.path.isdir(path):
234
    return
235
  if exclude is None:
236
    exclude = []
237
  else:
238
    # Normalize excluded paths
239
    exclude = [os.path.normpath(i) for i in exclude]
240

    
241
  for rel_name in utils.ListVisibleFiles(path):
242
    full_name = utils.PathJoin(path, rel_name)
243
    if full_name in exclude:
244
      continue
245
    if os.path.isfile(full_name) and not os.path.islink(full_name):
246
      utils.RemoveFile(full_name)
247

    
248

    
249
def _BuildUploadFileList():
250
  """Build the list of allowed upload files.
251

252
  This is abstracted so that it's built only once at module import time.
253

254
  """
255
  allowed_files = set([
256
    pathutils.CLUSTER_CONF_FILE,
257
    pathutils.ETC_HOSTS,
258
    pathutils.SSH_KNOWN_HOSTS_FILE,
259
    pathutils.VNC_PASSWORD_FILE,
260
    pathutils.RAPI_CERT_FILE,
261
    pathutils.SPICE_CERT_FILE,
262
    pathutils.SPICE_CACERT_FILE,
263
    pathutils.RAPI_USERS_FILE,
264
    pathutils.CONFD_HMAC_KEY,
265
    pathutils.CLUSTER_DOMAIN_SECRET_FILE,
266
    ])
267

    
268
  for hv_name in constants.HYPER_TYPES:
269
    hv_class = hypervisor.GetHypervisorClass(hv_name)
270
    allowed_files.update(hv_class.GetAncillaryFiles()[0])
271

    
272
  assert pathutils.FILE_STORAGE_PATHS_FILE not in allowed_files, \
273
    "Allowed file storage paths should never be uploaded via RPC"
274

    
275
  return frozenset(allowed_files)
276

    
277

    
278
_ALLOWED_UPLOAD_FILES = _BuildUploadFileList()
279

    
280

    
281
def JobQueuePurge():
282
  """Removes job queue files and archived jobs.
283

284
  @rtype: tuple
285
  @return: True, None
286

287
  """
288
  _CleanDirectory(pathutils.QUEUE_DIR, exclude=[pathutils.JOB_QUEUE_LOCK_FILE])
289
  _CleanDirectory(pathutils.JOB_QUEUE_ARCHIVE_DIR)
290

    
291

    
292
def GetMasterNodeName():
293
  """Returns the master node name.
294

295
  @rtype: string
296
  @return: name of the master node
297
  @raise RPCFail: in case of errors
298

299
  """
300
  try:
301
    return _GetConfig().GetMasterNode()
302
  except errors.ConfigurationError, err:
303
    _Fail("Cluster configuration incomplete: %s", err, exc=True)
304

    
305

    
306
def RunLocalHooks(hook_opcode, hooks_path, env_builder_fn):
307
  """Decorator that runs hooks before and after the decorated function.
308

309
  @type hook_opcode: string
310
  @param hook_opcode: opcode of the hook
311
  @type hooks_path: string
312
  @param hooks_path: path of the hooks
313
  @type env_builder_fn: function
314
  @param env_builder_fn: function that returns a dictionary containing the
315
    environment variables for the hooks. Will get all the parameters of the
316
    decorated function.
317
  @raise RPCFail: in case of pre-hook failure
318

319
  """
320
  def decorator(fn):
321
    def wrapper(*args, **kwargs):
322
      _, myself = ssconf.GetMasterAndMyself()
323
      nodes = ([myself], [myself])  # these hooks run locally
324

    
325
      env_fn = compat.partial(env_builder_fn, *args, **kwargs)
326

    
327
      cfg = _GetConfig()
328
      hr = HooksRunner()
329
      hm = hooksmaster.HooksMaster(hook_opcode, hooks_path, nodes,
330
                                   hr.RunLocalHooks, None, env_fn, None,
331
                                   logging.warning, cfg.GetClusterName(),
332
                                   cfg.GetMasterNode())
333
      hm.RunPhase(constants.HOOKS_PHASE_PRE)
334
      result = fn(*args, **kwargs)
335
      hm.RunPhase(constants.HOOKS_PHASE_POST)
336

    
337
      return result
338
    return wrapper
339
  return decorator
340

    
341

    
342
def _BuildMasterIpEnv(master_params, use_external_mip_script=None):
343
  """Builds environment variables for master IP hooks.
344

345
  @type master_params: L{objects.MasterNetworkParameters}
346
  @param master_params: network parameters of the master
347
  @type use_external_mip_script: boolean
348
  @param use_external_mip_script: whether to use an external master IP
349
    address setup script (unused, but necessary per the implementation of the
350
    _RunLocalHooks decorator)
351

352
  """
353
  # pylint: disable=W0613
354
  ver = netutils.IPAddress.GetVersionFromAddressFamily(master_params.ip_family)
355
  env = {
356
    "MASTER_NETDEV": master_params.netdev,
357
    "MASTER_IP": master_params.ip,
358
    "MASTER_NETMASK": str(master_params.netmask),
359
    "CLUSTER_IP_VERSION": str(ver),
360
  }
361

    
362
  return env
363

    
364

    
365
def _RunMasterSetupScript(master_params, action, use_external_mip_script):
366
  """Execute the master IP address setup script.
367

368
  @type master_params: L{objects.MasterNetworkParameters}
369
  @param master_params: network parameters of the master
370
  @type action: string
371
  @param action: action to pass to the script. Must be one of
372
    L{backend._MASTER_START} or L{backend._MASTER_STOP}
373
  @type use_external_mip_script: boolean
374
  @param use_external_mip_script: whether to use an external master IP
375
    address setup script
376
  @raise backend.RPCFail: if there are errors during the execution of the
377
    script
378

379
  """
380
  env = _BuildMasterIpEnv(master_params)
381

    
382
  if use_external_mip_script:
383
    setup_script = pathutils.EXTERNAL_MASTER_SETUP_SCRIPT
384
  else:
385
    setup_script = pathutils.DEFAULT_MASTER_SETUP_SCRIPT
386

    
387
  result = utils.RunCmd([setup_script, action], env=env, reset_env=True)
388

    
389
  if result.failed:
390
    _Fail("Failed to %s the master IP. Script return value: %s, output: '%s'" %
391
          (action, result.exit_code, result.output), log=True)
392

    
393

    
394
@RunLocalHooks(constants.FAKE_OP_MASTER_TURNUP, "master-ip-turnup",
395
               _BuildMasterIpEnv)
396
def ActivateMasterIp(master_params, use_external_mip_script):
397
  """Activate the IP address of the master daemon.
398

399
  @type master_params: L{objects.MasterNetworkParameters}
400
  @param master_params: network parameters of the master
401
  @type use_external_mip_script: boolean
402
  @param use_external_mip_script: whether to use an external master IP
403
    address setup script
404
  @raise RPCFail: in case of errors during the IP startup
405

406
  """
407
  _RunMasterSetupScript(master_params, _MASTER_START,
408
                        use_external_mip_script)
409

    
410

    
411
def StartMasterDaemons(no_voting):
412
  """Activate local node as master node.
413

414
  The function will start the master daemons (ganeti-masterd and ganeti-rapi).
415

416
  @type no_voting: boolean
417
  @param no_voting: whether to start ganeti-masterd without a node vote
418
      but still non-interactively
419
  @rtype: None
420

421
  """
422

    
423
  if no_voting:
424
    masterd_args = "--no-voting --yes-do-it"
425
  else:
426
    masterd_args = ""
427

    
428
  env = {
429
    "EXTRA_MASTERD_ARGS": masterd_args,
430
    }
431

    
432
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-master"], env=env)
433
  if result.failed:
434
    msg = "Can't start Ganeti master: %s" % result.output
435
    logging.error(msg)
436
    _Fail(msg)
437

    
438

    
439
@RunLocalHooks(constants.FAKE_OP_MASTER_TURNDOWN, "master-ip-turndown",
440
               _BuildMasterIpEnv)
441
def DeactivateMasterIp(master_params, use_external_mip_script):
442
  """Deactivate the master IP on this node.
443

444
  @type master_params: L{objects.MasterNetworkParameters}
445
  @param master_params: network parameters of the master
446
  @type use_external_mip_script: boolean
447
  @param use_external_mip_script: whether to use an external master IP
448
    address setup script
449
  @raise RPCFail: in case of errors during the IP turndown
450

451
  """
452
  _RunMasterSetupScript(master_params, _MASTER_STOP,
453
                        use_external_mip_script)
454

    
455

    
456
def StopMasterDaemons():
457
  """Stop the master daemons on this node.
458

459
  Stop the master daemons (ganeti-masterd and ganeti-rapi) on this node.
460

461
  @rtype: None
462

463
  """
464
  # TODO: log and report back to the caller the error failures; we
465
  # need to decide in which case we fail the RPC for this
466

    
467
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-master"])
468
  if result.failed:
469
    logging.error("Could not stop Ganeti master, command %s had exitcode %s"
470
                  " and error %s",
471
                  result.cmd, result.exit_code, result.output)
472

    
473

    
474
def ChangeMasterNetmask(old_netmask, netmask, master_ip, master_netdev):
475
  """Change the netmask of the master IP.
476

477
  @param old_netmask: the old value of the netmask
478
  @param netmask: the new value of the netmask
479
  @param master_ip: the master IP
480
  @param master_netdev: the master network device
481

482
  """
483
  if old_netmask == netmask:
484
    return
485

    
486
  if not netutils.IPAddress.Own(master_ip):
487
    _Fail("The master IP address is not up, not attempting to change its"
488
          " netmask")
489

    
490
  result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "add",
491
                         "%s/%s" % (master_ip, netmask),
492
                         "dev", master_netdev, "label",
493
                         "%s:0" % master_netdev])
494
  if result.failed:
495
    _Fail("Could not set the new netmask on the master IP address")
496

    
497
  result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "del",
498
                         "%s/%s" % (master_ip, old_netmask),
499
                         "dev", master_netdev, "label",
500
                         "%s:0" % master_netdev])
501
  if result.failed:
502
    _Fail("Could not bring down the master IP address with the old netmask")
503

    
504

    
505
def EtcHostsModify(mode, host, ip):
506
  """Modify a host entry in /etc/hosts.
507

508
  @param mode: The mode to operate. Either add or remove entry
509
  @param host: The host to operate on
510
  @param ip: The ip associated with the entry
511

512
  """
513
  if mode == constants.ETC_HOSTS_ADD:
514
    if not ip:
515
      RPCFail("Mode 'add' needs 'ip' parameter, but parameter not"
516
              " present")
517
    utils.AddHostToEtcHosts(host, ip)
518
  elif mode == constants.ETC_HOSTS_REMOVE:
519
    if ip:
520
      RPCFail("Mode 'remove' does not allow 'ip' parameter, but"
521
              " parameter is present")
522
    utils.RemoveHostFromEtcHosts(host)
523
  else:
524
    RPCFail("Mode not supported")
525

    
526

    
527
def LeaveCluster(modify_ssh_setup):
528
  """Cleans up and remove the current node.
529

530
  This function cleans up and prepares the current node to be removed
531
  from the cluster.
532

533
  If processing is successful, then it raises an
534
  L{errors.QuitGanetiException} which is used as a special case to
535
  shutdown the node daemon.
536

537
  @param modify_ssh_setup: boolean
538

539
  """
540
  _CleanDirectory(pathutils.DATA_DIR)
541
  _CleanDirectory(pathutils.CRYPTO_KEYS_DIR)
542
  JobQueuePurge()
543

    
544
  if modify_ssh_setup:
545
    try:
546
      priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.SSH_LOGIN_USER)
547

    
548
      utils.RemoveAuthorizedKey(auth_keys, utils.ReadFile(pub_key))
549

    
550
      utils.RemoveFile(priv_key)
551
      utils.RemoveFile(pub_key)
552
    except errors.OpExecError:
553
      logging.exception("Error while processing ssh files")
554

    
555
  try:
556
    utils.RemoveFile(pathutils.CONFD_HMAC_KEY)
557
    utils.RemoveFile(pathutils.RAPI_CERT_FILE)
558
    utils.RemoveFile(pathutils.SPICE_CERT_FILE)
559
    utils.RemoveFile(pathutils.SPICE_CACERT_FILE)
560
    utils.RemoveFile(pathutils.NODED_CERT_FILE)
561
  except: # pylint: disable=W0702
562
    logging.exception("Error while removing cluster secrets")
563

    
564
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop", constants.CONFD])
565
  if result.failed:
566
    logging.error("Command %s failed with exitcode %s and error %s",
567
                  result.cmd, result.exit_code, result.output)
568

    
569
  # Raise a custom exception (handled in ganeti-noded)
570
  raise errors.QuitGanetiException(True, "Shutdown scheduled")
571

    
572

    
573
def _CheckStorageParams(params, num_params):
574
  """Performs sanity checks for storage parameters.
575

576
  @type params: list
577
  @param params: list of storage parameters
578
  @type num_params: int
579
  @param num_params: expected number of parameters
580

581
  """
582
  if params is None:
583
    raise errors.ProgrammerError("No storage parameters for storage"
584
                                 " reporting is provided.")
585
  if not isinstance(params, list):
586
    raise errors.ProgrammerError("The storage parameters are not of type"
587
                                 " list: '%s'" % params)
588
  if not len(params) == num_params:
589
    raise errors.ProgrammerError("Did not receive the expected number of"
590
                                 "storage parameters: expected %s,"
591
                                 " received '%s'" % (num_params, len(params)))
592

    
593

    
594
def _CheckLvmStorageParams(params):
595
  """Performs sanity check for the 'exclusive storage' flag.
596

597
  @see: C{_CheckStorageParams}
598

599
  """
600
  _CheckStorageParams(params, 1)
601
  excl_stor = params[0]
602
  if not isinstance(params[0], bool):
603
    raise errors.ProgrammerError("Exclusive storage parameter is not"
604
                                 " boolean: '%s'." % excl_stor)
605
  return excl_stor
606

    
607

    
608
def _GetLvmVgSpaceInfo(name, params):
609
  """Wrapper around C{_GetVgInfo} which checks the storage parameters.
610

611
  @type name: string
612
  @param name: name of the volume group
613
  @type params: list
614
  @param params: list of storage parameters, which in this case should be
615
    containing only one for exclusive storage
616

617
  """
618
  excl_stor = _CheckLvmStorageParams(params)
619
  return _GetVgInfo(name, excl_stor)
620

    
621

    
622
def _GetVgInfo(
623
    name, excl_stor, info_fn=bdev.LogicalVolume.GetVGInfo):
624
  """Retrieves information about a LVM volume group.
625

626
  """
627
  # TODO: GetVGInfo supports returning information for multiple VGs at once
628
  vginfo = info_fn([name], excl_stor)
629
  if vginfo:
630
    vg_free = int(round(vginfo[0][0], 0))
631
    vg_size = int(round(vginfo[0][1], 0))
632
  else:
633
    vg_free = None
634
    vg_size = None
635

    
636
  return {
637
    "type": constants.ST_LVM_VG,
638
    "name": name,
639
    "storage_free": vg_free,
640
    "storage_size": vg_size,
641
    }
642

    
643

    
644
def _GetLvmPvSpaceInfo(name, params):
645
  """Wrapper around C{_GetVgSpindlesInfo} with sanity checks.
646

647
  @see: C{_GetLvmVgSpaceInfo}
648

649
  """
650
  excl_stor = _CheckLvmStorageParams(params)
651
  return _GetVgSpindlesInfo(name, excl_stor)
652

    
653

    
654
def _GetVgSpindlesInfo(
655
    name, excl_stor, info_fn=bdev.LogicalVolume.GetVgSpindlesInfo):
656
  """Retrieves information about spindles in an LVM volume group.
657

658
  @type name: string
659
  @param name: VG name
660
  @type excl_stor: bool
661
  @param excl_stor: exclusive storage
662
  @rtype: dict
663
  @return: dictionary whose keys are "name", "vg_free", "vg_size" for VG name,
664
      free spindles, total spindles respectively
665

666
  """
667
  if excl_stor:
668
    (vg_free, vg_size) = info_fn(name)
669
  else:
670
    vg_free = 0
671
    vg_size = 0
672
  return {
673
    "type": constants.ST_LVM_PV,
674
    "name": name,
675
    "storage_free": vg_free,
676
    "storage_size": vg_size,
677
    }
678

    
679

    
680
def _GetHvInfo(name, hvparams, get_hv_fn=hypervisor.GetHypervisor):
681
  """Retrieves node information from a hypervisor.
682

683
  The information returned depends on the hypervisor. Common items:
684

685
    - vg_size is the size of the configured volume group in MiB
686
    - vg_free is the free size of the volume group in MiB
687
    - memory_dom0 is the memory allocated for domain0 in MiB
688
    - memory_free is the currently available (free) ram in MiB
689
    - memory_total is the total number of ram in MiB
690
    - hv_version: the hypervisor version, if available
691

692
  @type hvparams: dict of string
693
  @param hvparams: the hypervisor's hvparams
694

695
  """
696
  return get_hv_fn(name).GetNodeInfo(hvparams=hvparams)
697

    
698

    
699
def _GetHvInfoAll(hv_specs, get_hv_fn=hypervisor.GetHypervisor):
700
  """Retrieves node information for all hypervisors.
701

702
  See C{_GetHvInfo} for information on the output.
703

704
  @type hv_specs: list of pairs (string, dict of strings)
705
  @param hv_specs: list of pairs of a hypervisor's name and its hvparams
706

707
  """
708
  if hv_specs is None:
709
    return None
710

    
711
  result = []
712
  for hvname, hvparams in hv_specs:
713
    result.append(_GetHvInfo(hvname, hvparams, get_hv_fn))
714
  return result
715

    
716

    
717
def _GetNamedNodeInfo(names, fn):
718
  """Calls C{fn} for all names in C{names} and returns a dictionary.
719

720
  @rtype: None or dict
721

722
  """
723
  if names is None:
724
    return None
725
  else:
726
    return map(fn, names)
727

    
728

    
729
def GetNodeInfo(storage_units, hv_specs):
730
  """Gives back a hash with different information about the node.
731

732
  @type storage_units: list of tuples (string, string, list)
733
  @param storage_units: List of tuples (storage unit, identifier, parameters) to
734
    ask for disk space information. In case of lvm-vg, the identifier is
735
    the VG name. The parameters can contain additional, storage-type-specific
736
    parameters, for example exclusive storage for lvm storage.
737
  @type hv_specs: list of pairs (string, dict of strings)
738
  @param hv_specs: list of pairs of a hypervisor's name and its hvparams
739
  @rtype: tuple; (string, None/dict, None/dict)
740
  @return: Tuple containing boot ID, volume group information and hypervisor
741
    information
742

743
  """
744
  bootid = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n")
745
  storage_info = _GetNamedNodeInfo(
746
    storage_units,
747
    (lambda (storage_type, storage_key, storage_params):
748
        _ApplyStorageInfoFunction(storage_type, storage_key, storage_params)))
749
  hv_info = _GetHvInfoAll(hv_specs)
750
  return (bootid, storage_info, hv_info)
751

    
752

    
753
def _GetFileStorageSpaceInfo(path, params):
754
  """Wrapper around filestorage.GetSpaceInfo.
755

756
  The purpose of this wrapper is to call filestorage.GetFileStorageSpaceInfo
757
  and ignore the *args parameter to not leak it into the filestorage
758
  module's code.
759

760
  @see: C{filestorage.GetFileStorageSpaceInfo} for description of the
761
    parameters.
762

763
  """
764
  _CheckStorageParams(params, 0)
765
  return filestorage.GetFileStorageSpaceInfo(path)
766

    
767

    
768
# FIXME: implement storage reporting for all missing storage types.
769
_STORAGE_TYPE_INFO_FN = {
770
  constants.ST_BLOCK: None,
771
  constants.ST_DISKLESS: None,
772
  constants.ST_EXT: None,
773
  constants.ST_FILE: _GetFileStorageSpaceInfo,
774
  constants.ST_LVM_PV: _GetLvmPvSpaceInfo,
775
  constants.ST_LVM_VG: _GetLvmVgSpaceInfo,
776
  constants.ST_SHARED_FILE: None,
777
  constants.ST_RADOS: None,
778
}
779

    
780

    
781
def _ApplyStorageInfoFunction(storage_type, storage_key, *args):
782
  """Looks up and applies the correct function to calculate free and total
783
  storage for the given storage type.
784

785
  @type storage_type: string
786
  @param storage_type: the storage type for which the storage shall be reported.
787
  @type storage_key: string
788
  @param storage_key: identifier of a storage unit, e.g. the volume group name
789
    of an LVM storage unit
790
  @type args: any
791
  @param args: various parameters that can be used for storage reporting. These
792
    parameters and their semantics vary from storage type to storage type and
793
    are just propagated in this function.
794
  @return: the results of the application of the storage space function (see
795
    _STORAGE_TYPE_INFO_FN) if storage space reporting is implemented for that
796
    storage type
797
  @raises NotImplementedError: for storage types who don't support space
798
    reporting yet
799
  """
800
  fn = _STORAGE_TYPE_INFO_FN[storage_type]
801
  if fn is not None:
802
    return fn(storage_key, *args)
803
  else:
804
    raise NotImplementedError
805

    
806

    
807
def _CheckExclusivePvs(pvi_list):
808
  """Check that PVs are not shared among LVs
809

810
  @type pvi_list: list of L{objects.LvmPvInfo} objects
811
  @param pvi_list: information about the PVs
812

813
  @rtype: list of tuples (string, list of strings)
814
  @return: offending volumes, as tuples: (pv_name, [lv1_name, lv2_name...])
815

816
  """
817
  res = []
818
  for pvi in pvi_list:
819
    if len(pvi.lv_list) > 1:
820
      res.append((pvi.name, pvi.lv_list))
821
  return res
822

    
823

    
824
def _VerifyHypervisors(what, vm_capable, result, all_hvparams,
825
                       get_hv_fn=hypervisor.GetHypervisor):
826
  """Verifies the hypervisor. Appends the results to the 'results' list.
827

828
  @type what: C{dict}
829
  @param what: a dictionary of things to check
830
  @type vm_capable: boolean
831
  @param vm_capable: whether or not this node is vm capable
832
  @type result: dict
833
  @param result: dictionary of verification results; results of the
834
    verifications in this function will be added here
835
  @type all_hvparams: dict of dict of string
836
  @param all_hvparams: dictionary mapping hypervisor names to hvparams
837
  @type get_hv_fn: function
838
  @param get_hv_fn: function to retrieve the hypervisor, to improve testability
839

840
  """
841
  if not vm_capable:
842
    return
843

    
844
  if constants.NV_HYPERVISOR in what:
845
    result[constants.NV_HYPERVISOR] = {}
846
    for hv_name in what[constants.NV_HYPERVISOR]:
847
      hvparams = all_hvparams[hv_name]
848
      try:
849
        val = get_hv_fn(hv_name).Verify(hvparams=hvparams)
850
      except errors.HypervisorError, err:
851
        val = "Error while checking hypervisor: %s" % str(err)
852
      result[constants.NV_HYPERVISOR][hv_name] = val
853

    
854

    
855
def _VerifyHvparams(what, vm_capable, result,
856
                    get_hv_fn=hypervisor.GetHypervisor):
857
  """Verifies the hvparams. Appends the results to the 'results' list.
858

859
  @type what: C{dict}
860
  @param what: a dictionary of things to check
861
  @type vm_capable: boolean
862
  @param vm_capable: whether or not this node is vm capable
863
  @type result: dict
864
  @param result: dictionary of verification results; results of the
865
    verifications in this function will be added here
866
  @type get_hv_fn: function
867
  @param get_hv_fn: function to retrieve the hypervisor, to improve testability
868

869
  """
870
  if not vm_capable:
871
    return
872

    
873
  if constants.NV_HVPARAMS in what:
874
    result[constants.NV_HVPARAMS] = []
875
    for source, hv_name, hvparms in what[constants.NV_HVPARAMS]:
876
      try:
877
        logging.info("Validating hv %s, %s", hv_name, hvparms)
878
        get_hv_fn(hv_name).ValidateParameters(hvparms)
879
      except errors.HypervisorError, err:
880
        result[constants.NV_HVPARAMS].append((source, hv_name, str(err)))
881

    
882

    
883
def _VerifyInstanceList(what, vm_capable, result, all_hvparams):
884
  """Verifies the instance list.
885

886
  @type what: C{dict}
887
  @param what: a dictionary of things to check
888
  @type vm_capable: boolean
889
  @param vm_capable: whether or not this node is vm capable
890
  @type result: dict
891
  @param result: dictionary of verification results; results of the
892
    verifications in this function will be added here
893
  @type all_hvparams: dict of dict of string
894
  @param all_hvparams: dictionary mapping hypervisor names to hvparams
895

896
  """
897
  if constants.NV_INSTANCELIST in what and vm_capable:
898
    # GetInstanceList can fail
899
    try:
900
      val = GetInstanceList(what[constants.NV_INSTANCELIST],
901
                            all_hvparams=all_hvparams)
902
    except RPCFail, err:
903
      val = str(err)
904
    result[constants.NV_INSTANCELIST] = val
905

    
906

    
907
def _VerifyNodeInfo(what, vm_capable, result, all_hvparams):
908
  """Verifies the node info.
909

910
  @type what: C{dict}
911
  @param what: a dictionary of things to check
912
  @type vm_capable: boolean
913
  @param vm_capable: whether or not this node is vm capable
914
  @type result: dict
915
  @param result: dictionary of verification results; results of the
916
    verifications in this function will be added here
917
  @type all_hvparams: dict of dict of string
918
  @param all_hvparams: dictionary mapping hypervisor names to hvparams
919

920
  """
921
  if constants.NV_HVINFO in what and vm_capable:
922
    hvname = what[constants.NV_HVINFO]
923
    hyper = hypervisor.GetHypervisor(hvname)
924
    hvparams = all_hvparams[hvname]
925
    result[constants.NV_HVINFO] = hyper.GetNodeInfo(hvparams=hvparams)
926

    
927

    
928
def _VerifyClientCertificate(cert_file=pathutils.NODED_CLIENT_CERT_FILE):
929
  """Verify the existance and validity of the client SSL certificate.
930

931
  """
932
  create_cert_cmd = "gnt-cluster renew-crypto --new-node-certificates"
933
  if not os.path.exists(cert_file):
934
    return (constants.CV_ERROR,
935
            "The client certificate does not exist. Run '%s' to create"
936
            " client certificates for all nodes." % create_cert_cmd)
937

    
938
  (errcode, msg) = utils.VerifyCertificate(cert_file)
939
  if errcode is not None:
940
    return (errcode, msg)
941
  else:
942
    # if everything is fine, we return the digest to be compared to the config
943
    return (None, utils.GetCertificateDigest(cert_filename=cert_file))
944

    
945

    
946
def VerifyNode(what, cluster_name, all_hvparams, node_groups, groups_cfg):
947
  """Verify the status of the local node.
948

949
  Based on the input L{what} parameter, various checks are done on the
950
  local node.
951

952
  If the I{filelist} key is present, this list of
953
  files is checksummed and the file/checksum pairs are returned.
954

955
  If the I{nodelist} key is present, we check that we have
956
  connectivity via ssh with the target nodes (and check the hostname
957
  report).
958

959
  If the I{node-net-test} key is present, we check that we have
960
  connectivity to the given nodes via both primary IP and, if
961
  applicable, secondary IPs.
962

963
  @type what: C{dict}
964
  @param what: a dictionary of things to check:
965
      - filelist: list of files for which to compute checksums
966
      - nodelist: list of nodes we should check ssh communication with
967
      - node-net-test: list of nodes we should check node daemon port
968
        connectivity with
969
      - hypervisor: list with hypervisors to run the verify for
970
  @type cluster_name: string
971
  @param cluster_name: the cluster's name
972
  @type all_hvparams: dict of dict of strings
973
  @param all_hvparams: a dictionary mapping hypervisor names to hvparams
974
  @type node_groups: a dict of strings
975
  @param node_groups: node _names_ mapped to their group uuids (it's enough to
976
      have only those nodes that are in `what["nodelist"]`)
977
  @type groups_cfg: a dict of dict of strings
978
  @param groups_cfg: a dictionary mapping group uuids to their configuration
979
  @rtype: dict
980
  @return: a dictionary with the same keys as the input dict, and
981
      values representing the result of the checks
982

983
  """
984
  result = {}
985
  my_name = netutils.Hostname.GetSysName()
986
  port = netutils.GetDaemonPort(constants.NODED)
987
  vm_capable = my_name not in what.get(constants.NV_VMNODES, [])
988

    
989
  _VerifyHypervisors(what, vm_capable, result, all_hvparams)
990
  _VerifyHvparams(what, vm_capable, result)
991

    
992
  if constants.NV_FILELIST in what:
993
    fingerprints = utils.FingerprintFiles(map(vcluster.LocalizeVirtualPath,
994
                                              what[constants.NV_FILELIST]))
995
    result[constants.NV_FILELIST] = \
996
      dict((vcluster.MakeVirtualPath(key), value)
997
           for (key, value) in fingerprints.items())
998

    
999
  if constants.NV_CLIENT_CERT in what:
1000
    result[constants.NV_CLIENT_CERT] = _VerifyClientCertificate()
1001

    
1002
  if constants.NV_NODELIST in what:
1003
    (nodes, bynode) = what[constants.NV_NODELIST]
1004

    
1005
    # Add nodes from other groups (different for each node)
1006
    try:
1007
      nodes.extend(bynode[my_name])
1008
    except KeyError:
1009
      pass
1010

    
1011
    # Use a random order
1012
    random.shuffle(nodes)
1013

    
1014
    # Try to contact all nodes
1015
    val = {}
1016
    for node in nodes:
1017
      params = groups_cfg.get(node_groups.get(node))
1018
      ssh_port = params["ndparams"].get(constants.ND_SSH_PORT)
1019
      logging.debug("Ssh port %s (None = default) for node %s",
1020
                    str(ssh_port), node)
1021
      success, message = _GetSshRunner(cluster_name). \
1022
                            VerifyNodeHostname(node, ssh_port)
1023
      if not success:
1024
        val[node] = message
1025

    
1026
    result[constants.NV_NODELIST] = val
1027

    
1028
  if constants.NV_NODENETTEST in what:
1029
    result[constants.NV_NODENETTEST] = tmp = {}
1030
    my_pip = my_sip = None
1031
    for name, pip, sip in what[constants.NV_NODENETTEST]:
1032
      if name == my_name:
1033
        my_pip = pip
1034
        my_sip = sip
1035
        break
1036
    if not my_pip:
1037
      tmp[my_name] = ("Can't find my own primary/secondary IP"
1038
                      " in the node list")
1039
    else:
1040
      for name, pip, sip in what[constants.NV_NODENETTEST]:
1041
        fail = []
1042
        if not netutils.TcpPing(pip, port, source=my_pip):
1043
          fail.append("primary")
1044
        if sip != pip:
1045
          if not netutils.TcpPing(sip, port, source=my_sip):
1046
            fail.append("secondary")
1047
        if fail:
1048
          tmp[name] = ("failure using the %s interface(s)" %
1049
                       " and ".join(fail))
1050

    
1051
  if constants.NV_MASTERIP in what:
1052
    # FIXME: add checks on incoming data structures (here and in the
1053
    # rest of the function)
1054
    master_name, master_ip = what[constants.NV_MASTERIP]
1055
    if master_name == my_name:
1056
      source = constants.IP4_ADDRESS_LOCALHOST
1057
    else:
1058
      source = None
1059
    result[constants.NV_MASTERIP] = netutils.TcpPing(master_ip, port,
1060
                                                     source=source)
1061

    
1062
  if constants.NV_USERSCRIPTS in what:
1063
    result[constants.NV_USERSCRIPTS] = \
1064
      [script for script in what[constants.NV_USERSCRIPTS]
1065
       if not utils.IsExecutable(script)]
1066

    
1067
  if constants.NV_OOB_PATHS in what:
1068
    result[constants.NV_OOB_PATHS] = tmp = []
1069
    for path in what[constants.NV_OOB_PATHS]:
1070
      try:
1071
        st = os.stat(path)
1072
      except OSError, err:
1073
        tmp.append("error stating out of band helper: %s" % err)
1074
      else:
1075
        if stat.S_ISREG(st.st_mode):
1076
          if stat.S_IMODE(st.st_mode) & stat.S_IXUSR:
1077
            tmp.append(None)
1078
          else:
1079
            tmp.append("out of band helper %s is not executable" % path)
1080
        else:
1081
          tmp.append("out of band helper %s is not a file" % path)
1082

    
1083
  if constants.NV_LVLIST in what and vm_capable:
1084
    try:
1085
      val = GetVolumeList(utils.ListVolumeGroups().keys())
1086
    except RPCFail, err:
1087
      val = str(err)
1088
    result[constants.NV_LVLIST] = val
1089

    
1090
  _VerifyInstanceList(what, vm_capable, result, all_hvparams)
1091

    
1092
  if constants.NV_VGLIST in what and vm_capable:
1093
    result[constants.NV_VGLIST] = utils.ListVolumeGroups()
1094

    
1095
  if constants.NV_PVLIST in what and vm_capable:
1096
    check_exclusive_pvs = constants.NV_EXCLUSIVEPVS in what
1097
    val = bdev.LogicalVolume.GetPVInfo(what[constants.NV_PVLIST],
1098
                                       filter_allocatable=False,
1099
                                       include_lvs=check_exclusive_pvs)
1100
    if check_exclusive_pvs:
1101
      result[constants.NV_EXCLUSIVEPVS] = _CheckExclusivePvs(val)
1102
      for pvi in val:
1103
        # Avoid sending useless data on the wire
1104
        pvi.lv_list = []
1105
    result[constants.NV_PVLIST] = map(objects.LvmPvInfo.ToDict, val)
1106

    
1107
  if constants.NV_VERSION in what:
1108
    result[constants.NV_VERSION] = (constants.PROTOCOL_VERSION,
1109
                                    constants.RELEASE_VERSION)
1110

    
1111
  _VerifyNodeInfo(what, vm_capable, result, all_hvparams)
1112

    
1113
  if constants.NV_DRBDVERSION in what and vm_capable:
1114
    try:
1115
      drbd_version = DRBD8.GetProcInfo().GetVersionString()
1116
    except errors.BlockDeviceError, err:
1117
      logging.warning("Can't get DRBD version", exc_info=True)
1118
      drbd_version = str(err)
1119
    result[constants.NV_DRBDVERSION] = drbd_version
1120

    
1121
  if constants.NV_DRBDLIST in what and vm_capable:
1122
    try:
1123
      used_minors = drbd.DRBD8.GetUsedDevs()
1124
    except errors.BlockDeviceError, err:
1125
      logging.warning("Can't get used minors list", exc_info=True)
1126
      used_minors = str(err)
1127
    result[constants.NV_DRBDLIST] = used_minors
1128

    
1129
  if constants.NV_DRBDHELPER in what and vm_capable:
1130
    status = True
1131
    try:
1132
      payload = drbd.DRBD8.GetUsermodeHelper()
1133
    except errors.BlockDeviceError, err:
1134
      logging.error("Can't get DRBD usermode helper: %s", str(err))
1135
      status = False
1136
      payload = str(err)
1137
    result[constants.NV_DRBDHELPER] = (status, payload)
1138

    
1139
  if constants.NV_NODESETUP in what:
1140
    result[constants.NV_NODESETUP] = tmpr = []
1141
    if not os.path.isdir("/sys/block") or not os.path.isdir("/sys/class/net"):
1142
      tmpr.append("The sysfs filesytem doesn't seem to be mounted"
1143
                  " under /sys, missing required directories /sys/block"
1144
                  " and /sys/class/net")
1145
    if (not os.path.isdir("/proc/sys") or
1146
        not os.path.isfile("/proc/sysrq-trigger")):
1147
      tmpr.append("The procfs filesystem doesn't seem to be mounted"
1148
                  " under /proc, missing required directory /proc/sys and"
1149
                  " the file /proc/sysrq-trigger")
1150

    
1151
  if constants.NV_TIME in what:
1152
    result[constants.NV_TIME] = utils.SplitTime(time.time())
1153

    
1154
  if constants.NV_OSLIST in what and vm_capable:
1155
    result[constants.NV_OSLIST] = DiagnoseOS()
1156

    
1157
  if constants.NV_BRIDGES in what and vm_capable:
1158
    result[constants.NV_BRIDGES] = [bridge
1159
                                    for bridge in what[constants.NV_BRIDGES]
1160
                                    if not utils.BridgeExists(bridge)]
1161

    
1162
  if what.get(constants.NV_ACCEPTED_STORAGE_PATHS) == my_name:
1163
    result[constants.NV_ACCEPTED_STORAGE_PATHS] = \
1164
        filestorage.ComputeWrongFileStoragePaths()
1165

    
1166
  if what.get(constants.NV_FILE_STORAGE_PATH):
1167
    pathresult = filestorage.CheckFileStoragePath(
1168
        what[constants.NV_FILE_STORAGE_PATH])
1169
    if pathresult:
1170
      result[constants.NV_FILE_STORAGE_PATH] = pathresult
1171

    
1172
  if what.get(constants.NV_SHARED_FILE_STORAGE_PATH):
1173
    pathresult = filestorage.CheckFileStoragePath(
1174
        what[constants.NV_SHARED_FILE_STORAGE_PATH])
1175
    if pathresult:
1176
      result[constants.NV_SHARED_FILE_STORAGE_PATH] = pathresult
1177

    
1178
  return result
1179

    
1180

    
1181
def GetCryptoTokens(token_requests):
1182
  """Perform actions on the node's cryptographic tokens.
1183

1184
  Token types can be 'ssl' or 'ssh'. So far only some actions are implemented
1185
  for 'ssl'. Action 'get' returns the digest of the public client ssl
1186
  certificate. Action 'create' creates a new client certificate and private key
1187
  and also returns the digest of the certificate. The third parameter of a
1188
  token request are optional parameters for the actions, so far only the
1189
  filename is supported.
1190

1191
  @type token_requests: list of tuples of (string, string, dict), where the
1192
    first string is in constants.CRYPTO_TYPES, the second in
1193
    constants.CRYPTO_ACTIONS. The third parameter is a dictionary of string
1194
    to string.
1195
  @param token_requests: list of requests of cryptographic tokens and actions
1196
    to perform on them. The actions come with a dictionary of options.
1197
  @rtype: list of tuples (string, string)
1198
  @return: list of tuples of the token type and the public crypto token
1199

1200
  """
1201
  getents = runtime.GetEnts()
1202
  _VALID_CERT_FILES = [pathutils.NODED_CERT_FILE,
1203
                       pathutils.NODED_CLIENT_CERT_FILE,
1204
                       pathutils.NODED_CLIENT_CERT_FILE_TMP]
1205
  _DEFAULT_CERT_FILE = pathutils.NODED_CLIENT_CERT_FILE
1206
  tokens = []
1207
  for (token_type, action, options) in token_requests:
1208
    if token_type not in constants.CRYPTO_TYPES:
1209
      raise errors.ProgrammerError("Token type '%s' not supported." %
1210
                                   token_type)
1211
    if action not in constants.CRYPTO_ACTIONS:
1212
      raise errors.ProgrammerError("Action '%s' is not supported." %
1213
                                   action)
1214
    if token_type == constants.CRYPTO_TYPE_SSL_DIGEST:
1215
      if action == constants.CRYPTO_ACTION_CREATE:
1216

    
1217
        # extract file name from options
1218
        cert_filename = None
1219
        if options:
1220
          cert_filename = options.get(constants.CRYPTO_OPTION_CERT_FILE)
1221
        if not cert_filename:
1222
          cert_filename = _DEFAULT_CERT_FILE
1223
        # For security reason, we don't allow arbitrary filenames
1224
        if not cert_filename in _VALID_CERT_FILES:
1225
          raise errors.ProgrammerError(
1226
            "The certificate file name path '%s' is not allowed." %
1227
            cert_filename)
1228

    
1229
        # extract serial number from options
1230
        serial_no = None
1231
        if options:
1232
          try:
1233
            serial_no = int(options[constants.CRYPTO_OPTION_SERIAL_NO])
1234
          except ValueError:
1235
            raise errors.ProgrammerError(
1236
              "The given serial number is not an intenger: %s." %
1237
              options.get(constants.CRYPTO_OPTION_SERIAL_NO))
1238
          except KeyError:
1239
            raise errors.ProgrammerError("No serial number was provided.")
1240

    
1241
        if not serial_no:
1242
          raise errors.ProgrammerError(
1243
            "Cannot create an SSL certificate without a serial no.")
1244

    
1245
        utils.GenerateNewSslCert(
1246
          True, cert_filename, serial_no,
1247
          "Create new client SSL certificate in %s." % cert_filename,
1248
          uid=getents.masterd_uid, gid=getents.masterd_gid)
1249
        tokens.append((token_type,
1250
                       utils.GetCertificateDigest(
1251
                         cert_filename=cert_filename)))
1252
      elif action == constants.CRYPTO_ACTION_GET:
1253
        tokens.append((token_type,
1254
                       utils.GetCertificateDigest()))
1255
  return tokens
1256

    
1257

    
1258
def GetBlockDevSizes(devices):
1259
  """Return the size of the given block devices
1260

1261
  @type devices: list
1262
  @param devices: list of block device nodes to query
1263
  @rtype: dict
1264
  @return:
1265
    dictionary of all block devices under /dev (key). The value is their
1266
    size in MiB.
1267

1268
    {'/dev/disk/by-uuid/123456-12321231-312312-312': 124}
1269

1270
  """
1271
  DEV_PREFIX = "/dev/"
1272
  blockdevs = {}
1273

    
1274
  for devpath in devices:
1275
    if not utils.IsBelowDir(DEV_PREFIX, devpath):
1276
      continue
1277

    
1278
    try:
1279
      st = os.stat(devpath)
1280
    except EnvironmentError, err:
1281
      logging.warning("Error stat()'ing device %s: %s", devpath, str(err))
1282
      continue
1283

    
1284
    if stat.S_ISBLK(st.st_mode):
1285
      result = utils.RunCmd(["blockdev", "--getsize64", devpath])
1286
      if result.failed:
1287
        # We don't want to fail, just do not list this device as available
1288
        logging.warning("Cannot get size for block device %s", devpath)
1289
        continue
1290

    
1291
      size = int(result.stdout) / (1024 * 1024)
1292
      blockdevs[devpath] = size
1293
  return blockdevs
1294

    
1295

    
1296
def GetVolumeList(vg_names):
1297
  """Compute list of logical volumes and their size.
1298

1299
  @type vg_names: list
1300
  @param vg_names: the volume groups whose LVs we should list, or
1301
      empty for all volume groups
1302
  @rtype: dict
1303
  @return:
1304
      dictionary of all partions (key) with value being a tuple of
1305
      their size (in MiB), inactive and online status::
1306

1307
        {'xenvg/test1': ('20.06', True, True)}
1308

1309
      in case of errors, a string is returned with the error
1310
      details.
1311

1312
  """
1313
  lvs = {}
1314
  sep = "|"
1315
  if not vg_names:
1316
    vg_names = []
1317
  result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
1318
                         "--separator=%s" % sep,
1319
                         "-ovg_name,lv_name,lv_size,lv_attr"] + vg_names)
1320
  if result.failed:
1321
    _Fail("Failed to list logical volumes, lvs output: %s", result.output)
1322

    
1323
  for line in result.stdout.splitlines():
1324
    line = line.strip()
1325
    match = _LVSLINE_REGEX.match(line)
1326
    if not match:
1327
      logging.error("Invalid line returned from lvs output: '%s'", line)
1328
      continue
1329
    vg_name, name, size, attr = match.groups()
1330
    inactive = attr[4] == "-"
1331
    online = attr[5] == "o"
1332
    virtual = attr[0] == "v"
1333
    if virtual:
1334
      # we don't want to report such volumes as existing, since they
1335
      # don't really hold data
1336
      continue
1337
    lvs[vg_name + "/" + name] = (size, inactive, online)
1338

    
1339
  return lvs
1340

    
1341

    
1342
def ListVolumeGroups():
1343
  """List the volume groups and their size.
1344

1345
  @rtype: dict
1346
  @return: dictionary with keys volume name and values the
1347
      size of the volume
1348

1349
  """
1350
  return utils.ListVolumeGroups()
1351

    
1352

    
1353
def NodeVolumes():
1354
  """List all volumes on this node.
1355

1356
  @rtype: list
1357
  @return:
1358
    A list of dictionaries, each having four keys:
1359
      - name: the logical volume name,
1360
      - size: the size of the logical volume
1361
      - dev: the physical device on which the LV lives
1362
      - vg: the volume group to which it belongs
1363

1364
    In case of errors, we return an empty list and log the
1365
    error.
1366

1367
    Note that since a logical volume can live on multiple physical
1368
    volumes, the resulting list might include a logical volume
1369
    multiple times.
1370

1371
  """
1372
  result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
1373
                         "--separator=|",
1374
                         "--options=lv_name,lv_size,devices,vg_name"])
1375
  if result.failed:
1376
    _Fail("Failed to list logical volumes, lvs output: %s",
1377
          result.output)
1378

    
1379
  def parse_dev(dev):
1380
    return dev.split("(")[0]
1381

    
1382
  def handle_dev(dev):
1383
    return [parse_dev(x) for x in dev.split(",")]
1384

    
1385
  def map_line(line):
1386
    line = [v.strip() for v in line]
1387
    return [{"name": line[0], "size": line[1],
1388
             "dev": dev, "vg": line[3]} for dev in handle_dev(line[2])]
1389

    
1390
  all_devs = []
1391
  for line in result.stdout.splitlines():
1392
    if line.count("|") >= 3:
1393
      all_devs.extend(map_line(line.split("|")))
1394
    else:
1395
      logging.warning("Strange line in the output from lvs: '%s'", line)
1396
  return all_devs
1397

    
1398

    
1399
def BridgesExist(bridges_list):
1400
  """Check if a list of bridges exist on the current node.
1401

1402
  @rtype: boolean
1403
  @return: C{True} if all of them exist, C{False} otherwise
1404

1405
  """
1406
  missing = []
1407
  for bridge in bridges_list:
1408
    if not utils.BridgeExists(bridge):
1409
      missing.append(bridge)
1410

    
1411
  if missing:
1412
    _Fail("Missing bridges %s", utils.CommaJoin(missing))
1413

    
1414

    
1415
def GetInstanceListForHypervisor(hname, hvparams=None,
1416
                                 get_hv_fn=hypervisor.GetHypervisor):
1417
  """Provides a list of instances of the given hypervisor.
1418

1419
  @type hname: string
1420
  @param hname: name of the hypervisor
1421
  @type hvparams: dict of strings
1422
  @param hvparams: hypervisor parameters for the given hypervisor
1423
  @type get_hv_fn: function
1424
  @param get_hv_fn: function that returns a hypervisor for the given hypervisor
1425
    name; optional parameter to increase testability
1426

1427
  @rtype: list
1428
  @return: a list of all running instances on the current node
1429
    - instance1.example.com
1430
    - instance2.example.com
1431

1432
  """
1433
  results = []
1434
  try:
1435
    hv = get_hv_fn(hname)
1436
    names = hv.ListInstances(hvparams=hvparams)
1437
    results.extend(names)
1438
  except errors.HypervisorError, err:
1439
    _Fail("Error enumerating instances (hypervisor %s): %s",
1440
          hname, err, exc=True)
1441
  return results
1442

    
1443

    
1444
def GetInstanceList(hypervisor_list, all_hvparams=None,
1445
                    get_hv_fn=hypervisor.GetHypervisor):
1446
  """Provides a list of instances.
1447

1448
  @type hypervisor_list: list
1449
  @param hypervisor_list: the list of hypervisors to query information
1450
  @type all_hvparams: dict of dict of strings
1451
  @param all_hvparams: a dictionary mapping hypervisor types to respective
1452
    cluster-wide hypervisor parameters
1453
  @type get_hv_fn: function
1454
  @param get_hv_fn: function that returns a hypervisor for the given hypervisor
1455
    name; optional parameter to increase testability
1456

1457
  @rtype: list
1458
  @return: a list of all running instances on the current node
1459
    - instance1.example.com
1460
    - instance2.example.com
1461

1462
  """
1463
  results = []
1464
  for hname in hypervisor_list:
1465
    hvparams = all_hvparams[hname]
1466
    results.extend(GetInstanceListForHypervisor(hname, hvparams=hvparams,
1467
                                                get_hv_fn=get_hv_fn))
1468
  return results
1469

    
1470

    
1471
def GetInstanceInfo(instance, hname, hvparams=None):
1472
  """Gives back the information about an instance as a dictionary.
1473

1474
  @type instance: string
1475
  @param instance: the instance name
1476
  @type hname: string
1477
  @param hname: the hypervisor type of the instance
1478
  @type hvparams: dict of strings
1479
  @param hvparams: the instance's hvparams
1480

1481
  @rtype: dict
1482
  @return: dictionary with the following keys:
1483
      - memory: memory size of instance (int)
1484
      - state: state of instance (HvInstanceState)
1485
      - time: cpu time of instance (float)
1486
      - vcpus: the number of vcpus (int)
1487

1488
  """
1489
  output = {}
1490

    
1491
  iinfo = hypervisor.GetHypervisor(hname).GetInstanceInfo(instance,
1492
                                                          hvparams=hvparams)
1493
  if iinfo is not None:
1494
    output["memory"] = iinfo[2]
1495
    output["vcpus"] = iinfo[3]
1496
    output["state"] = iinfo[4]
1497
    output["time"] = iinfo[5]
1498

    
1499
  return output
1500

    
1501

    
1502
def GetInstanceMigratable(instance):
1503
  """Computes whether an instance can be migrated.
1504

1505
  @type instance: L{objects.Instance}
1506
  @param instance: object representing the instance to be checked.
1507

1508
  @rtype: tuple
1509
  @return: tuple of (result, description) where:
1510
      - result: whether the instance can be migrated or not
1511
      - description: a description of the issue, if relevant
1512

1513
  """
1514
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
1515
  iname = instance.name
1516
  if iname not in hyper.ListInstances(instance.hvparams):
1517
    _Fail("Instance %s is not running", iname)
1518

    
1519
  for idx in range(len(instance.disks)):
1520
    link_name = _GetBlockDevSymlinkPath(iname, idx)
1521
    if not os.path.islink(link_name):
1522
      logging.warning("Instance %s is missing symlink %s for disk %d",
1523
                      iname, link_name, idx)


def GetAllInstancesInfo(hypervisor_list, all_hvparams):
1527
  """Gather data about all instances.
1528

1529
  This is the equivalent of L{GetInstanceInfo}, except that it
1530
  computes data for all instances at once, thus being faster if one
1531
  needs data about more than one instance.
1532

1533
  @type hypervisor_list: list
1534
  @param hypervisor_list: list of hypervisors to query for instance data
1535
  @type all_hvparams: dict of dict of strings
1536
  @param all_hvparams: mapping of hypervisor names to hvparams
1537

1538
  @rtype: dict
1539
  @return: dictionary of instance: data, with data having the following keys:
1540
      - memory: memory size of instance (int)
1541
      - state: xen state of instance (string)
1542
      - time: cpu time of instance (float)
1543
      - vcpus: the number of vcpus
1544

1545
  """
1546
  output = {}
1547
  for hname in hypervisor_list:
1548
    hvparams = all_hvparams[hname]
1549
    iinfo = hypervisor.GetHypervisor(hname).GetAllInstancesInfo(hvparams)
1550
    if iinfo:
1551
      for name, _, memory, vcpus, state, times in iinfo:
1552
        value = {
1553
          "memory": memory,
1554
          "vcpus": vcpus,
1555
          "state": state,
1556
          "time": times,
1557
          }
1558
        if name in output:
1559
          # we only check static parameters, like memory and vcpus,
1560
          # and not state and time which can change between the
1561
          # invocations of the different hypervisors
1562
          for key in "memory", "vcpus":
1563
            if value[key] != output[name][key]:
1564
              _Fail("Instance %s is running twice"
1565
                    " with different parameters", name)
1566
        output[name] = value
1567

    
1568
  return output


def GetInstanceConsoleInfo(instance_param_dict,
1572
                           get_hv_fn=hypervisor.GetHypervisor):
1573
  """Gather data about the console access of a set of instances of this node.
1574

1575
  This function assumes that the caller already knows which instances are on
1576
  this node, by calling a function such as L{GetAllInstancesInfo} or
1577
  L{GetInstanceList}.
1578

1579
  For every instance, a large amount of configuration data needs to be
1580
  provided to the hypervisor interface in order to receive the console
1581
  information. Whether this could or should be cut down can be discussed.
1582
  The information is provided in a dictionary indexed by instance name,
1583
  allowing any number of instance queries to be done.
1584

1585
  @type instance_param_dict: dict of string to tuple of dictionaries, where the
1586
    dictionaries represent: L{objects.Instance}, L{objects.Node},
1587
    L{objects.NodeGroup}, HvParams, BeParams
1588
  @param instance_param_dict: mapping of instance name to parameters necessary
1589
    for console information retrieval
1590

1591
  @rtype: dict
1592
  @return: dictionary of instance: data, with data having the following keys:
1593
      - instance: instance name
1594
      - kind: console kind
1595
      - message: used with kind == CONS_MESSAGE, indicates console to be
1596
                 unavailable, supplies error message
1597
      - host: host to connect to
1598
      - port: port to use
1599
      - user: user for login
1600
      - command: the command, broken into parts as an array
1601
      - display: unknown, potentially unused?
1602

1603
  """
1604

    
1605
  output = {}
1606
  for inst_name in instance_param_dict:
1607
    instance = instance_param_dict[inst_name]["instance"]
1608
    pnode = instance_param_dict[inst_name]["node"]
1609
    group = instance_param_dict[inst_name]["group"]
1610
    hvparams = instance_param_dict[inst_name]["hvParams"]
1611
    beparams = instance_param_dict[inst_name]["beParams"]
1612

    
1613
    instance = objects.Instance.FromDict(instance)
1614
    pnode = objects.Node.FromDict(pnode)
1615
    group = objects.NodeGroup.FromDict(group)
1616

    
1617
    h = get_hv_fn(instance.hypervisor)
1618
    output[inst_name] = h.GetInstanceConsole(instance, pnode, group,
1619
                                             hvparams, beparams).ToDict()
1620

    
1621
  return output


def _InstanceLogName(kind, os_name, instance, component):
1625
  """Compute the OS log filename for a given instance and operation.
1626

1627
  The instance name and os name are passed in as strings since not all
1628
  operations have these as part of an instance object.
1629

1630
  @type kind: string
1631
  @param kind: the operation type (e.g. add, import, etc.)
1632
  @type os_name: string
1633
  @param os_name: the os name
1634
  @type instance: string
1635
  @param instance: the name of the instance being imported/added/etc.
1636
  @type component: string or None
1637
  @param component: the name of the component of the instance being
1638
      transferred
1639

1640
  """
1641
  # TODO: Use tempfile.mkstemp to create unique filename
1642
  if component:
1643
    assert "/" not in component
1644
    c_msg = "-%s" % component
1645
  else:
1646
    c_msg = ""
  base = ("%s-%s-%s%s-%s.log" %
1648
          (kind, os_name, instance, c_msg, utils.TimestampForFilename()))
1649
  return utils.PathJoin(pathutils.LOG_OS_DIR, base)


def InstanceOsAdd(instance, reinstall, debug):
1653
  """Add an OS to an instance.
1654

1655
  @type instance: L{objects.Instance}
1656
  @param instance: Instance whose OS is to be installed
1657
  @type reinstall: boolean
1658
  @param reinstall: whether this is an instance reinstall
1659
  @type debug: integer
1660
  @param debug: debug level, passed to the OS scripts
1661
  @rtype: None
1662

1663
  """
1664
  inst_os = OSFromDisk(instance.os)
1665

    
1666
  create_env = OSEnvironment(instance, inst_os, debug)
1667
  if reinstall:
1668
    create_env["INSTANCE_REINSTALL"] = "1"
1669

    
1670
  logfile = _InstanceLogName("add", instance.os, instance.name, None)
1671

    
1672
  result = utils.RunCmd([inst_os.create_script], env=create_env,
1673
                        cwd=inst_os.path, output=logfile, reset_env=True)
1674
  if result.failed:
1675
    logging.error("os create command '%s' returned error: %s, logfile: %s,"
1676
                  " output: %s", result.cmd, result.fail_reason, logfile,
1677
                  result.output)
1678
    lines = [utils.SafeEncode(val)
1679
             for val in utils.TailFile(logfile, lines=20)]
1680
    _Fail("OS create script failed (%s), last lines in the"
1681
          " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)
1682

    
1683

    
1684
def RunRenameInstance(instance, old_name, debug):
1685
  """Run the OS rename script for an instance.
1686

1687
  @type instance: L{objects.Instance}
1688
  @param instance: Instance whose OS is to be installed
1689
  @type old_name: string
1690
  @param old_name: previous instance name
1691
  @type debug: integer
1692
  @param debug: debug level, passed to the OS scripts
1693
  @rtype: boolean
1694
  @return: the success of the operation
1695

1696
  """
1697
  inst_os = OSFromDisk(instance.os)
1698

    
1699
  rename_env = OSEnvironment(instance, inst_os, debug)
1700
  rename_env["OLD_INSTANCE_NAME"] = old_name
1701

    
1702
  logfile = _InstanceLogName("rename", instance.os,
1703
                             "%s-%s" % (old_name, instance.name), None)
1704

    
1705
  result = utils.RunCmd([inst_os.rename_script], env=rename_env,
1706
                        cwd=inst_os.path, output=logfile, reset_env=True)
1707

    
1708
  if result.failed:
1709
    logging.error("os create command '%s' returned error: %s output: %s",
1710
                  result.cmd, result.fail_reason, result.output)
1711
    lines = [utils.SafeEncode(val)
1712
             for val in utils.TailFile(logfile, lines=20)]
1713
    _Fail("OS rename script failed (%s), last lines in the"
1714
          " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)
1715

    
1716

    
1717
def _GetBlockDevSymlinkPath(instance_name, idx, _dir=None):
1718
  """Returns symlink path for block device.
1719

1720
  """
1721
  if _dir is None:
1722
    _dir = pathutils.DISK_LINKS_DIR
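
  # Illustrative example of the resulting path (assuming the default
  # DISK_LINKS_DIR and ":" as DISK_SEPARATOR):
  #   /var/run/ganeti/instance-disks/instance1.example.com:0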
  return utils.PathJoin(_dir,
1725
                        ("%s%s%s" %
1726
                         (instance_name, constants.DISK_SEPARATOR, idx)))


def _SymlinkBlockDev(instance_name, device_path, idx):
1730
  """Set up symlinks to a instance's block device.
1731

1732
  This is an auxiliary function run when an instance is started (on the
  primary node) or when an instance is migrated (on the target node).
1734

1735

1736
  @param instance_name: the name of the target instance
1737
  @param device_path: path of the physical block device, on the node
1738
  @param idx: the disk index
1739
  @return: absolute path to the disk's symlink
1740

1741
  """
1742
  link_name = _GetBlockDevSymlinkPath(instance_name, idx)
1743
  try:
1744
    os.symlink(device_path, link_name)
1745
  except OSError, err:
1746
    if err.errno == errno.EEXIST:
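      # the symlink already exists; recreate it only if it points elsewhere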
      if (not os.path.islink(link_name) or
1748
          os.readlink(link_name) != device_path):
1749
        os.remove(link_name)
1750
        os.symlink(device_path, link_name)
1751
    else:
1752
      raise
1753

    
1754
  return link_name
1755

    
1756

    
1757
def _RemoveBlockDevLinks(instance_name, disks):
1758
  """Remove the block device symlinks belonging to the given instance.
1759

1760
  """
1761
  for idx, _ in enumerate(disks):
1762
    link_name = _GetBlockDevSymlinkPath(instance_name, idx)
1763
    if os.path.islink(link_name):
1764
      try:
1765
        os.remove(link_name)
1766
      except OSError:
1767
        logging.exception("Can't remove symlink '%s'", link_name)
1768

    
1769

    
1770
def _CalculateDeviceURI(instance, disk, device):
1771
  """Get the URI for the device.
1772

1773
  @type instance: L{objects.Instance}
1774
  @param instance: the instance which disk belongs to
1775
  @type disk: L{objects.Disk}
1776
  @param disk: the target disk object
1777
  @type device: L{bdev.BlockDev}
1778
  @param device: the corresponding BlockDevice
1779
  @rtype: string
1780
  @return: the device uri if any else None
1781

1782
  """
1783
  access_mode = disk.params.get(constants.LDP_ACCESS,
1784
                                constants.DISK_KERNELSPACE)
1785
  if access_mode == constants.DISK_USERSPACE:
1786
    # This can raise errors.BlockDeviceError
1787
    return device.GetUserspaceAccessUri(instance.hypervisor)
1788
  else:
1789
    return None
1790

    
1791

    
1792
def _GatherAndLinkBlockDevs(instance):
1793
  """Set up an instance's block device(s).
1794

1795
  This is run on the primary node at instance startup. The block
1796
  devices must be already assembled.
1797

1798
  @type instance: L{objects.Instance}
1799
  @param instance: the instance whose disks we should assemble
1800
  @rtype: list
1801
  @return: list of (disk_object, link_name, drive_uri)
1802

1803
  """
1804
  block_devices = []
1805
  for idx, disk in enumerate(instance.disks):
1806
    device = _RecursiveFindBD(disk)
1807
    if device is None:
1808
      raise errors.BlockDeviceError("Block device '%s' is not set up." %
1809
                                    str(disk))
1810
    device.Open()
1811
    try:
1812
      link_name = _SymlinkBlockDev(instance.name, device.dev_path, idx)
1813
    except OSError, e:
1814
      raise errors.BlockDeviceError("Cannot create block device symlink: %s" %
1815
                                    e.strerror)
1816
    uri = _CalculateDeviceURI(instance, disk, device)
1817

    
1818
    block_devices.append((disk, link_name, uri))
1819

    
1820
  return block_devices
1821

    
1822

    
1823
def StartInstance(instance, startup_paused, reason, store_reason=True):
1824
  """Start an instance.
1825

1826
  @type instance: L{objects.Instance}
1827
  @param instance: the instance object
1828
  @type startup_paused: bool
  @param startup_paused: whether to pause the instance at startup
1830
  @type reason: list of reasons
1831
  @param reason: the reason trail for this startup
1832
  @type store_reason: boolean
1833
  @param store_reason: whether to store the shutdown reason trail on file
1834
  @rtype: None
1835

1836
  """
1837
  running_instances = GetInstanceListForHypervisor(instance.hypervisor,
1838
                                                   instance.hvparams)
1839

    
1840
  if instance.name in running_instances:
1841
    logging.info("Instance %s already running, not starting", instance.name)
1842
    return
1843

    
1844
  try:
1845
    block_devices = _GatherAndLinkBlockDevs(instance)
1846
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
1847
    hyper.StartInstance(instance, block_devices, startup_paused)
1848
    if store_reason:
1849
      _StoreInstReasonTrail(instance.name, reason)
1850
  except errors.BlockDeviceError, err:
1851
    _Fail("Block device error: %s", err, exc=True)
1852
  except errors.HypervisorError, err:
1853
    _RemoveBlockDevLinks(instance.name, instance.disks)
1854
    _Fail("Hypervisor error: %s", err, exc=True)
1855

    
1856

    
1857
def InstanceShutdown(instance, timeout, reason, store_reason=True):
1858
  """Shut an instance down.
1859

1860
  @note: this function uses polling with a hardcoded timeout.
1861

1862
  @type instance: L{objects.Instance}
1863
  @param instance: the instance object
1864
  @type timeout: integer
1865
  @param timeout: maximum timeout for soft shutdown
1866
  @type reason: list of reasons
1867
  @param reason: the reason trail for this shutdown
1868
  @type store_reason: boolean
1869
  @param store_reason: whether to store the shutdown reason trail on file
1870
  @rtype: None
1871

1872
  """
1873
  hv_name = instance.hypervisor
1874
  hyper = hypervisor.GetHypervisor(hv_name)
1875
  iname = instance.name
1876

    
1877
  if instance.name not in hyper.ListInstances(instance.hvparams):
1878
    logging.info("Instance %s not running, doing nothing", iname)
1879
    return
1880

    
1881
  class _TryShutdown:
1882
    def __init__(self):
1883
      self.tried_once = False
1884

    
1885
    def __call__(self):
1886
      if iname not in hyper.ListInstances(instance.hvparams):
1887
        return
1888

    
1889
      try:
1890
        hyper.StopInstance(instance, retry=self.tried_once, timeout=timeout)
1891
        if store_reason:
1892
          _StoreInstReasonTrail(instance.name, reason)
1893
      except errors.HypervisorError, err:
1894
        if iname not in hyper.ListInstances(instance.hvparams):
1895
          # if the instance is no longer existing, consider this a
1896
          # success and go to cleanup
1897
          return
1898

    
1899
        _Fail("Failed to stop instance %s: %s", iname, err)
1900

    
1901
      self.tried_once = True
1902

    
1903
      raise utils.RetryAgain()
1904

    
1905
  try:
1906
    utils.Retry(_TryShutdown(), 5, timeout)
1907
  except utils.RetryTimeout:
1908
    # the shutdown did not succeed
1909
    logging.error("Shutdown of '%s' unsuccessful, forcing", iname)
1910

    
1911
    try:
1912
      hyper.StopInstance(instance, force=True)
1913
    except errors.HypervisorError, err:
1914
      if iname in hyper.ListInstances(instance.hvparams):
1915
        # only raise an error if the instance still exists, otherwise
1916
        # the error could simply be "instance ... unknown"!
1917
        _Fail("Failed to force stop instance %s: %s", iname, err)
1918

    
1919
    time.sleep(1)
1920

    
1921
    if iname in hyper.ListInstances(instance.hvparams):
1922
      _Fail("Could not shutdown instance %s even by destroy", iname)
1923

    
1924
  try:
1925
    hyper.CleanupInstance(instance.name)
1926
  except errors.HypervisorError, err:
1927
    logging.warning("Failed to execute post-shutdown cleanup step: %s", err)
1928

    
1929
  _RemoveBlockDevLinks(iname, instance.disks)
1930

    
1931

    
1932
def InstanceReboot(instance, reboot_type, shutdown_timeout, reason):
1933
  """Reboot an instance.
1934

1935
  @type instance: L{objects.Instance}
1936
  @param instance: the instance object to reboot
1937
  @type reboot_type: str
1938
  @param reboot_type: the type of reboot, one of the following
    constants:
1940
      - L{constants.INSTANCE_REBOOT_SOFT}: only reboot the
1941
        instance OS, do not recreate the VM
1942
      - L{constants.INSTANCE_REBOOT_HARD}: tear down and
1943
        restart the VM (at the hypervisor level)
1944
      - the other reboot type (L{constants.INSTANCE_REBOOT_FULL}) is
1945
        not accepted here, since that mode is handled differently, in
1946
        cmdlib, and translates into full stop and start of the
1947
        instance (instead of a call_instance_reboot RPC)
1948
  @type shutdown_timeout: integer
1949
  @param shutdown_timeout: maximum timeout for soft shutdown
1950
  @type reason: list of reasons
1951
  @param reason: the reason trail for this reboot
1952
  @rtype: None
1953

1954
  """
1955
  running_instances = GetInstanceListForHypervisor(instance.hypervisor,
1956
                                                   instance.hvparams)
1957

    
1958
  if instance.name not in running_instances:
1959
    _Fail("Cannot reboot instance %s that is not running", instance.name)
1960

    
1961
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
1962
  if reboot_type == constants.INSTANCE_REBOOT_SOFT:
1963
    try:
1964
      hyper.RebootInstance(instance)
1965
    except errors.HypervisorError, err:
1966
      _Fail("Failed to soft reboot instance %s: %s", instance.name, err)
1967
  elif reboot_type == constants.INSTANCE_REBOOT_HARD:
1968
    try:
1969
      InstanceShutdown(instance, shutdown_timeout, reason, store_reason=False)
1970
      result = StartInstance(instance, False, reason, store_reason=False)
1971
      _StoreInstReasonTrail(instance.name, reason)
1972
      return result
1973
    except errors.HypervisorError, err:
1974
      _Fail("Failed to hard reboot instance %s: %s", instance.name, err)
1975
  else:
1976
    _Fail("Invalid reboot_type received: %s", reboot_type)
1977

    
1978

    
1979
def InstanceBalloonMemory(instance, memory):
1980
  """Resize an instance's memory.
1981

1982
  @type instance: L{objects.Instance}
1983
  @param instance: the instance object
1984
  @type memory: int
1985
  @param memory: new memory amount in MB
1986
  @rtype: None
1987

1988
  """
1989
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
1990
  running = hyper.ListInstances(instance.hvparams)
1991
  if instance.name not in running:
1992
    logging.info("Instance %s is not running, cannot balloon", instance.name)
1993
    return
1994
  try:
1995
    hyper.BalloonInstanceMemory(instance, memory)
1996
  except errors.HypervisorError, err:
1997
    _Fail("Failed to balloon instance memory: %s", err, exc=True)
1998

    
1999

    
2000
def MigrationInfo(instance):
2001
  """Gather information about an instance to be migrated.
2002

2003
  @type instance: L{objects.Instance}
2004
  @param instance: the instance definition
2005

2006
  """
2007
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
2008
  try:
2009
    info = hyper.MigrationInfo(instance)
2010
  except errors.HypervisorError, err:
2011
    _Fail("Failed to fetch migration information: %s", err, exc=True)
2012
  return info
2013

    
2014

    
2015
def AcceptInstance(instance, info, target):
2016
  """Prepare the node to accept an instance.
2017

2018
  @type instance: L{objects.Instance}
2019
  @param instance: the instance definition
2020
  @type info: string/data (opaque)
2021
  @param info: migration information, from the source node
2022
  @type target: string
2023
  @param target: target host (usually ip), on this node
2024

2025
  """
2026
  # TODO: why is this required only for DTS_EXT_MIRROR?
2027
  if instance.disk_template in constants.DTS_EXT_MIRROR:
2028
    # Create the symlinks, as the disks are not active
2029
    # in any way
2030
    try:
2031
      _GatherAndLinkBlockDevs(instance)
2032
    except errors.BlockDeviceError, err:
2033
      _Fail("Block device error: %s", err, exc=True)
2034

    
2035
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
2036
  try:
2037
    hyper.AcceptInstance(instance, info, target)
2038
  except errors.HypervisorError, err:
2039
    if instance.disk_template in constants.DTS_EXT_MIRROR:
2040
      _RemoveBlockDevLinks(instance.name, instance.disks)
2041
    _Fail("Failed to accept instance: %s", err, exc=True)
2042

    
2043

    
2044
def FinalizeMigrationDst(instance, info, success):
2045
  """Finalize any preparation to accept an instance.
2046

2047
  @type instance: L{objects.Instance}
2048
  @param instance: the instance definition
2049
  @type info: string/data (opaque)
2050
  @param info: migration information, from the source node
2051
  @type success: boolean
2052
  @param success: whether the migration was a success or a failure
2053

2054
  """
2055
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
2056
  try:
2057
    hyper.FinalizeMigrationDst(instance, info, success)
2058
  except errors.HypervisorError, err:
2059
    _Fail("Failed to finalize migration on the target node: %s", err, exc=True)
2060

    
2061

    
2062
def MigrateInstance(cluster_name, instance, target, live):
2063
  """Migrates an instance to another node.
2064

2065
  @type cluster_name: string
2066
  @param cluster_name: name of the cluster
2067
  @type instance: L{objects.Instance}
2068
  @param instance: the instance definition
2069
  @type target: string
2070
  @param target: the target node name
2071
  @type live: boolean
2072
  @param live: whether the migration should be done live or not (the
2073
      interpretation of this parameter is left to the hypervisor)
2074
  @raise RPCFail: if migration fails for some reason
2075

2076
  """
2077
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
2078

    
2079
  try:
2080
    hyper.MigrateInstance(cluster_name, instance, target, live)
2081
  except errors.HypervisorError, err:
2082
    _Fail("Failed to migrate instance: %s", err, exc=True)
2083

    
2084

    
2085
def FinalizeMigrationSource(instance, success, live):
2086
  """Finalize the instance migration on the source node.
2087

2088
  @type instance: L{objects.Instance}
2089
  @param instance: the instance definition of the migrated instance
2090
  @type success: bool
2091
  @param success: whether the migration succeeded or not
2092
  @type live: bool
2093
  @param live: whether the user requested a live migration or not
2094
  @raise RPCFail: If the execution fails for some reason
2095

2096
  """
2097
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
2098

    
2099
  try:
2100
    hyper.FinalizeMigrationSource(instance, success, live)
2101
  except Exception, err:  # pylint: disable=W0703
2102
    _Fail("Failed to finalize the migration on the source node: %s", err,
2103
          exc=True)
2104

    
2105

    
2106
def GetMigrationStatus(instance):
2107
  """Get the migration status
2108

2109
  @type instance: L{objects.Instance}
2110
  @param instance: the instance that is being migrated
2111
  @rtype: L{objects.MigrationStatus}
2112
  @return: the status of the current migration (one of
2113
           L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
2114
           progress info that can be retrieved from the hypervisor
2115
  @raise RPCFail: If the migration status cannot be retrieved
2116

2117
  """
2118
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
2119
  try:
2120
    return hyper.GetMigrationStatus(instance)
2121
  except Exception, err:  # pylint: disable=W0703
2122
    _Fail("Failed to get migration status: %s", err, exc=True)
2123

    
2124

    
2125
def HotplugDevice(instance, action, dev_type, device, extra, seq):
2126
  """Hotplug a device
2127

2128
  Hotplug is currently supported only for KVM Hypervisor.
2129
  @type instance: L{objects.Instance}
2130
  @param instance: the instance to which we hotplug a device
2131
  @type action: string
2132
  @param action: the hotplug action to perform
2133
  @type dev_type: string
2134
  @param dev_type: the device type to hotplug
2135
  @type device: either L{objects.NIC} or L{objects.Disk}
2136
  @param device: the device object to hotplug
2137
  @type extra: string
2138
  @param extra: extra info used by hotplug code (e.g. disk link)
2139
  @type seq: int
2140
  @param seq: the index of the device from master perspective
2141
  @raise RPCFail: in case instance does not have KVM hypervisor
2142

2143
  """
2144
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
2145
  try:
2146
    hyper.VerifyHotplugSupport(instance, action, dev_type)
2147
  except errors.HotplugError, err:
2148
    _Fail("Hotplug is not supported: %s", err)
2149

    
2150
  if action == constants.HOTPLUG_ACTION_ADD:
2151
    fn = hyper.HotAddDevice
2152
  elif action == constants.HOTPLUG_ACTION_REMOVE:
2153
    fn = hyper.HotDelDevice
2154
  elif action == constants.HOTPLUG_ACTION_MODIFY:
2155
    fn = hyper.HotModDevice
2156
  else:
2157
    assert action in constants.HOTPLUG_ALL_ACTIONS
2158

    
2159
  return fn(instance, dev_type, device, extra, seq)
2160

    
2161

    
2162
def HotplugSupported(instance):
2163
  """Checks if hotplug is generally supported.
2164

2165
  """
2166
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
2167
  try:
2168
    hyper.HotplugSupported(instance)
2169
  except errors.HotplugError, err:
2170
    _Fail("Hotplug is not supported: %s", err)
2171

    
2172

    
2173
def BlockdevCreate(disk, size, owner, on_primary, info, excl_stor):
2174
  """Creates a block device for an instance.
2175

2176
  @type disk: L{objects.Disk}
2177
  @param disk: the object describing the disk we should create
2178
  @type size: int
2179
  @param size: the size of the physical underlying device, in MiB
2180
  @type owner: str
2181
  @param owner: the name of the instance for which disk is created,
2182
      used for device cache data
2183
  @type on_primary: boolean
2184
  @param on_primary:  indicates if it is the primary node or not
2185
  @type info: string
2186
  @param info: string that will be sent to the physical device
2187
      creation, used for example to set (LVM) tags on LVs
2188
  @type excl_stor: boolean
2189
  @param excl_stor: Whether exclusive_storage is active
2190

2191
  @return: the new unique_id of the device (this can sometime be
2192
      computed only after creation), or None. On secondary nodes,
2193
      it's not required to return anything.
2194

2195
  """
2196
  # TODO: remove the obsolete "size" argument
2197
  # pylint: disable=W0613
2198
  clist = []
2199
  if disk.children:
2200
    for child in disk.children:
2201
      try:
2202
        crdev = _RecursiveAssembleBD(child, owner, on_primary)
2203
      except errors.BlockDeviceError, err:
2204
        _Fail("Can't assemble device %s: %s", child, err)
2205
      if on_primary or disk.AssembleOnSecondary():
2206
        # we need the children open in case the device itself has to
2207
        # be assembled
2208
        try:
2209
          # pylint: disable=E1103
2210
          crdev.Open()
2211
        except errors.BlockDeviceError, err:
2212
          _Fail("Can't make child '%s' read-write: %s", child, err)
2213
      clist.append(crdev)
2214

    
2215
  try:
2216
    device = bdev.Create(disk, clist, excl_stor)
2217
  except errors.BlockDeviceError, err:
2218
    _Fail("Can't create block device: %s", err)
2219

    
2220
  if on_primary or disk.AssembleOnSecondary():
2221
    try:
2222
      device.Assemble()
2223
    except errors.BlockDeviceError, err:
2224
      _Fail("Can't assemble device after creation, unusual event: %s", err)
2225
    if on_primary or disk.OpenOnSecondary():
2226
      try:
2227
        device.Open(force=True)
2228
      except errors.BlockDeviceError, err:
2229
        _Fail("Can't make device r/w after creation, unusual event: %s", err)
2230
    DevCacheManager.UpdateCache(device.dev_path, owner,
2231
                                on_primary, disk.iv_name)
2232

    
2233
  device.SetInfo(info)
2234

    
2235
  return device.unique_id
2236

    
2237

    
2238
def _DumpDevice(source_path, target_path, offset, size):
2239
  """This function images/wipes the device using a local file.
2240

2241
  @type source_path: string
2242
  @param source_path: path of the image or data source (e.g., "/dev/zero")
2243

2244
  @type target_path: string
2245
  @param target_path: path of the device to image/wipe
2246

2247
  @type offset: int
2248
  @param offset: offset in MiB in the output file
2249

2250
  @type size: int
2251
  @param size: maximum size in MiB to write (data source might be smaller)
2252

2253
  @return: None
2254
  @raise RPCFail: in case of failure
2255

2256
  """
2257
  # Internal sizes are always in Mebibytes; if the following "dd" command
2258
  # should use a different block size the offset and size given to this
2259
  # function must be adjusted accordingly before being passed to "dd".
2260
  block_size = 1024 * 1024
2261

    
2262
  cmd = [constants.DD_CMD, "if=%s" % source_path, "seek=%d" % offset,
2263
         "bs=%s" % block_size, "oflag=direct", "of=%s" % target_path,
2264
         "count=%d" % size]
  result = utils.RunCmd(cmd)
2266

    
2267
  if result.failed:
2268
    _Fail("Dump command '%s' exited with error: %s; output: %s", result.cmd,
2269
          result.fail_reason, result.output)
2270

    
2271

    
2272
def _DownloadAndDumpDevice(source_url, target_path, size):
2273
  """This function images a device using a downloaded image file.
2274

2275
  @type source_url: string
2276
  @param source_url: URL of image to dump to disk
2277

2278
  @type target_path: string
2279
  @param target_path: path of the device to image
2280

2281
  @type size: int
2282
  @param size: maximum size in MiB to write (data source might be smaller)
2283

2284
  @rtype: NoneType
2285
  @return: None
2286
  @raise RPCFail: in case of download or write failures
2287

2288
  """
2289
  class DDParams(object):
2290
    def __init__(self, current_size, total_size):
2291
      self.current_size = current_size
2292
      self.total_size = total_size
2293
      self.image_size_error = False
2294

    
2295
  def dd_write(ddparams, out):
2296
    if ddparams.current_size < ddparams.total_size:
2297
      ddparams.current_size += len(out)
2298
      target_file.write(out)
2299
    else:
2300
      ddparams.image_size_error = True
2301
      return -1
2302

    
2303
  target_file = open(target_path, "w")
2304
  ddparams = DDParams(0, 1024 * 1024 * size)
2305

    
2306
  curl = pycurl.Curl()
2307
  curl.setopt(pycurl.VERBOSE, True)
2308
  curl.setopt(pycurl.NOSIGNAL, True)
2309
  curl.setopt(pycurl.USERAGENT, http.HTTP_GANETI_VERSION)
2310
  curl.setopt(pycurl.URL, source_url)
2311
  curl.setopt(pycurl.WRITEFUNCTION, lambda out: dd_write(ddparams, out))
2312

    
2313
  try:
2314
    curl.perform()
2315
  except pycurl.error:
2316
    if ddparams.image_size_error:
2317
      _Fail("Disk image larger than the disk")
2318
    else:
2319
      raise
2320

    
2321
  target_file.close()
2322

    
2323

    
2324
def BlockdevWipe(disk, offset, size):
2325
  """Wipes a block device.
2326

2327
  @type disk: L{objects.Disk}
2328
  @param disk: the disk object we want to wipe
2329
  @type offset: int
2330
  @param offset: The offset in MiB in the file
2331
  @type size: int
2332
  @param size: The size in MiB to write
2333

2334
  """
2335
  try:
2336
    rdev = _RecursiveFindBD(disk)
2337
  except errors.BlockDeviceError:
2338
    rdev = None
2339

    
2340
  if not rdev:
2341
    _Fail("Cannot wipe device %s: device not found", disk.iv_name)
2342
  if offset < 0:
2343
    _Fail("Negative offset")
2344
  if size < 0:
2345
    _Fail("Negative size")
2346
  if offset > rdev.size:
2347
    _Fail("Wipe offset is bigger than device size")
2348
  if (offset + size) > rdev.size:
2349
    _Fail("Wipe offset and size are bigger than device size")
2350

    
2351
  _DumpDevice("/dev/zero", rdev.dev_path, offset, size)
2352

    
2353

    
2354
def BlockdevImage(disk, image, size):
2355
  """Images a block device either by dumping a local file or
2356
  downloading a URL.
2357

2358
  @type disk: L{objects.Disk}
2359
  @param disk: the disk object we want to image
2360

2361
  @type image: string
2362
  @param image: file path or URL of the disk image to be dumped
2363

2364
  @type size: int
2365
  @param size: The size in MiB to write
2366

2367
  @rtype: NoneType
2368
  @return: None
2369
  @raise RPCFail: in case of failure
2370

2371
  """
2372
  try:
2373
    rdev = _RecursiveFindBD(disk)
2374
  except errors.BlockDeviceError:
2375
    rdev = None
2376

    
2377
  if not rdev:
2378
    _Fail("Cannot image device %s: device not found", disk.iv_name)
2379
  if size < 0:
2380
    _Fail("Negative size")
2381
  if size > rdev.size:
2382
    _Fail("Image size is bigger than device size")
2383

    
2384
  if utils.IsUrl(image):
2385
    _DownloadAndDumpDevice(image, rdev.dev_path, size)
2386
  else:
2387
    _DumpDevice(image, rdev.dev_path, 0, size)
2388

    
2389

    
2390
def BlockdevPauseResumeSync(disks, pause):
2391
  """Pause or resume the sync of the block device.
2392

2393
  @type disks: list of L{objects.Disk}
2394
  @param disks: the disks object we want to pause/resume
2395
  @type pause: bool
2396
  @param pause: Whether to pause or resume the sync
2397

2398
  """
2399
  success = []
2400
  for disk in disks:
2401
    try:
2402
      rdev = _RecursiveFindBD(disk)
2403
    except errors.BlockDeviceError:
2404
      rdev = None
2405

    
2406
    if not rdev:
2407
      success.append((False, ("Cannot change sync for device %s:"
2408
                              " device not found" % disk.iv_name)))
2409
      continue
2410

    
2411
    result = rdev.PauseResumeSync(pause)
2412

    
2413
    if result:
2414
      success.append((result, None))
2415
    else:
2416
      if pause:
2417
        msg = "Pause"
2418
      else:
2419
        msg = "Resume"
2420
      success.append((result, "%s for device %s failed" % (msg, disk.iv_name)))
2421

    
2422
  return success
2423

    
2424

    
2425
def BlockdevRemove(disk):
2426
  """Remove a block device.
2427

2428
  @note: This is intended to be called recursively.
2429

2430
  @type disk: L{objects.Disk}
2431
  @param disk: the disk object we should remove
2432
  @rtype: boolean
2433
  @return: the success of the operation
2434

2435
  """
2436
  msgs = []
2437
  try:
2438
    rdev = _RecursiveFindBD(disk)
2439
  except errors.BlockDeviceError, err:
2440
    # probably can't attach
2441
    logging.info("Can't attach to device %s in remove", disk)
2442
    rdev = None
2443
  if rdev is not None:
2444
    r_path = rdev.dev_path
2445

    
2446
    def _TryRemove():
2447
      try:
2448
        rdev.Remove()
2449
        return []
2450
      except errors.BlockDeviceError, err:
2451
        return [str(err)]
2452

    
2453
    msgs.extend(utils.SimpleRetry([], _TryRemove,
2454
                                  constants.DISK_REMOVE_RETRY_INTERVAL,
2455
                                  constants.DISK_REMOVE_RETRY_TIMEOUT))
2456

    
2457
    if not msgs:
2458
      DevCacheManager.RemoveCache(r_path)
2459

    
2460
  if disk.children:
2461
    for child in disk.children:
2462
      try:
2463
        BlockdevRemove(child)
2464
      except RPCFail, err:
2465
        msgs.append(str(err))
2466

    
2467
  if msgs:
2468
    _Fail("; ".join(msgs))
2469

    
2470

    
2471
def _RecursiveAssembleBD(disk, owner, as_primary):
2472
  """Activate a block device for an instance.
2473

2474
  This is run on the primary and secondary nodes for an instance.
2475

2476
  @note: this function is called recursively.
2477

2478
  @type disk: L{objects.Disk}
2479
  @param disk: the disk we try to assemble
2480
  @type owner: str
2481
  @param owner: the name of the instance which owns the disk
2482
  @type as_primary: boolean
2483
  @param as_primary: if we should make the block device
2484
      read/write
2485

2486
  @return: the assembled device or None (in case no device
2487
      was assembled)
2488
  @raise errors.BlockDeviceError: in case there is an error
2489
      during the activation of the children or the device
2490
      itself
2491

2492
  """
2493
  children = []
2494
  if disk.children:
2495
    mcn = disk.ChildrenNeeded()
2496
    if mcn == -1:
2497
      mcn = 0 # max number of Nones allowed
2498
    else:
2499
      mcn = len(disk.children) - mcn # max number of Nones
2500
    for chld_disk in disk.children:
2501
      try:
2502
        cdev = _RecursiveAssembleBD(chld_disk, owner, as_primary)
2503
      except errors.BlockDeviceError, err:
2504
        if children.count(None) >= mcn:
2505
          raise
2506
        cdev = None
2507
        logging.error("Error in child activation (but continuing): %s",
2508
                      str(err))
2509
      children.append(cdev)
2510

    
2511
  if as_primary or disk.AssembleOnSecondary():
2512
    r_dev = bdev.Assemble(disk, children)
2513
    result = r_dev
2514
    if as_primary or disk.OpenOnSecondary():
2515
      r_dev.Open()
2516
    DevCacheManager.UpdateCache(r_dev.dev_path, owner,
2517
                                as_primary, disk.iv_name)
2518

    
2519
  else:
2520
    result = True
2521
  return result
2522

    
2523

    
2524
def BlockdevAssemble(disk, owner, as_primary, idx):
2525
  """Activate a block device for an instance.
2526

2527
  This is a wrapper over _RecursiveAssembleBD.
2528

2529
  @rtype: tuple
2530
  @return: a tuple with the C{/dev/...} path and the created symlink
2531
      for primary nodes, and (C{True}, C{True}) for secondary nodes
2532

2533
  """
2534
  try:
2535
    result = _RecursiveAssembleBD(disk, owner, as_primary)
2536
    if isinstance(result, BlockDev):
2537
      # pylint: disable=E1103
2538
      dev_path = result.dev_path
2539
      link_name = None
2540
      if as_primary:
2541
        link_name = _SymlinkBlockDev(owner, dev_path, idx)
2542
    elif result:
2543
      return result, result
2544
    else:
2545
      _Fail("Unexpected result from _RecursiveAssembleBD")
2546
  except errors.BlockDeviceError, err:
2547
    _Fail("Error while assembling disk: %s", err, exc=True)
2548
  except OSError, err:
2549
    _Fail("Error while symlinking disk: %s", err, exc=True)
2550

    
2551
  return dev_path, link_name
2552

    
2553

    
2554
def BlockdevShutdown(disk):
2555
  """Shut down a block device.
2556

2557
  First, if the device is assembled (Attach() is successful), then
2558
  the device is shutdown. Then the children of the device are
2559
  shutdown.
2560

2561
  This function is called recursively. Note that we don't cache the
  children or such, as opposed to assemble; shutdown of different
  devices doesn't require that the upper device was active.
2564

2565
  @type disk: L{objects.Disk}
2566
  @param disk: the description of the disk we should
2567
      shutdown
2568
  @rtype: None
2569

2570
  """
2571
  msgs = []
2572
  r_dev = _RecursiveFindBD(disk)
2573
  if r_dev is not None:
2574
    r_path = r_dev.dev_path
2575
    try:
2576
      r_dev.Shutdown()
2577
      DevCacheManager.RemoveCache(r_path)
2578
    except errors.BlockDeviceError, err:
2579
      msgs.append(str(err))
2580

    
2581
  if disk.children:
2582
    for child in disk.children:
2583
      try:
2584
        BlockdevShutdown(child)
2585
      except RPCFail, err:
2586
        msgs.append(str(err))
2587

    
2588
  if msgs:
2589
    _Fail("; ".join(msgs))
2590

    
2591

    
2592
def BlockdevAddchildren(parent_cdev, new_cdevs):
2593
  """Extend a mirrored block device.
2594

2595
  @type parent_cdev: L{objects.Disk}
2596
  @param parent_cdev: the disk to which we should add children
2597
  @type new_cdevs: list of L{objects.Disk}
2598
  @param new_cdevs: the list of children which we should add
2599
  @rtype: None
2600

2601
  """
2602
  parent_bdev = _RecursiveFindBD(parent_cdev)
2603
  if parent_bdev is None:
2604
    _Fail("Can't find parent device '%s' in add children", parent_cdev)
2605
  new_bdevs = [_RecursiveFindBD(disk) for disk in new_cdevs]
2606
  if new_bdevs.count(None) > 0:
2607
    _Fail("Can't find new device(s) to add: %s:%s", new_bdevs, new_cdevs)
2608
  parent_bdev.AddChildren(new_bdevs)
2609

    
2610

    
2611
def BlockdevRemovechildren(parent_cdev, new_cdevs):
2612
  """Shrink a mirrored block device.
2613

2614
  @type parent_cdev: L{objects.Disk}
2615
  @param parent_cdev: the disk from which we should remove children
2616
  @type new_cdevs: list of L{objects.Disk}
2617
  @param new_cdevs: the list of children which we should remove
2618
  @rtype: None
2619

2620
  """
2621
  parent_bdev = _RecursiveFindBD(parent_cdev)
2622
  if parent_bdev is None:
2623
    _Fail("Can't find parent device '%s' in remove children", parent_cdev)
2624
  devs = []
2625
  for disk in new_cdevs:
2626
    rpath = disk.StaticDevPath()
2627
    if rpath is None:
2628
      bd = _RecursiveFindBD(disk)
2629
      if bd is None:
2630
        _Fail("Can't find device %s while removing children", disk)
2631
      else:
2632
        devs.append(bd.dev_path)
2633
    else:
2634
      if not utils.IsNormAbsPath(rpath):
2635
        _Fail("Strange path returned from StaticDevPath: '%s'", rpath)
2636
      devs.append(rpath)
2637
  parent_bdev.RemoveChildren(devs)
2638

    
2639

    
2640
def BlockdevGetmirrorstatus(disks):
2641
  """Get the mirroring status of a list of devices.
2642

2643
  @type disks: list of L{objects.Disk}
2644
  @param disks: the list of disks which we should query
2645
  @rtype: list
2646
  @return: List of L{objects.BlockDevStatus}, one for each disk
2647
  @raise errors.BlockDeviceError: if any of the disks cannot be
2648
      found
2649

2650
  """
2651
  stats = []
2652
  for dsk in disks:
2653
    rbd = _RecursiveFindBD(dsk)
2654
    if rbd is None:
2655
      _Fail("Can't find device %s", dsk)
2656

    
2657
    stats.append(rbd.CombinedSyncStatus())
2658

    
2659
  return stats
2660

    
2661

    
2662
def BlockdevGetmirrorstatusMulti(disks):
2663
  """Get the mirroring status of a list of devices.
2664

2665
  @type disks: list of L{objects.Disk}
2666
  @param disks: the list of disks which we should query
2667
  @rtype: list
2668
  @return: List of tuples, (bool, status), one for each disk; bool denotes
2669
    success/failure, status is L{objects.BlockDevStatus} on success, string
2670
    otherwise
2671

2672
  """
2673
  result = []
2674
  for disk in disks:
2675
    try:
2676
      rbd = _RecursiveFindBD(disk)
2677
      if rbd is None:
2678
        result.append((False, "Can't find device %s" % disk))
2679
        continue
2680

    
2681
      status = rbd.CombinedSyncStatus()
2682
    except errors.BlockDeviceError, err:
2683
      logging.exception("Error while getting disk status")
2684
      result.append((False, str(err)))
2685
    else:
2686
      result.append((True, status))
2687

    
2688
  assert len(disks) == len(result)
2689

    
2690
  return result
2691

    
2692

    
2693
def _RecursiveFindBD(disk):
2694
  """Check if a device is activated.
2695

2696
  If so, return information about the real device.
2697

2698
  @type disk: L{objects.Disk}
2699
  @param disk: the disk object we need to find
2700

2701
  @return: None if the device can't be found,
2702
      otherwise the device instance
2703

2704
  """
2705
  children = []
2706
  if disk.children:
2707
    for chdisk in disk.children:
2708
      children.append(_RecursiveFindBD(chdisk))
2709

    
2710
  return bdev.FindDevice(disk, children)
2711

    
2712

    
2713
def _OpenRealBD(disk):
2714
  """Opens the underlying block device of a disk.
2715

2716
  @type disk: L{objects.Disk}
2717
  @param disk: the disk object we want to open
2718

2719
  """
2720
  real_disk = _RecursiveFindBD(disk)
2721
  if real_disk is None:
2722
    _Fail("Block device '%s' is not set up", disk)
2723

    
2724
  real_disk.Open()
2725

    
2726
  return real_disk
2727

    
2728

    
2729
def BlockdevFind(disk):
2730
  """Check if a device is activated.
2731

2732
  If it is, return information about the real device.
2733

2734
  @type disk: L{objects.Disk}
2735
  @param disk: the disk to find
2736
  @rtype: None or objects.BlockDevStatus
  @return: None if the disk cannot be found, otherwise the current
           sync status information
2739

2740
  """
2741
  try:
2742
    rbd = _RecursiveFindBD(disk)
2743
  except errors.BlockDeviceError, err:
2744
    _Fail("Failed to find device: %s", err, exc=True)
2745

    
2746
  if rbd is None:
2747
    return None
2748

    
2749
  return rbd.GetSyncStatus()
2750

    
2751

    
2752
def BlockdevGetdimensions(disks):
2753
  """Computes the size of the given disks.
2754

2755
  If a disk is not found, returns None instead.
2756

2757
  @type disks: list of L{objects.Disk}
2758
  @param disks: the list of disk to compute the size for
2759
  @rtype: list
2760
  @return: list with elements None if the disk cannot be found,
2761
      otherwise the pair (size, spindles), where spindles is None if the
2762
      device doesn't support that
2763

2764
  """
2765
  result = []
2766
  for cf in disks:
2767
    try:
2768
      rbd = _RecursiveFindBD(cf)
2769
    except errors.BlockDeviceError:
2770
      result.append(None)
2771
      continue
2772
    if rbd is None:
2773
      result.append(None)
2774
    else:
2775
      result.append(rbd.GetActualDimensions())
2776
  return result
2777

    
2778

    
2779
def UploadFile(file_name, data, mode, uid, gid, atime, mtime):
2780
  """Write a file to the filesystem.
2781

2782
  This allows the master to overwrite(!) a file. It will only perform
2783
  the operation if the file belongs to a list of configuration files.
2784

2785
  @type file_name: str
2786
  @param file_name: the target file name
2787
  @type data: str
2788
  @param data: the new contents of the file
2789
  @type mode: int
2790
  @param mode: the mode to give the file (can be None)
2791
  @type uid: string
2792
  @param uid: the owner of the file
2793
  @type gid: string
2794
  @param gid: the group of the file
2795
  @type atime: float
2796
  @param atime: the atime to set on the file (can be None)
2797
  @type mtime: float
2798
  @param mtime: the mtime to set on the file (can be None)
2799
  @rtype: None
2800

2801
  """
2802
  file_name = vcluster.LocalizeVirtualPath(file_name)
2803

    
2804
  if not os.path.isabs(file_name):
2805
    _Fail("Filename passed to UploadFile is not absolute: '%s'", file_name)
2806

    
2807
  if file_name not in _ALLOWED_UPLOAD_FILES:
2808
    _Fail("Filename passed to UploadFile not in allowed upload targets: '%s'",
2809
          file_name)
2810

    
2811
  raw_data = _Decompress(data)
2812

    
2813
  if not (isinstance(uid, basestring) and isinstance(gid, basestring)):
2814
    _Fail("Invalid username/groupname type")
2815

    
2816
  getents = runtime.GetEnts()
2817
  uid = getents.LookupUser(uid)
2818
  gid = getents.LookupGroup(gid)
2819

    
2820
  utils.SafeWriteFile(file_name, None,
2821
                      data=raw_data, mode=mode, uid=uid, gid=gid,
2822
                      atime=atime, mtime=mtime)
2823

    
2824

    
2825
def RunOob(oob_program, command, node, timeout):
2826
  """Executes oob_program with given command on given node.
2827

2828
  @param oob_program: The path to the executable oob_program
2829
  @param command: The command to invoke on oob_program
2830
  @param node: The node given as an argument to the program
2831
  @param timeout: Timeout after which we kill the oob program
2832

2833
  @return: stdout
2834
  @raise RPCFail: If execution fails for some reason
2835

2836
  """
2837
  result = utils.RunCmd([oob_program, command, node], timeout=timeout)
2838

    
2839
  if result.failed:
2840
    _Fail("'%s' failed with reason '%s'; output: %s", result.cmd,
2841
          result.fail_reason, result.output)
2842

    
2843
  return result.stdout
2844

    
2845

    
2846
def _OSOndiskAPIVersion(os_dir):
2847
  """Compute and return the API version of a given OS.
2848

2849
  This function will try to read the API version of the OS residing in
2850
  the 'os_dir' directory.
2851

2852
  @type os_dir: str
2853
  @param os_dir: the directory in which we should look for the OS
2854
  @rtype: tuple
2855
  @return: tuple (status, data) with status denoting the validity and
      data holding either the valid versions or an error message
2857

2858
  """
2859
  api_file = utils.PathJoin(os_dir, constants.OS_API_FILE)
2860

    
2861
  try:
2862
    st = os.stat(api_file)
2863
  except EnvironmentError, err:
2864
    return False, ("Required file '%s' not found under path %s: %s" %
2865
                   (constants.OS_API_FILE, os_dir, utils.ErrnoOrStr(err)))
2866

    
2867
  if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
2868
    return False, ("File '%s' in %s is not a regular file" %
2869
                   (constants.OS_API_FILE, os_dir))
2870

    
2871
  try:
2872
    api_versions = utils.ReadFile(api_file).splitlines()
2873
  except EnvironmentError, err:
2874
    return False, ("Error while reading the API version file at %s: %s" %
2875
                   (api_file, utils.ErrnoOrStr(err)))
2876

    
2877
  try:
2878
    api_versions = [int(version.strip()) for version in api_versions]
2879
  except (TypeError, ValueError), err:
2880
    return False, ("API version(s) can't be converted to integer: %s" %
2881
                   str(err))
2882

    
2883
  return True, api_versions
2884

    
2885

    
2886
def DiagnoseOS(top_dirs=None):
2887
  """Compute the validity for all OSes.
2888

2889
  @type top_dirs: list
2890
  @param top_dirs: the list of directories in which to
2891
      search (if not given defaults to
2892
      L{pathutils.OS_SEARCH_PATH})
2893
  @rtype: list of L{objects.OS}
2894
  @return: a list of tuples (name, path, status, diagnose, variants,
2895
      parameters, api_version) for all (potential) OSes under all
2896
      search paths, where:
2897
          - name is the (potential) OS name
2898
          - path is the full path to the OS
2899
          - status True/False is the validity of the OS
2900
          - diagnose is the error message for an invalid OS, otherwise empty
2901
          - variants is a list of supported OS variants, if any
2902
          - parameters is a list of (name, help) parameters, if any
          - api_version is a list of supported OS API versions
2904

2905
  """
2906
  if top_dirs is None:
2907
    top_dirs = pathutils.OS_SEARCH_PATH
2908

    
2909
  result = []
2910
  for dir_name in top_dirs:
2911
    if os.path.isdir(dir_name):
2912
      try:
2913
        f_names = utils.ListVisibleFiles(dir_name)
2914
      except EnvironmentError, err:
2915
        logging.exception("Can't list the OS directory %s: %s", dir_name, err)
2916
        break
2917
      for name in f_names:
2918
        os_path = utils.PathJoin(dir_name, name)
2919
        status, os_inst = _TryOSFromDisk(name, base_dir=dir_name)
2920
        if status:
2921
          diagnose = ""
2922
          variants = os_inst.supported_variants
2923
          parameters = os_inst.supported_parameters
2924
          api_versions = os_inst.api_versions
2925
        else:
2926
          diagnose = os_inst
2927
          variants = parameters = api_versions = []
2928
        result.append((name, os_path, status, diagnose, variants,
2929
                       parameters, api_versions))
2930

    
2931
  return result
2932

    
2933

    
2934
def _TryOSFromDisk(name, base_dir=None):
2935
  """Create an OS instance from disk.
2936

2937
  This function will return an OS instance if the given name is a
2938
  valid OS name.
2939

2940
  @type base_dir: string
2941
  @keyword base_dir: Base directory containing OS installations.
2942
                     Defaults to a search in all the OS_SEARCH_PATH dirs.
2943
  @rtype: tuple
2944
  @return: success and either the OS instance if we find a valid one,
2945
      or error message
2946

2947
  """
2948
  if base_dir is None:
2949
    os_dir = utils.FindFile(name, pathutils.OS_SEARCH_PATH, os.path.isdir)
2950
  else:
2951
    os_dir = utils.FindFile(name, [base_dir], os.path.isdir)
2952

    
2953
  if os_dir is None:
2954
    return False, "Directory for OS %s not found in search path" % name
2955

    
2956
  status, api_versions = _OSOndiskAPIVersion(os_dir)
2957
  if not status:
2958
    # push the error up
2959
    return status, api_versions
2960

    
2961
  if not constants.OS_API_VERSIONS.intersection(api_versions):
2962
    return False, ("API version mismatch for path '%s': found %s, want %s." %
2963
                   (os_dir, api_versions, constants.OS_API_VERSIONS))
2964

    
2965
  # OS Files dictionary, we will populate it with the absolute path
2966
  # names; if the value is True, then it is a required file, otherwise
2967
  # an optional one
2968
  os_files = dict.fromkeys(constants.OS_SCRIPTS, True)
2969

    
2970
  if max(api_versions) >= constants.OS_API_V15:
2971
    os_files[constants.OS_VARIANTS_FILE] = False
2972

    
2973
  if max(api_versions) >= constants.OS_API_V20:
2974
    os_files[constants.OS_PARAMETERS_FILE] = True
2975
  else:
2976
    del os_files[constants.OS_SCRIPT_VERIFY]
2977

    
2978
  for (filename, required) in os_files.items():
2979
    os_files[filename] = utils.PathJoin(os_dir, filename)
2980

    
2981
    try:
2982
      st = os.stat(os_files[filename])
2983
    except EnvironmentError, err:
2984
      if err.errno == errno.ENOENT and not required:
2985
        del os_files[filename]
2986
        continue
2987
      return False, ("File '%s' under path '%s' is missing (%s)" %
2988
                     (filename, os_dir, utils.ErrnoOrStr(err)))
2989

    
2990
    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
2991
      return False, ("File '%s' under path '%s' is not a regular file" %
2992
                     (filename, os_dir))
2993

    
2994
    if filename in constants.OS_SCRIPTS:
2995
      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
2996
        return False, ("File '%s' under path '%s' is not executable" %
2997
                       (filename, os_dir))
2998

    
2999
  variants = []
3000
  if constants.OS_VARIANTS_FILE in os_files:
3001
    variants_file = os_files[constants.OS_VARIANTS_FILE]
3002
    try:
3003
      variants = \
3004
        utils.FilterEmptyLinesAndComments(utils.ReadFile(variants_file))
3005
    except EnvironmentError, err:
3006
      # we accept missing files, but not other errors
3007
      if err.errno != errno.ENOENT:
3008
        return False, ("Error while reading the OS variants file at %s: %s" %
3009
                       (variants_file, utils.ErrnoOrStr(err)))
3010

    
3011
  parameters = []
3012
  if constants.OS_PARAMETERS_FILE in os_files:
3013
    parameters_file = os_files[constants.OS_PARAMETERS_FILE]
3014
    try:
3015
      parameters = utils.ReadFile(parameters_file).splitlines()
3016
    except EnvironmentError, err:
3017
      return False, ("Error while reading the OS parameters file at %s: %s" %
3018
                     (parameters_file, utils.ErrnoOrStr(err)))
3019
    parameters = [v.split(None, 1) for v in parameters]
3020

    
3021
  os_obj = objects.OS(name=name, path=os_dir,
3022
                      create_script=os_files[constants.OS_SCRIPT_CREATE],
3023
                      export_script=os_files[constants.OS_SCRIPT_EXPORT],
3024
                      import_script=os_files[constants.OS_SCRIPT_IMPORT],
3025
                      rename_script=os_files[constants.OS_SCRIPT_RENAME],
3026
                      verify_script=os_files.get(constants.OS_SCRIPT_VERIFY,
3027
                                                 None),
3028
                      supported_variants=variants,
3029
                      supported_parameters=parameters,
3030
                      api_versions=api_versions)
3031
  return True, os_obj
3032

    
3033

    
3034
def OSFromDisk(name, base_dir=None):
3035
  """Create an OS instance from disk.
3036

3037
  This function will return an OS instance if the given name is a
3038
  valid OS name. Otherwise, it will raise an appropriate
3039
  L{RPCFail} exception, detailing why this is not a valid OS.
3040

3041
  This is just a wrapper over L{_TryOSFromDisk}, which doesn't raise
3042
  an exception but returns true/false status data.
3043

3044
  @type base_dir: string
3045
  @keyword base_dir: Base directory containing OS installations.
3046
                     Defaults to a search in all the OS_SEARCH_PATH dirs.
3047
  @rtype: L{objects.OS}
3048
  @return: the OS instance if we find a valid one
3049
  @raise RPCFail: if we don't find a valid OS
3050

3051
  """
3052
  name_only = objects.OS.GetName(name)
3053
  status, payload = _TryOSFromDisk(name_only, base_dir)
3054

    
3055
  if not status:
3056
    _Fail(payload)
3057

    
3058
  return payload
3059

    
3060

    
3061
def OSCoreEnv(os_name, inst_os, os_params, debug=0):
3062
  """Calculate the basic environment for an os script.
3063

3064
  @type os_name: str
3065
  @param os_name: full operating system name (including variant)
3066
  @type inst_os: L{objects.OS}
3067
  @param inst_os: operating system for which the environment is being built
3068
  @type os_params: dict
3069
  @param os_params: the OS parameters
3070
  @type debug: integer
3071
  @param debug: debug level (0 or 1, for OS Api 10)
3072
  @rtype: dict
3073
  @return: dict of environment variables
3074
  @raise errors.BlockDeviceError: if the block device
3075
      cannot be found
3076

3077
  """
3078
  result = {}
3079
  api_version = \
3080
    max(constants.OS_API_VERSIONS.intersection(inst_os.api_versions))
3081
  result["OS_API_VERSION"] = "%d" % api_version
3082
  result["OS_NAME"] = inst_os.name
3083
  result["DEBUG_LEVEL"] = "%d" % debug
3084

    
3085
  # OS variants
3086
  if api_version >= constants.OS_API_V15 and inst_os.supported_variants:
3087
    variant = objects.OS.GetVariant(os_name)
3088
    if not variant:
3089
      variant = inst_os.supported_variants[0]
3090
  else:
3091
    variant = ""
3092
  result["OS_VARIANT"] = variant
3093

    
3094
  # OS params
3095
  for pname, pvalue in os_params.items():
3096
    result["OSP_%s" % pname.upper()] = pvalue
3097

    
3098
  # Set a default path otherwise programs called by OS scripts (or
3099
  # even hooks called from OS scripts) might break, and we don't want
3100
  # to have each script require setting a PATH variable
3101
  result["PATH"] = constants.HOOKS_PATH
3102

    
3103
  return result
3104

    
3105

    
3106
def OSEnvironment(instance, inst_os, debug=0):
3107
  """Calculate the environment for an os script.
3108

3109
  @type instance: L{objects.Instance}
3110
  @param instance: target instance for the os script run
3111
  @type inst_os: L{objects.OS}
3112
  @param inst_os: operating system for which the environment is being built
3113
  @type debug: integer
3114
  @param debug: debug level (0 or 1, for OS Api 10)
3115
  @rtype: dict
3116
  @return: dict of environment variables
3117
  @raise errors.BlockDeviceError: if the block device
3118
      cannot be found
3119

3120
  """
3121
  result = OSCoreEnv(instance.os, inst_os, instance.osparams, debug=debug)
3122

    
3123
  for attr in ["name", "os", "uuid", "ctime", "mtime", "primary_node"]:
3124
    result["INSTANCE_%s" % attr.upper()] = str(getattr(instance, attr))
3125

    
3126
  result["HYPERVISOR"] = instance.hypervisor
3127
  result["DISK_COUNT"] = "%d" % len(instance.disks)
3128
  result["NIC_COUNT"] = "%d" % len(instance.nics)
3129
  result["INSTANCE_SECONDARY_NODES"] = \
3130
      ("%s" % " ".join(instance.secondary_nodes))
3131

    
3132
  # Disks
3133
  for idx, disk in enumerate(instance.disks):
3134
    real_disk = _OpenRealBD(disk)
3135
    result["DISK_%d_PATH" % idx] = real_disk.dev_path
3136
    result["DISK_%d_ACCESS" % idx] = disk.mode
3137
    result["DISK_%d_UUID" % idx] = disk.uuid
3138
    if disk.name:
3139
      result["DISK_%d_NAME" % idx] = disk.name
3140
    if constants.HV_DISK_TYPE in instance.hvparams:
3141
      result["DISK_%d_FRONTEND_TYPE" % idx] = \
3142
        instance.hvparams[constants.HV_DISK_TYPE]
3143
    if disk.dev_type in constants.DTS_BLOCK:
3144
      result["DISK_%d_BACKEND_TYPE" % idx] = "block"
3145
    elif disk.dev_type in constants.DTS_FILEBASED:
3146
      result["DISK_%d_BACKEND_TYPE" % idx] = \
3147
        "file:%s" % disk.logical_id[0]
3148

    
3149
  # NICs
3150
  for idx, nic in enumerate(instance.nics):
3151
    result["NIC_%d_MAC" % idx] = nic.mac
3152
    result["NIC_%d_UUID" % idx] = nic.uuid
3153
    if nic.name:
3154
      result["NIC_%d_NAME" % idx] = nic.name
3155
    if nic.ip:
3156
      result["NIC_%d_IP" % idx] = nic.ip
3157
    result["NIC_%d_MODE" % idx] = nic.nicparams[constants.NIC_MODE]
3158
    if nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
3159
      result["NIC_%d_BRIDGE" % idx] = nic.nicparams[constants.NIC_LINK]
3160
    if nic.nicparams[constants.NIC_LINK]:
3161
      result["NIC_%d_LINK" % idx] = nic.nicparams[constants.NIC_LINK]
3162
    if nic.netinfo:
3163
      nobj = objects.Network.FromDict(nic.netinfo)
3164
      result.update(nobj.HooksDict("NIC_%d_" % idx))
3165
    if constants.HV_NIC_TYPE in instance.hvparams:
3166
      result["NIC_%d_FRONTEND_TYPE" % idx] = \
3167
        instance.hvparams[constants.HV_NIC_TYPE]
3168

    
3169
  # HV/BE params
3170
  for source, kind in [(instance.beparams, "BE"), (instance.hvparams, "HV")]:
3171
    for key, value in source.items():
3172
      result["INSTANCE_%s_%s" % (kind, key)] = str(value)
3173

    
3174
  return result
3175

    
3176

    
3177
def DiagnoseExtStorage(top_dirs=None):
3178
  """Compute the validity for all ExtStorage Providers.
3179

3180
  @type top_dirs: list
3181
  @param top_dirs: the list of directories in which to
3182
      search (if not given defaults to
3183
      L{pathutils.ES_SEARCH_PATH})
3184
  @rtype: list of L{objects.ExtStorage}
3185
  @return: a list of tuples (name, path, status, diagnose, parameters)
3186
      for all (potential) ExtStorage Providers under all
3187
      search paths, where:
3188
          - name is the (potential) ExtStorage Provider
3189
          - path is the full path to the ExtStorage Provider
3190
          - status True/False is the validity of the ExtStorage Provider
3191
          - diagnose is the error message for an invalid ExtStorage Provider,
3192
            otherwise empty
3193
          - parameters is a list of (name, help) parameters, if any
3194

3195
  """
3196
  if top_dirs is None:
3197
    top_dirs = pathutils.ES_SEARCH_PATH
3198

    
3199
  result = []
3200
  for dir_name in top_dirs:
3201
    if os.path.isdir(dir_name):
3202
      try:
3203
        f_names = utils.ListVisibleFiles(dir_name)
3204
      except EnvironmentError, err:
3205
        logging.exception("Can't list the ExtStorage directory %s: %s",
3206
                          dir_name, err)
3207
        break
3208
      for name in f_names:
3209
        es_path = utils.PathJoin(dir_name, name)
3210
        status, es_inst = bdev.ExtStorageFromDisk(name, base_dir=dir_name)
3211
        if status:
3212
          diagnose = ""
3213
          parameters = es_inst.supported_parameters
3214
        else:
3215
          diagnose = es_inst
3216
          parameters = []
3217
        result.append((name, es_path, status, diagnose, parameters))
3218

    
3219
  return result
3220

    
3221

    
3222
def BlockdevGrow(disk, amount, dryrun, backingstore, excl_stor):
3223
  """Grow a stack of block devices.
3224

3225
  This function is called recursively, with the childrens being the
3226
  first ones to resize.
3227

3228
  @type disk: L{objects.Disk}
3229
  @param disk: the disk to be grown
3230
  @type amount: integer
3231
  @param amount: the amount (in mebibytes) to grow with
3232
  @type dryrun: boolean
3233
  @param dryrun: whether to execute the operation in simulation mode
3234
      only, without actually increasing the size
3235
  @param backingstore: whether to execute the operation on backing storage
3236
      only, or on "logical" storage only; e.g. DRBD is logical storage,
3237
      whereas LVM, file, RBD are backing storage
3238
  @rtype: (status, result)
3239
  @type excl_stor: boolean
3240
  @param excl_stor: Whether exclusive_storage is active
3241
  @return: a tuple with the status of the operation (True/False), and
3242
      the errors message if status is False
3243

3244
  """
3245
  r_dev = _RecursiveFindBD(disk)
3246
  if r_dev is None:
3247
    _Fail("Cannot find block device %s", disk)
3248

    
3249
  try:
3250
    r_dev.Grow(amount, dryrun, backingstore, excl_stor)
3251
  except errors.BlockDeviceError, err:
3252
    _Fail("Failed to grow block device: %s", err, exc=True)
3253

    
3254

    
3255
def BlockdevSnapshot(disk):
3256
  """Create a snapshot copy of a block device.
3257

3258
  This function is called recursively, and the snapshot is actually created
3259
  just for the leaf lvm backend device.
3260

3261
  @type disk: L{objects.Disk}
3262
  @param disk: the disk to be snapshotted
3263
  @rtype: string
3264
  @return: snapshot disk ID as (vg, lv)
3265

3266
  """
3267
  if disk.dev_type == constants.DT_DRBD8:
3268
    if not disk.children:
3269
      _Fail("DRBD device '%s' without backing storage cannot be snapshotted",
3270
            disk.unique_id)
3271
    return BlockdevSnapshot(disk.children[0])
3272
  elif disk.dev_type == constants.DT_PLAIN:
3273
    r_dev = _RecursiveFindBD(disk)
3274
    if r_dev is not None:
3275
      # FIXME: choose a saner value for the snapshot size
3276
      # let's stay on the safe side and ask for the full size, for now
3277
      return r_dev.Snapshot(disk.size)
3278
    else:
3279
      _Fail("Cannot find block device %s", disk)
3280
  else:
3281
    _Fail("Cannot snapshot non-lvm block device '%s' of type '%s'",
3282
          disk.unique_id, disk.dev_type)
3283

    
3284

    
3285
def BlockdevSetInfo(disk, info):
3286
  """Sets 'metadata' information on block devices.
3287

3288
  This function sets 'info' metadata on block devices. Initial
3289
  information is set at device creation; this function should be used
3290
  for example after renames.
3291

3292
  @type disk: L{objects.Disk}
3293
  @param disk: the disk to be grown
3294
  @type info: string
3295
  @param info: new 'info' metadata
3296
  @rtype: (status, result)
3297
  @return: a tuple with the status of the operation (True/False), and
3298
      the errors message if status is False
3299

3300
  """
3301
  r_dev = _RecursiveFindBD(disk)
3302
  if r_dev is None:
3303
    _Fail("Cannot find block device %s", disk)
3304

    
3305
  try:
3306
    r_dev.SetInfo(info)
3307
  except errors.BlockDeviceError, err:
3308
    _Fail("Failed to set information on block device: %s", err, exc=True)
3309

    
3310

    
3311
def FinalizeExport(instance, snap_disks):
3312
  """Write out the export configuration information.
3313

3314
  @type instance: L{objects.Instance}
3315
  @param instance: the instance which we export, used for
3316
      saving configuration
3317
  @type snap_disks: list of L{objects.Disk}
3318
  @param snap_disks: list of snapshot block devices, which
3319
      will be used to get the actual name of the dump file
3320

3321
  @rtype: None
3322

3323
  """
3324
  destdir = utils.PathJoin(pathutils.EXPORT_DIR, instance.name + ".new")
3325
  finaldestdir = utils.PathJoin(pathutils.EXPORT_DIR, instance.name)
3326

    
3327
  config = objects.SerializableConfigParser()
3328

    
3329
  config.add_section(constants.INISECT_EXP)
3330
  config.set(constants.INISECT_EXP, "version", "0")
3331
  config.set(constants.INISECT_EXP, "timestamp", "%d" % int(time.time()))
3332
  config.set(constants.INISECT_EXP, "source", instance.primary_node)
3333
  config.set(constants.INISECT_EXP, "os", instance.os)
3334
  config.set(constants.INISECT_EXP, "compression", "none")
3335

    
3336
  config.add_section(constants.INISECT_INS)
3337
  config.set(constants.INISECT_INS, "name", instance.name)
3338
  config.set(constants.INISECT_INS, "maxmem", "%d" %
3339
             instance.beparams[constants.BE_MAXMEM])
3340
  config.set(constants.INISECT_INS, "minmem", "%d" %
3341
             instance.beparams[constants.BE_MINMEM])
3342
  # "memory" is deprecated, but useful for exporting to old ganeti versions
3343
  config.set(constants.INISECT_INS, "memory", "%d" %
3344
             instance.beparams[constants.BE_MAXMEM])
3345
  config.set(constants.INISECT_INS, "vcpus", "%d" %
3346
             instance.beparams[constants.BE_VCPUS])
3347
  config.set(constants.INISECT_INS, "disk_template", instance.disk_template)
3348
  config.set(constants.INISECT_INS, "hypervisor", instance.hypervisor)
3349
  config.set(constants.INISECT_INS, "tags", " ".join(instance.GetTags()))
3350

    
3351
  nic_total = 0
3352
  for nic_count, nic in enumerate(instance.nics):
3353
    nic_total += 1
3354
    config.set(constants.INISECT_INS, "nic%d_mac" %
3355
               nic_count, "%s" % nic.mac)
3356
    config.set(constants.INISECT_INS, "nic%d_ip" % nic_count, "%s" % nic.ip)
3357
    config.set(constants.INISECT_INS, "nic%d_network" % nic_count,
3358
               "%s" % nic.network)
3359
    config.set(constants.INISECT_INS, "nic%d_name" % nic_count,
3360
               "%s" % nic.name)
3361
    for param in constants.NICS_PARAMETER_TYPES:
3362
      config.set(constants.INISECT_INS, "nic%d_%s" % (nic_count, param),
3363
                 "%s" % nic.nicparams.get(param, None))
3364
  # TODO: redundant: on load can read nics until it doesn't exist
3365
  config.set(constants.INISECT_INS, "nic_count", "%d" % nic_total)
3366

    
3367
  disk_total = 0
3368
  for disk_count, disk in enumerate(snap_disks):
3369
    if disk:
3370
      disk_total += 1
3371
      config.set(constants.INISECT_INS, "disk%d_ivname" % disk_count,
3372
                 ("%s" % disk.iv_name))
3373
      config.set(constants.INISECT_INS, "disk%d_dump" % disk_count,
3374
                 ("%s" % disk.logical_id[1]))
3375
      config.set(constants.INISECT_INS, "disk%d_size" % disk_count,
3376
                 ("%d" % disk.size))
3377
      config.set(constants.INISECT_INS, "disk%d_name" % disk_count,
3378
                 "%s" % disk.name)
3379

    
3380
  config.set(constants.INISECT_INS, "disk_count", "%d" % disk_total)
3381

    
3382
  # New-style hypervisor/backend parameters
3383

    
3384
  config.add_section(constants.INISECT_HYP)
3385
  for name, value in instance.hvparams.items():
3386
    if name not in constants.HVC_GLOBALS:
3387
      config.set(constants.INISECT_HYP, name, str(value))
3388

    
3389
  config.add_section(constants.INISECT_BEP)
3390
  for name, value in instance.beparams.items():
3391
    config.set(constants.INISECT_BEP, name, str(value))
3392

    
3393
  config.add_section(constants.INISECT_OSP)
3394
  for name, value in instance.osparams.items():
3395
    config.set(constants.INISECT_OSP, name, str(value))
3396

    
3397
  config.add_section(constants.INISECT_OSP_PRIVATE)
3398
  for name, value in instance.osparams_private.items():
3399
    config.set(constants.INISECT_OSP_PRIVATE, name, str(value.Get()))
3400

    
3401
  utils.WriteFile(utils.PathJoin(destdir, constants.EXPORT_CONF_FILE),
3402
                  data=config.Dumps())
3403
  shutil.rmtree(finaldestdir, ignore_errors=True)
3404
  shutil.move(destdir, finaldestdir)
3405

    
3406

    
3407
def ExportInfo(dest):
3408
  """Get export configuration information.
3409

3410
  @type dest: str
3411
  @param dest: directory containing the export
3412

3413
  @rtype: L{objects.SerializableConfigParser}
3414
  @return: a serializable config file containing the
3415
      export info
3416

3417
  """
3418
  cff = utils.PathJoin(dest, constants.EXPORT_CONF_FILE)
3419

    
3420
  config = objects.SerializableConfigParser()
3421
  config.read(cff)
3422

    
3423
  if (not config.has_section(constants.INISECT_EXP) or
3424
      not config.has_section(constants.INISECT_INS)):
3425
    _Fail("Export info file doesn't have the required fields")
3426

    
3427
  return config.Dumps()
3428

    
3429

    
3430
def ListExports():
3431
  """Return a list of exports currently available on this machine.
3432

3433
  @rtype: list
3434
  @return: list of the exports
3435

3436
  """
3437
  if os.path.isdir(pathutils.EXPORT_DIR):
3438
    return sorted(utils.ListVisibleFiles(pathutils.EXPORT_DIR))
3439
  else:
3440
    _Fail("No exports directory")
3441

    
3442

    
3443
def RemoveExport(export):
3444
  """Remove an existing export from the node.
3445

3446
  @type export: str
3447
  @param export: the name of the export to remove
3448
  @rtype: None
3449

3450
  """
3451
  target = utils.PathJoin(pathutils.EXPORT_DIR, export)
3452

    
3453
  try:
3454
    shutil.rmtree(target)
3455
  except EnvironmentError, err:
3456
    _Fail("Error while removing the export: %s", err, exc=True)
3457

    
3458

    
3459
def BlockdevRename(devlist):
3460
  """Rename a list of block devices.
3461

3462
  @type devlist: list of tuples
3463
  @param devlist: list of tuples of the form  (disk, new_unique_id); disk is
3464
      an L{objects.Disk} object describing the current disk, and new
3465
      unique_id is the name we rename it to
3466
  @rtype: boolean
3467
  @return: True if all renames succeeded, False otherwise
3468

3469
  """
3470
  msgs = []
3471
  result = True
3472
  for disk, unique_id in devlist:
3473
    dev = _RecursiveFindBD(disk)
3474
    if dev is None:
3475
      msgs.append("Can't find device %s in rename" % str(disk))
3476
      result = False
3477
      continue
3478
    try:
3479
      old_rpath = dev.dev_path
3480
      dev.Rename(unique_id)
3481
      new_rpath = dev.dev_path
3482
      if old_rpath != new_rpath:
3483
        DevCacheManager.RemoveCache(old_rpath)
3484
        # FIXME: we should add the new cache information here, like:
3485
        # DevCacheManager.UpdateCache(new_rpath, owner, ...)
3486
        # but we don't have the owner here - maybe parse from existing
3487
        # cache? for now, we only lose lvm data when we rename, which
3488
        # is less critical than DRBD or MD
3489
    except errors.BlockDeviceError, err:
3490
      msgs.append("Can't rename device '%s' to '%s': %s" %
3491
                  (dev, unique_id, err))
3492
      logging.exception("Can't rename device '%s' to '%s'", dev, unique_id)
3493
      result = False
3494
  if not result:
3495
    _Fail("; ".join(msgs))
3496

    
3497

    
3498
def _TransformFileStorageDir(fs_dir):
3499
  """Checks whether given file_storage_dir is valid.
3500

3501
  Checks wheter the given fs_dir is within the cluster-wide default
3502
  file_storage_dir or the shared_file_storage_dir, which are stored in
3503
  SimpleStore. Only paths under those directories are allowed.
3504

3505
  @type fs_dir: str
3506
  @param fs_dir: the path to check
3507

3508
  @return: the normalized path if valid, None otherwise
3509

3510
  """
3511
  filestorage.CheckFileStoragePath(fs_dir)
3512

    
3513
  return os.path.normpath(fs_dir)
3514

    
3515

    
3516
def CreateFileStorageDir(file_storage_dir):
3517
  """Create file storage directory.
3518

3519
  @type file_storage_dir: str
3520
  @param file_storage_dir: directory to create
3521

3522
  @rtype: tuple
3523
  @return: tuple with first element a boolean indicating wheter dir
3524
      creation was successful or not
3525

3526
  """
3527
  file_storage_dir = _TransformFileStorageDir(file_storage_dir)
3528
  if os.path.exists(file_storage_dir):
3529
    if not os.path.isdir(file_storage_dir):
3530
      _Fail("Specified storage dir '%s' is not a directory",
3531
            file_storage_dir)
3532
  else:
3533
    try:
3534
      os.makedirs(file_storage_dir, 0750)
3535
    except OSError, err:
3536
      _Fail("Cannot create file storage directory '%s': %s",
3537
            file_storage_dir, err, exc=True)
3538

    
3539

    
3540
def RemoveFileStorageDir(file_storage_dir):
3541
  """Remove file storage directory.
3542

3543
  Remove it only if it's empty. If not log an error and return.
3544

3545
  @type file_storage_dir: str
3546
  @param file_storage_dir: the directory we should cleanup
3547
  @rtype: tuple (success,)
3548
  @return: tuple of one element, C{success}, denoting
3549
      whether the operation was successful
3550

3551
  """
3552
  file_storage_dir = _TransformFileStorageDir(file_storage_dir)
3553
  if os.path.exists(file_storage_dir):
3554
    if not os.path.isdir(file_storage_dir):
3555
      _Fail("Specified Storage directory '%s' is not a directory",
3556
            file_storage_dir)
3557
    # deletes dir only if empty, otherwise we want to fail the rpc call
3558
    try:
3559
      os.rmdir(file_storage_dir)
3560
    except OSError, err:
3561
      _Fail("Cannot remove file storage directory '%s': %s",
3562
            file_storage_dir, err)
3563

    
3564

    
3565
def RenameFileStorageDir(old_file_storage_dir, new_file_storage_dir):
3566
  """Rename the file storage directory.
3567

3568
  @type old_file_storage_dir: str
3569
  @param old_file_storage_dir: the current path
3570
  @type new_file_storage_dir: str
3571
  @param new_file_storage_dir: the name we should rename to
3572
  @rtype: tuple (success,)
3573
  @return: tuple of one element, C{success}, denoting
3574
      whether the operation was successful
3575

3576
  """
3577
  old_file_storage_dir = _TransformFileStorageDir(old_file_storage_dir)
3578
  new_file_storage_dir = _TransformFileStorageDir(new_file_storage_dir)
3579
  if not os.path.exists(new_file_storage_dir):
3580
    if os.path.isdir(old_file_storage_dir):
3581
      try:
3582
        os.rename(old_file_storage_dir, new_file_storage_dir)
3583
      except OSError, err:
3584
        _Fail("Cannot rename '%s' to '%s': %s",
3585
              old_file_storage_dir, new_file_storage_dir, err)
3586
    else:
3587
      _Fail("Specified storage dir '%s' is not a directory",
3588
            old_file_storage_dir)
3589
  else:
3590
    if os.path.exists(old_file_storage_dir):
3591
      _Fail("Cannot rename '%s' to '%s': both locations exist",
3592
            old_file_storage_dir, new_file_storage_dir)
3593

    
3594

    
3595
def _EnsureJobQueueFile(file_name):
3596
  """Checks whether the given filename is in the queue directory.
3597

3598
  @type file_name: str
3599
  @param file_name: the file name we should check
3600
  @rtype: None
3601
  @raises RPCFail: if the file is not valid
3602

3603
  """
3604
  if not utils.IsBelowDir(pathutils.QUEUE_DIR, file_name):
3605
    _Fail("Passed job queue file '%s' does not belong to"
3606
          " the queue directory '%s'", file_name, pathutils.QUEUE_DIR)
3607

    
3608

    
3609
def JobQueueUpdate(file_name, content):
3610
  """Updates a file in the queue directory.
3611

3612
  This is just a wrapper over L{utils.io.WriteFile}, with proper
3613
  checking.
3614

3615
  @type file_name: str
3616
  @param file_name: the job file name
3617
  @type content: str
3618
  @param content: the new job contents
3619
  @rtype: boolean
3620
  @return: the success of the operation
3621

3622
  """
3623
  file_name = vcluster.LocalizeVirtualPath(file_name)
3624

    
3625
  _EnsureJobQueueFile(file_name)
3626
  getents = runtime.GetEnts()
3627

    
3628
  # Write and replace the file atomically
3629
  utils.WriteFile(file_name, data=_Decompress(content), uid=getents.masterd_uid,
3630
                  gid=getents.daemons_gid, mode=constants.JOB_QUEUE_FILES_PERMS)
3631

    
3632

    
3633
def JobQueueRename(old, new):
3634
  """Renames a job queue file.
3635

3636
  This is just a wrapper over os.rename with proper checking.
3637

3638
  @type old: str
3639
  @param old: the old (actual) file name
3640
  @type new: str
3641
  @param new: the desired file name
3642
  @rtype: tuple
3643
  @return: the success of the operation and payload
3644

3645
  """
3646
  old = vcluster.LocalizeVirtualPath(old)
3647
  new = vcluster.LocalizeVirtualPath(new)
3648

    
3649
  _EnsureJobQueueFile(old)
3650
  _EnsureJobQueueFile(new)
3651

    
3652
  getents = runtime.GetEnts()
3653

    
3654
  utils.RenameFile(old, new, mkdir=True, mkdir_mode=0750,
3655
                   dir_uid=getents.masterd_uid, dir_gid=getents.daemons_gid)
3656

    
3657

    
3658
def BlockdevClose(instance_name, disks):
3659
  """Closes the given block devices.
3660

3661
  This means they will be switched to secondary mode (in case of
3662
  DRBD).
3663

3664
  @param instance_name: if the argument is not empty, the symlinks
3665
      of this instance will be removed
3666
  @type disks: list of L{objects.Disk}
3667
  @param disks: the list of disks to be closed
3668
  @rtype: tuple (success, message)
3669
  @return: a tuple of success and message, where success
3670
      indicates the succes of the operation, and message
3671
      which will contain the error details in case we
3672
      failed
3673

3674
  """
3675
  bdevs = []
3676
  for cf in disks:
3677
    rd = _RecursiveFindBD(cf)
3678
    if rd is None:
3679
      _Fail("Can't find device %s", cf)
3680
    bdevs.append(rd)
3681

    
3682
  msg = []
3683
  for rd in bdevs:
3684
    try:
3685
      rd.Close()
3686
    except errors.BlockDeviceError, err:
3687
      msg.append(str(err))
3688
  if msg:
3689
    _Fail("Can't make devices secondary: %s", ",".join(msg))
3690
  else:
3691
    if instance_name:
3692
      _RemoveBlockDevLinks(instance_name, disks)
3693

    
3694

    
3695
def ValidateHVParams(hvname, hvparams):
3696
  """Validates the given hypervisor parameters.
3697

3698
  @type hvname: string
3699
  @param hvname: the hypervisor name
3700
  @type hvparams: dict
3701
  @param hvparams: the hypervisor parameters to be validated
3702
  @rtype: None
3703

3704
  """
3705
  try:
3706
    hv_type = hypervisor.GetHypervisor(hvname)
3707
    hv_type.ValidateParameters(hvparams)
3708
  except errors.HypervisorError, err:
3709
    _Fail(str(err), log=False)
3710

    
3711

    
3712
def _CheckOSPList(os_obj, parameters):
3713
  """Check whether a list of parameters is supported by the OS.
3714

3715
  @type os_obj: L{objects.OS}
3716
  @param os_obj: OS object to check
3717
  @type parameters: list
3718
  @param parameters: the list of parameters to check
3719

3720
  """
3721
  supported = [v[0] for v in os_obj.supported_parameters]
3722
  delta = frozenset(parameters).difference(supported)
3723
  if delta:
3724
    _Fail("The following parameters are not supported"
3725
          " by the OS %s: %s" % (os_obj.name, utils.CommaJoin(delta)))
3726

    
3727

    
3728
def ValidateOS(required, osname, checks, osparams):
3729
  """Validate the given OS parameters.
3730

3731
  @type required: boolean
3732
  @param required: whether absence of the OS should translate into
3733
      failure or not
3734
  @type osname: string
3735
  @param osname: the OS to be validated
3736
  @type checks: list
3737
  @param checks: list of the checks to run (currently only 'parameters')
3738
  @type osparams: dict
3739
  @param osparams: dictionary with OS parameters, some of which may be
3740
                   private.
3741
  @rtype: boolean
3742
  @return: True if the validation passed, or False if the OS was not
3743
      found and L{required} was false
3744

3745
  """
3746
  if not constants.OS_VALIDATE_CALLS.issuperset(checks):
3747
    _Fail("Unknown checks required for OS %s: %s", osname,
3748
          set(checks).difference(constants.OS_VALIDATE_CALLS))
3749

    
3750
  name_only = objects.OS.GetName(osname)
3751
  status, tbv = _TryOSFromDisk(name_only, None)
3752

    
3753
  if not status:
3754
    if required:
3755
      _Fail(tbv)
3756
    else:
3757
      return False
3758

    
3759
  if max(tbv.api_versions) < constants.OS_API_V20:
3760
    return True
3761

    
3762
  if constants.OS_VALIDATE_PARAMETERS in checks:
3763
    _CheckOSPList(tbv, osparams.keys())
3764

    
3765
  validate_env = OSCoreEnv(osname, tbv, osparams)
3766
  result = utils.RunCmd([tbv.verify_script] + checks, env=validate_env,
3767
                        cwd=tbv.path, reset_env=True)
3768
  if result.failed:
3769
    logging.error("os validate command '%s' returned error: %s output: %s",
3770
                  result.cmd, result.fail_reason, result.output)
3771
    _Fail("OS validation script failed (%s), output: %s",
3772
          result.fail_reason, result.output, log=False)
3773

    
3774
  return True
3775

    
3776

    
3777
def DemoteFromMC():
3778
  """Demotes the current node from master candidate role.
3779

3780
  """
3781
  # try to ensure we're not the master by mistake
3782
  master, myself = ssconf.GetMasterAndMyself()
3783
  if master == myself:
3784
    _Fail("ssconf status shows I'm the master node, will not demote")
3785

    
3786
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "check", constants.MASTERD])
3787
  if not result.failed:
3788
    _Fail("The master daemon is running, will not demote")
3789

    
3790
  try:
3791
    if os.path.isfile(pathutils.CLUSTER_CONF_FILE):
3792
      utils.CreateBackup(pathutils.CLUSTER_CONF_FILE)
3793
  except EnvironmentError, err:
3794
    if err.errno != errno.ENOENT:
3795
      _Fail("Error while backing up cluster file: %s", err, exc=True)
3796

    
3797
  utils.RemoveFile(pathutils.CLUSTER_CONF_FILE)
3798

    
3799

    
3800
def _GetX509Filenames(cryptodir, name):
3801
  """Returns the full paths for the private key and certificate.
3802

3803
  """
3804
  return (utils.PathJoin(cryptodir, name),
3805
          utils.PathJoin(cryptodir, name, _X509_KEY_FILE),
3806
          utils.PathJoin(cryptodir, name, _X509_CERT_FILE))
3807

    
3808

    
3809
def CreateX509Certificate(validity, cryptodir=pathutils.CRYPTO_KEYS_DIR):
3810
  """Creates a new X509 certificate for SSL/TLS.
3811

3812
  @type validity: int
3813
  @param validity: Validity in seconds
3814
  @rtype: tuple; (string, string)
3815
  @return: Certificate name and public part
3816

3817
  """
3818
  (key_pem, cert_pem) = \
3819
    utils.GenerateSelfSignedX509Cert(netutils.Hostname.GetSysName(),
3820
                                     min(validity, _MAX_SSL_CERT_VALIDITY), 1)
3821

    
3822
  cert_dir = tempfile.mkdtemp(dir=cryptodir,
3823
                              prefix="x509-%s-" % utils.TimestampForFilename())
3824
  try:
3825
    name = os.path.basename(cert_dir)
3826
    assert len(name) > 5
3827

    
3828
    (_, key_file, cert_file) = _GetX509Filenames(cryptodir, name)
3829

    
3830
    utils.WriteFile(key_file, mode=0400, data=key_pem)
3831
    utils.WriteFile(cert_file, mode=0400, data=cert_pem)
3832

    
3833
    # Never return private key as it shouldn't leave the node
3834
    return (name, cert_pem)
3835
  except Exception:
3836
    shutil.rmtree(cert_dir, ignore_errors=True)
3837
    raise
3838

    
3839

    
3840
def RemoveX509Certificate(name, cryptodir=pathutils.CRYPTO_KEYS_DIR):
3841
  """Removes a X509 certificate.
3842

3843
  @type name: string
3844
  @param name: Certificate name
3845

3846
  """
3847
  (cert_dir, key_file, cert_file) = _GetX509Filenames(cryptodir, name)
3848

    
3849
  utils.RemoveFile(key_file)
3850
  utils.RemoveFile(cert_file)
3851

    
3852
  try:
3853
    os.rmdir(cert_dir)
3854
  except EnvironmentError, err:
3855
    _Fail("Cannot remove certificate directory '%s': %s",
3856
          cert_dir, err)
3857

    
3858

    
3859
def _GetImportExportIoCommand(instance, mode, ieio, ieargs):
3860
  """Returns the command for the requested input/output.
3861

3862
  @type instance: L{objects.Instance}
3863
  @param instance: The instance object
3864
  @param mode: Import/export mode
3865
  @param ieio: Input/output type
3866
  @param ieargs: Input/output arguments
3867

3868
  """
3869
  assert mode in (constants.IEM_IMPORT, constants.IEM_EXPORT)
3870

    
3871
  env = None
3872
  prefix = None
3873
  suffix = None
3874
  exp_size = None
3875

    
3876
  if ieio == constants.IEIO_FILE:
3877
    (filename, ) = ieargs
3878

    
3879
    if not utils.IsNormAbsPath(filename):
3880
      _Fail("Path '%s' is not normalized or absolute", filename)
3881

    
3882
    real_filename = os.path.realpath(filename)
3883
    directory = os.path.dirname(real_filename)
3884

    
3885
    if not utils.IsBelowDir(pathutils.EXPORT_DIR, real_filename):
3886
      _Fail("File '%s' is not under exports directory '%s': %s",
3887
            filename, pathutils.EXPORT_DIR, real_filename)
3888

    
3889
    # Create directory
3890
    utils.Makedirs(directory, mode=0750)
3891

    
3892
    quoted_filename = utils.ShellQuote(filename)
3893

    
3894
    if mode == constants.IEM_IMPORT:
3895
      suffix = "> %s" % quoted_filename
3896
    elif mode == constants.IEM_EXPORT:
3897
      suffix = "< %s" % quoted_filename
3898

    
3899
      # Retrieve file size
3900
      try:
3901
        st = os.stat(filename)
3902
      except EnvironmentError, err:
3903
        logging.error("Can't stat(2) %s: %s", filename, err)
3904
      else:
3905
        exp_size = utils.BytesToMebibyte(st.st_size)
3906

    
3907
  elif ieio == constants.IEIO_RAW_DISK:
3908
    (disk, ) = ieargs
3909

    
3910
    real_disk = _OpenRealBD(disk)
3911

    
3912
    if mode == constants.IEM_IMPORT:
3913
      # we use nocreat to fail if the device is not already there or we pass a
3914
      # wrong path; we use notrunc to no attempt truncate on an LV device
3915
      suffix = utils.BuildShellCmd("| dd of=%s conv=nocreat,notrunc bs=%s",
3916
                                   real_disk.dev_path,
3917
                                   str(1024 * 1024)) # 1 MB
3918

    
3919
    elif mode == constants.IEM_EXPORT:
3920
      # the block size on the read dd is 1MiB to match our units
3921
      prefix = utils.BuildShellCmd("dd if=%s bs=%s count=%s |",
3922
                                   real_disk.dev_path,
3923
                                   str(1024 * 1024), # 1 MB
3924
                                   str(disk.size))
3925
      exp_size = disk.size
3926

    
3927
  elif ieio == constants.IEIO_SCRIPT:
3928
    (disk, disk_index, ) = ieargs
3929

    
3930
    assert isinstance(disk_index, (int, long))
3931

    
3932
    inst_os = OSFromDisk(instance.os)
3933
    env = OSEnvironment(instance, inst_os)
3934

    
3935
    if mode == constants.IEM_IMPORT:
3936
      env["IMPORT_DEVICE"] = env["DISK_%d_PATH" % disk_index]
3937
      env["IMPORT_INDEX"] = str(disk_index)
3938
      script = inst_os.import_script
3939

    
3940
    elif mode == constants.IEM_EXPORT:
3941
      real_disk = _OpenRealBD(disk)
3942
      env["EXPORT_DEVICE"] = real_disk.dev_path
3943
      env["EXPORT_INDEX"] = str(disk_index)
3944
      script = inst_os.export_script
3945

    
3946
    # TODO: Pass special environment only to script
3947
    script_cmd = utils.BuildShellCmd("( cd %s && %s; )", inst_os.path, script)
3948

    
3949
    if mode == constants.IEM_IMPORT:
3950
      suffix = "| %s" % script_cmd
3951

    
3952
    elif mode == constants.IEM_EXPORT:
3953
      prefix = "%s |" % script_cmd
3954

    
3955
    # Let script predict size
3956
    exp_size = constants.IE_CUSTOM_SIZE
3957

    
3958
  else:
3959
    _Fail("Invalid %s I/O mode %r", mode, ieio)
3960

    
3961
  return (env, prefix, suffix, exp_size)
3962

    
3963

    
3964
def _CreateImportExportStatusDir(prefix):
3965
  """Creates status directory for import/export.
3966

3967
  """
3968
  return tempfile.mkdtemp(dir=pathutils.IMPORT_EXPORT_DIR,
3969
                          prefix=("%s-%s-" %
3970
                                  (prefix, utils.TimestampForFilename())))
3971

    
3972

    
3973
def StartImportExportDaemon(mode, opts, host, port, instance, component,
3974
                            ieio, ieioargs):
3975
  """Starts an import or export daemon.
3976

3977
  @param mode: Import/output mode
3978
  @type opts: L{objects.ImportExportOptions}
3979
  @param opts: Daemon options
3980
  @type host: string
3981
  @param host: Remote host for export (None for import)
3982
  @type port: int
3983
  @param port: Remote port for export (None for import)
3984
  @type instance: L{objects.Instance}
3985
  @param instance: Instance object
3986
  @type component: string
3987
  @param component: which part of the instance is transferred now,
3988
      e.g. 'disk/0'
3989
  @param ieio: Input/output type
3990
  @param ieioargs: Input/output arguments
3991

3992
  """
3993
  if mode == constants.IEM_IMPORT:
3994
    prefix = "import"
3995

    
3996
    if not (host is None and port is None):
3997
      _Fail("Can not specify host or port on import")
3998

    
3999
  elif mode == constants.IEM_EXPORT:
4000
    prefix = "export"
4001

    
4002
    if host is None or port is None:
4003
      _Fail("Host and port must be specified for an export")
4004

    
4005
  else:
4006
    _Fail("Invalid mode %r", mode)
4007

    
4008
  if (opts.key_name is None) ^ (opts.ca_pem is None):
4009
    _Fail("Cluster certificate can only be used for both key and CA")
4010

    
4011
  (cmd_env, cmd_prefix, cmd_suffix, exp_size) = \
4012
    _GetImportExportIoCommand(instance, mode, ieio, ieioargs)
4013

    
4014
  if opts.key_name is None:
4015
    # Use server.pem
4016
    key_path = pathutils.NODED_CERT_FILE
4017
    cert_path = pathutils.NODED_CERT_FILE
4018
    assert opts.ca_pem is None
4019
  else:
4020
    (_, key_path, cert_path) = _GetX509Filenames(pathutils.CRYPTO_KEYS_DIR,
4021
                                                 opts.key_name)
4022
    assert opts.ca_pem is not None
4023

    
4024
  for i in [key_path, cert_path]:
4025
    if not os.path.exists(i):
4026
      _Fail("File '%s' does not exist" % i)
4027

    
4028
  status_dir = _CreateImportExportStatusDir("%s-%s" % (prefix, component))
4029
  try:
4030
    status_file = utils.PathJoin(status_dir, _IES_STATUS_FILE)
4031
    pid_file = utils.PathJoin(status_dir, _IES_PID_FILE)
4032
    ca_file = utils.PathJoin(status_dir, _IES_CA_FILE)
4033

    
4034
    if opts.ca_pem is None:
4035
      # Use server.pem
4036
      ca = utils.ReadFile(pathutils.NODED_CERT_FILE)
4037
    else:
4038
      ca = opts.ca_pem
4039

    
4040
    # Write CA file
4041
    utils.WriteFile(ca_file, data=ca, mode=0400)
4042

    
4043
    cmd = [
4044
      pathutils.IMPORT_EXPORT_DAEMON,
4045
      status_file, mode,
4046
      "--key=%s" % key_path,
4047
      "--cert=%s" % cert_path,
4048
      "--ca=%s" % ca_file,
4049
      ]
4050

    
4051
    if host:
4052
      cmd.append("--host=%s" % host)
4053

    
4054
    if port:
4055
      cmd.append("--port=%s" % port)
4056

    
4057
    if opts.ipv6:
4058
      cmd.append("--ipv6")
4059
    else:
4060
      cmd.append("--ipv4")
4061

    
4062
    if opts.compress:
4063
      cmd.append("--compress=%s" % opts.compress)
4064

    
4065
    if opts.magic:
4066
      cmd.append("--magic=%s" % opts.magic)
4067

    
4068
    if exp_size is not None:
4069
      cmd.append("--expected-size=%s" % exp_size)
4070

    
4071
    if cmd_prefix:
4072
      cmd.append("--cmd-prefix=%s" % cmd_prefix)
4073

    
4074
    if cmd_suffix:
4075
      cmd.append("--cmd-suffix=%s" % cmd_suffix)
4076

    
4077
    if mode == constants.IEM_EXPORT:
4078
      # Retry connection a few times when connecting to remote peer
4079
      cmd.append("--connect-retries=%s" % constants.RIE_CONNECT_RETRIES)
4080
      cmd.append("--connect-timeout=%s" % constants.RIE_CONNECT_ATTEMPT_TIMEOUT)
4081
    elif opts.connect_timeout is not None:
4082
      assert mode == constants.IEM_IMPORT
4083
      # Overall timeout for establishing connection while listening
4084
      cmd.append("--connect-timeout=%s" % opts.connect_timeout)
4085

    
4086
    logfile = _InstanceLogName(prefix, instance.os, instance.name, component)
4087

    
4088
    # TODO: Once _InstanceLogName uses tempfile.mkstemp, StartDaemon has
4089
    # support for receiving a file descriptor for output
4090
    utils.StartDaemon(cmd, env=cmd_env, pidfile=pid_file,
4091
                      output=logfile)
4092

    
4093
    # The import/export name is simply the status directory name
4094
    return os.path.basename(status_dir)
4095

    
4096
  except Exception:
4097
    shutil.rmtree(status_dir, ignore_errors=True)
4098
    raise
4099

    
4100

    
4101
def GetImportExportStatus(names):
4102
  """Returns import/export daemon status.
4103

4104
  @type names: sequence
4105
  @param names: List of names
4106
  @rtype: List of dicts
4107
  @return: Returns a list of the state of each named import/export or None if a
4108
           status couldn't be read
4109

4110
  """
4111
  result = []
4112

    
4113
  for name in names:
4114
    status_file = utils.PathJoin(pathutils.IMPORT_EXPORT_DIR, name,
4115
                                 _IES_STATUS_FILE)
4116

    
4117
    try:
4118
      data = utils.ReadFile(status_file)
4119
    except EnvironmentError, err:
4120
      if err.errno != errno.ENOENT:
4121
        raise
4122
      data = None
4123

    
4124
    if not data:
4125
      result.append(None)
4126
      continue
4127

    
4128
    result.append(serializer.LoadJson(data))
4129

    
4130
  return result
4131

    
4132

    
4133
def AbortImportExport(name):
4134
  """Sends SIGTERM to a running import/export daemon.
4135

4136
  """
4137
  logging.info("Abort import/export %s", name)
4138

    
4139
  status_dir = utils.PathJoin(pathutils.IMPORT_EXPORT_DIR, name)
4140
  pid = utils.ReadLockedPidFile(utils.PathJoin(status_dir, _IES_PID_FILE))
4141

    
4142
  if pid:
4143
    logging.info("Import/export %s is running with PID %s, sending SIGTERM",
4144
                 name, pid)
4145
    utils.IgnoreProcessNotFound(os.kill, pid, signal.SIGTERM)
4146

    
4147

    
4148
def CleanupImportExport(name):
4149
  """Cleanup after an import or export.
4150

4151
  If the import/export daemon is still running it's killed. Afterwards the
4152
  whole status directory is removed.
4153

4154
  """
4155
  logging.info("Finalizing import/export %s", name)
4156

    
4157
  status_dir = utils.PathJoin(pathutils.IMPORT_EXPORT_DIR, name)
4158

    
4159
  pid = utils.ReadLockedPidFile(utils.PathJoin(status_dir, _IES_PID_FILE))
4160

    
4161
  if pid:
4162
    logging.info("Import/export %s is still running with PID %s",
4163
                 name, pid)
4164
    utils.KillProcess(pid, waitpid=False)
4165

    
4166
  shutil.rmtree(status_dir, ignore_errors=True)
4167

    
4168

    
4169
def _FindDisks(disks):
4170
  """Finds attached L{BlockDev}s for the given disks.
4171

4172
  @type disks: list of L{objects.Disk}
4173
  @param disks: the disk objects we need to find
4174

4175
  @return: list of L{BlockDev} objects or C{None} if a given disk
4176
           was not found or was no attached.
4177

4178
  """
4179
  bdevs = []
4180

    
4181
  for disk in disks:
4182
    rd = _RecursiveFindBD(disk)
4183
    if rd is None:
4184
      _Fail("Can't find device %s", disk)
4185
    bdevs.append(rd)
4186
  return bdevs
4187

    
4188

    
4189
def DrbdDisconnectNet(disks):
4190
  """Disconnects the network on a list of drbd devices.
4191

4192
  """
4193
  bdevs = _FindDisks(disks)
4194

    
4195
  # disconnect disks
4196
  for rd in bdevs:
4197
    try:
4198
      rd.DisconnectNet()
4199
    except errors.BlockDeviceError, err:
4200
      _Fail("Can't change network configuration to standalone mode: %s",
4201
            err, exc=True)
4202

    
4203

    
4204
def DrbdAttachNet(disks, instance_name, multimaster):
4205
  """Attaches the network on a list of drbd devices.
4206

4207
  """
4208
  bdevs = _FindDisks(disks)
4209

    
4210
  if multimaster:
4211
    for idx, rd in enumerate(bdevs):
4212
      try:
4213
        _SymlinkBlockDev(instance_name, rd.dev_path, idx)
4214
      except EnvironmentError, err:
4215
        _Fail("Can't create symlink: %s", err)
4216
  # reconnect disks, switch to new master configuration and if
4217
  # needed primary mode
4218
  for rd in bdevs:
4219
    try:
4220
      rd.AttachNet(multimaster)
4221
    except errors.BlockDeviceError, err:
4222
      _Fail("Can't change network configuration: %s", err)
4223

    
4224
  # wait until the disks are connected; we need to retry the re-attach
4225
  # if the device becomes standalone, as this might happen if the one
4226
  # node disconnects and reconnects in a different mode before the
4227
  # other node reconnects; in this case, one or both of the nodes will
4228
  # decide it has wrong configuration and switch to standalone
4229

    
4230
  def _Attach():
4231
    all_connected = True
4232

    
4233
    for rd in bdevs:
4234
      stats = rd.GetProcStatus()
4235

    
4236
      if multimaster:
4237
        # In the multimaster case we have to wait explicitly until
4238
        # the resource is Connected and UpToDate/UpToDate, because
4239
        # we promote *both nodes* to primary directly afterwards.
4240
        # Being in resync is not enough, since there is a race during which we
4241
        # may promote a node with an Outdated disk to primary, effectively
4242
        # tearing down the connection.
4243
        all_connected = (all_connected and
4244
                         stats.is_connected and
4245
                         stats.is_disk_uptodate and
4246
                         stats.peer_disk_uptodate)
4247
      else:
4248
        all_connected = (all_connected and
4249
                         (stats.is_connected or stats.is_in_resync))
4250

    
4251
      if stats.is_standalone:
4252
        # peer had different config info and this node became
4253
        # standalone, even though this should not happen with the
4254
        # new staged way of changing disk configs
4255
        try:
4256
          rd.AttachNet(multimaster)
4257
        except errors.BlockDeviceError, err:
4258
          _Fail("Can't change network configuration: %s", err)
4259

    
4260
    if not all_connected:
4261
      raise utils.RetryAgain()
4262

    
4263
  try:
4264
    # Start with a delay of 100 miliseconds and go up to 5 seconds
4265
    utils.Retry(_Attach, (0.1, 1.5, 5.0), 2 * 60)
4266
  except utils.RetryTimeout:
4267
    _Fail("Timeout in disk reconnecting")
4268

    
4269
  if multimaster:
4270
    # change to primary mode
4271
    for rd in bdevs:
4272
      try:
4273
        rd.Open()
4274
      except errors.BlockDeviceError, err:
4275
        _Fail("Can't change to primary mode: %s", err)
4276

    
4277

    
4278
def DrbdWaitSync(disks):
4279
  """Wait until DRBDs have synchronized.
4280

4281
  """
4282
  def _helper(rd):
4283
    stats = rd.GetProcStatus()
4284
    if not (stats.is_connected or stats.is_in_resync):
4285
      raise utils.RetryAgain()
4286
    return stats
4287

    
4288
  bdevs = _FindDisks(disks)
4289

    
4290
  min_resync = 100
4291
  alldone = True
4292
  for rd in bdevs:
4293
    try:
4294
      # poll each second for 15 seconds
4295
      stats = utils.Retry(_helper, 1, 15, args=[rd])
4296
    except utils.RetryTimeout:
4297
      stats = rd.GetProcStatus()
4298
      # last check
4299
      if not (stats.is_connected or stats.is_in_resync):
4300
        _Fail("DRBD device %s is not in sync: stats=%s", rd, stats)
4301
    alldone = alldone and (not stats.is_in_resync)
4302
    if stats.sync_percent is not None:
4303
      min_resync = min(min_resync, stats.sync_percent)
4304

    
4305
  return (alldone, min_resync)
4306

    
4307

    
4308
def DrbdNeedsActivation(disks):
4309
  """Checks which of the passed disks needs activation and returns their UUIDs.
4310

4311
  """
4312
  faulty_disks = []
4313

    
4314
  for disk in disks:
4315
    rd = _RecursiveFindBD(disk)
4316
    if rd is None:
4317
      faulty_disks.append(disk)
4318
      continue
4319

    
4320
    stats = rd.GetProcStatus()
4321
    if stats.is_standalone or stats.is_diskless:
4322
      faulty_disks.append(disk)
4323

    
4324
  return [disk.uuid for disk in faulty_disks]
4325

    
4326

    
4327
def GetDrbdUsermodeHelper():
4328
  """Returns DRBD usermode helper currently configured.
4329

4330
  """
4331
  try:
4332
    return drbd.DRBD8.GetUsermodeHelper()
4333
  except errors.BlockDeviceError, err:
4334
    _Fail(str(err))
4335

    
4336

    
4337
def PowercycleNode(hypervisor_type, hvparams=None):
4338
  """Hard-powercycle the node.
4339

4340
  Because we need to return first, and schedule the powercycle in the
4341
  background, we won't be able to report failures nicely.
4342

4343
  """
4344
  hyper = hypervisor.GetHypervisor(hypervisor_type)
4345
  try:
4346
    pid = os.fork()
4347
  except OSError:
4348
    # if we can't fork, we'll pretend that we're in the child process
4349
    pid = 0
4350
  if pid > 0:
4351
    return "Reboot scheduled in 5 seconds"
4352
  # ensure the child is running on ram
4353
  try:
4354
    utils.Mlockall()
4355
  except Exception: # pylint: disable=W0703
4356
    pass
4357
  time.sleep(5)
4358
  hyper.PowercycleNode(hvparams=hvparams)
4359

    
4360

    
4361
def _VerifyRestrictedCmdName(cmd):
4362
  """Verifies a restricted command name.
4363

4364
  @type cmd: string
4365
  @param cmd: Command name
4366
  @rtype: tuple; (boolean, string or None)
4367
  @return: The tuple's first element is the status; if C{False}, the second
4368
    element is an error message string, otherwise it's C{None}
4369

4370
  """
4371
  if not cmd.strip():
4372
    return (False, "Missing command name")
4373

    
4374
  if os.path.basename(cmd) != cmd:
4375
    return (False, "Invalid command name")
4376

    
4377
  if not constants.EXT_PLUGIN_MASK.match(cmd):
4378
    return (False, "Command name contains forbidden characters")
4379

    
4380
  return (True, None)
4381

    
4382

    
4383
def _CommonRestrictedCmdCheck(path, owner):
4384
  """Common checks for restricted command file system directories and files.
4385

4386
  @type path: string
4387
  @param path: Path to check
4388
  @param owner: C{None} or tuple containing UID and GID
4389
  @rtype: tuple; (boolean, string or C{os.stat} result)
4390
  @return: The tuple's first element is the status; if C{False}, the second
4391
    element is an error message string, otherwise it's the result of C{os.stat}
4392

4393
  """
4394
  if owner is None:
4395
    # Default to root as owner
4396
    owner = (0, 0)
4397

    
4398
  try:
4399
    st = os.stat(path)
4400
  except EnvironmentError, err:
4401
    return (False, "Can't stat(2) '%s': %s" % (path, err))
4402

    
4403
  if stat.S_IMODE(st.st_mode) & (~_RCMD_MAX_MODE):
4404
    return (False, "Permissions on '%s' are too permissive" % path)
4405

    
4406
  if (st.st_uid, st.st_gid) != owner:
4407
    (owner_uid, owner_gid) = owner
4408
    return (False, "'%s' is not owned by %s:%s" % (path, owner_uid, owner_gid))
4409

    
4410
  return (True, st)
4411

    
4412

    
4413
def _VerifyRestrictedCmdDirectory(path, _owner=None):
4414
  """Verifies restricted command directory.
4415

4416
  @type path: string
4417
  @param path: Path to check
4418
  @rtype: tuple; (boolean, string or None)
4419
  @return: The tuple's first element is the status; if C{False}, the second
4420
    element is an error message string, otherwise it's C{None}
4421

4422
  """
4423
  (status, value) = _CommonRestrictedCmdCheck(path, _owner)
4424

    
4425
  if not status:
4426
    return (False, value)
4427

    
4428
  if not stat.S_ISDIR(value.st_mode):
4429
    return (False, "Path '%s' is not a directory" % path)
4430

    
4431
  return (True, None)
4432

    
4433

    
4434
def _VerifyRestrictedCmd(path, cmd, _owner=None):
4435
  """Verifies a whole restricted command and returns its executable filename.
4436

4437
  @type path: string
4438
  @param path: Directory containing restricted commands
4439
  @type cmd: string
4440
  @param cmd: Command name
4441
  @rtype: tuple; (boolean, string)
4442
  @return: The tuple's first element is the status; if C{False}, the second
4443
    element is an error message string, otherwise the second element is the
4444
    absolute path to the executable
4445

4446
  """
4447
  executable = utils.PathJoin(path, cmd)
4448

    
4449
  (status, msg) = _CommonRestrictedCmdCheck(executable, _owner)
4450

    
4451
  if not status:
4452
    return (False, msg)
4453

    
4454
  if not utils.IsExecutable(executable):
4455
    return (False, "access(2) thinks '%s' can't be executed" % executable)
4456

    
4457
  return (True, executable)
4458

    
4459

    
4460
def _PrepareRestrictedCmd(path, cmd,
4461
                          _verify_dir=_VerifyRestrictedCmdDirectory,
4462
                          _verify_name=_VerifyRestrictedCmdName,
4463
                          _verify_cmd=_VerifyRestrictedCmd):
4464
  """Performs a number of tests on a restricted command.
4465

4466
  @type path: string
4467
  @param path: Directory containing restricted commands
4468
  @type cmd: string
4469
  @param cmd: Command name
4470
  @return: Same as L{_VerifyRestrictedCmd}
4471

4472
  """
4473
  # Verify the directory first
4474
  (status, msg) = _verify_dir(path)
4475
  if status:
4476
    # Check command if everything was alright
4477
    (status, msg) = _verify_name(cmd)
4478

    
4479
  if not status:
4480
    return (False, msg)
4481

    
4482
  # Check actual executable
4483
  return _verify_cmd(path, cmd)
4484

    
4485

    
4486
def RunRestrictedCmd(cmd,
4487
                     _lock_timeout=_RCMD_LOCK_TIMEOUT,
4488
                     _lock_file=pathutils.RESTRICTED_COMMANDS_LOCK_FILE,
4489
                     _path=pathutils.RESTRICTED_COMMANDS_DIR,
4490
                     _sleep_fn=time.sleep,
4491
                     _prepare_fn=_PrepareRestrictedCmd,
4492
                     _runcmd_fn=utils.RunCmd,
4493
                     _enabled=constants.ENABLE_RESTRICTED_COMMANDS):
4494
  """Executes a restricted command after performing strict tests.
4495

4496
  @type cmd: string
4497
  @param cmd: Command name
4498
  @rtype: string
4499
  @return: Command output
4500
  @raise RPCFail: In case of an error
4501

4502
  """
4503
  logging.info("Preparing to run restricted command '%s'", cmd)
4504

    
4505
  if not _enabled:
4506
    _Fail("Restricted commands disabled at configure time")
4507

    
4508
  lock = None
4509
  try:
4510
    cmdresult = None
4511
    try:
4512
      lock = utils.FileLock.Open(_lock_file)
4513
      lock.Exclusive(blocking=True, timeout=_lock_timeout)
4514

    
4515
      (status, value) = _prepare_fn(_path, cmd)
4516

    
4517
      if status:
4518
        cmdresult = _runcmd_fn([value], env={}, reset_env=True,
4519
                               postfork_fn=lambda _: lock.Unlock())
4520
      else:
4521
        logging.error(value)
4522
    except Exception: # pylint: disable=W0703
4523
      # Keep original error in log
4524
      logging.exception("Caught exception")
4525

    
4526
    if cmdresult is None:
4527
      logging.info("Sleeping for %0.1f seconds before returning",
4528
                   _RCMD_INVALID_DELAY)
4529
      _sleep_fn(_RCMD_INVALID_DELAY)
4530

    
4531
      # Do not include original error message in returned error
4532
      _Fail("Executing command '%s' failed" % cmd)
4533
    elif cmdresult.failed or cmdresult.fail_reason:
4534
      _Fail("Restricted command '%s' failed: %s; output: %s",
4535
            cmd, cmdresult.fail_reason, cmdresult.output)
4536
    else:
4537
      return cmdresult.output
4538
  finally:
4539
    if lock is not None:
4540
      # Release lock at last
4541
      lock.Close()
4542
      lock = None
4543

    
4544

    
4545
def SetWatcherPause(until, _filename=pathutils.WATCHER_PAUSEFILE):
4546
  """Creates or removes the watcher pause file.
4547

4548
  @type until: None or number
4549
  @param until: Unix timestamp saying until when the watcher shouldn't run
4550

4551
  """
4552
  if until is None:
4553
    logging.info("Received request to no longer pause watcher")
4554
    utils.RemoveFile(_filename)
4555
  else:
4556
    logging.info("Received request to pause watcher until %s", until)
4557

    
4558
    if not ht.TNumber(until):
4559
      _Fail("Duration must be numeric")
4560

    
4561
    utils.WriteFile(_filename, data="%d\n" % (until, ), mode=0644)


def ConfigureOVS(ovs_name, ovs_link):
  """Creates an OpenvSwitch on the node.

  This function sets up an OpenvSwitch on the node with the given name and
  connects it to the outside via the given Ethernet device.

  @type ovs_name: string
  @param ovs_name: Name of the OpenvSwitch to create.
  @type ovs_link: None or string
  @param ovs_link: Ethernet device for outside connection (can be missing)

  """
  # Initialize the OpenvSwitch
  result = utils.RunCmd(["ovs-vsctl", "add-br", ovs_name])
  if result.failed:
    _Fail("Failed to create openvswitch. Script return value: %s, output: '%s'"
          % (result.exit_code, result.output), log=True)

  # And connect it to a physical interface, if given
  if ovs_link:
    result = utils.RunCmd(["ovs-vsctl", "add-port", ovs_name, ovs_link])
    if result.failed:
      _Fail("Failed to connect openvswitch to interface %s. Script return"
            " value: %s, output: '%s'" % (ovs_link, result.exit_code,
            result.output), log=True)
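
# Illustrative sketch (switch and interface names are made up): the call below
# is roughly equivalent to running "ovs-vsctl add-br switch1" followed by
# "ovs-vsctl add-port switch1 eth0" on the node:
#
#   ConfigureOVS("switch1", "eth0")   # bridge uplinked to a physical NIC
#   ConfigureOVS("switch1", None)     # isolated bridge without an uplink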


class HooksRunner(object):
  """Hook runner.

  This class is instantiated on the node side (ganeti-noded) and not
  on the master side.

  """
  def __init__(self, hooks_base_dir=None):
    """Constructor for hooks runner.

    @type hooks_base_dir: str or None
    @param hooks_base_dir: if not None, this overrides the
        L{pathutils.HOOKS_BASE_DIR} (useful for unittests)

    """
    if hooks_base_dir is None:
      hooks_base_dir = pathutils.HOOKS_BASE_DIR
    # yeah, _BASE_DIR is not valid for attributes, we use it like a
    # constant
    self._BASE_DIR = hooks_base_dir # pylint: disable=C0103

  def RunLocalHooks(self, node_list, hpath, phase, env):
    """Check that the hooks will be run only locally and then run them.

    """
    assert len(node_list) == 1
    node = node_list[0]
    _, myself = ssconf.GetMasterAndMyself()
    assert node == myself

    results = self.RunHooks(hpath, phase, env)

    # Return values in the form expected by HooksMaster
    return {node: (None, False, results)}

  def RunHooks(self, hpath, phase, env):
    """Run the scripts in the hooks directory.

    @type hpath: str
    @param hpath: the path to the hooks directory which
        holds the scripts
    @type phase: str
    @param phase: either L{constants.HOOKS_PHASE_PRE} or
        L{constants.HOOKS_PHASE_POST}
    @type env: dict
    @param env: dictionary with the environment for the hook
    @rtype: list
    @return: list of 3-element tuples:
      - script path
      - script result, either L{constants.HKR_SUCCESS} or
        L{constants.HKR_FAIL}
      - output of the script

    @raise errors.ProgrammerError: for invalid input
        parameters

    """
    if phase == constants.HOOKS_PHASE_PRE:
      suffix = "pre"
    elif phase == constants.HOOKS_PHASE_POST:
      suffix = "post"
    else:
      _Fail("Unknown hooks phase '%s'", phase)

    subdir = "%s-%s.d" % (hpath, suffix)
    dir_name = utils.PathJoin(self._BASE_DIR, subdir)

    results = []

    if not os.path.isdir(dir_name):
      # for non-existing/non-dirs, we simply exit instead of logging a
      # warning at every operation
      return results

    runparts_results = utils.RunParts(dir_name, env=env, reset_env=True)

    for (relname, relstatus, runresult) in runparts_results:
      if relstatus == constants.RUNPARTS_SKIP:
        rrval = constants.HKR_SKIP
        output = ""
      elif relstatus == constants.RUNPARTS_ERR:
        rrval = constants.HKR_FAIL
        output = "Hook script execution error: %s" % runresult
      elif relstatus == constants.RUNPARTS_RUN:
        if runresult.failed:
          rrval = constants.HKR_FAIL
        else:
          rrval = constants.HKR_SUCCESS
        output = utils.SafeEncode(runresult.output.strip())
      results.append(("%s/%s" % (subdir, relname), rrval, output))

    return results
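
# Illustrative sketch (hook path, script and environment names are made up):
# for hpath "instance-start" and the post phase, RunHooks executes the scripts
# found in <hooks_base_dir>/instance-start-post.d/ and reports one tuple per
# script:
#
#   runner = HooksRunner()
#   results = runner.RunHooks("instance-start", constants.HOOKS_PHASE_POST,
#                             {"GANETI_INSTANCE_NAME": "instance1"})
#   # e.g. [("instance-start-post.d/10-log", constants.HKR_SUCCESS, "done")]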


class IAllocatorRunner(object):
  """IAllocator runner.

  This class is instantiated on the node side (ganeti-noded) and not on
  the master side.

  """
  @staticmethod
  def Run(name, idata, ial_params):
    """Run an iallocator script.

    @type name: str
    @param name: the iallocator script name
    @type idata: str
    @param idata: the allocator input data
    @type ial_params: list
    @param ial_params: the iallocator parameters

    @rtype: str
    @return: the stdout of the allocator script on success

    @raise RPCFail: if the allocator script cannot be found or its
        execution fails

    """
    alloc_script = utils.FindFile(name, constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      _Fail("iallocator module '%s' not found in the search path", name)

    fd, fin_name = tempfile.mkstemp(prefix="ganeti-iallocator.")
    try:
      os.write(fd, idata)
      os.close(fd)
      result = utils.RunCmd([alloc_script, fin_name] + ial_params)
      if result.failed:
        _Fail("iallocator module '%s' failed: %s, output '%s'",
              name, result.fail_reason, result.output)
    finally:
      os.unlink(fin_name)

    return result.stdout
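
# Illustrative sketch (allocator name and request are made up): the input data
# is written to a temporary file whose name is passed to the allocator script,
# and the script's stdout is returned on success:
#
#   text = IAllocatorRunner.Run("hail", "{}", [])   # "{}": minimal request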


class DevCacheManager(object):
  """Simple class for managing a cache of block device information.

  """
  _DEV_PREFIX = "/dev/"
  _ROOT_DIR = pathutils.BDEV_CACHE_DIR

  @classmethod
  def _ConvertPath(cls, dev_path):
    """Converts a /dev/name path to the cache file name.

    This replaces slashes with underscores and strips the /dev
    prefix. It then returns the full path to the cache file.

    @type dev_path: str
    @param dev_path: the C{/dev/} path name
    @rtype: str
    @return: the converted path name

    """
    if dev_path.startswith(cls._DEV_PREFIX):
      dev_path = dev_path[len(cls._DEV_PREFIX):]
    dev_path = dev_path.replace("/", "_")
    fpath = utils.PathJoin(cls._ROOT_DIR, "bdev_%s" % dev_path)
    return fpath
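
  # Illustrative sketch of the conversion above (the device path is made up):
  #
  #   DevCacheManager._ConvertPath("/dev/mapper/vg0-lv1")
  #   # -> utils.PathJoin(pathutils.BDEV_CACHE_DIR, "bdev_mapper_vg0-lv1")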

  @classmethod
  def UpdateCache(cls, dev_path, owner, on_primary, iv_name):
    """Updates the cache information for a given device.

    @type dev_path: str
    @param dev_path: the pathname of the device
    @type owner: str
    @param owner: the owner (instance name) of the device
    @type on_primary: bool
    @param on_primary: whether this is the primary
        node or not
    @type iv_name: str
    @param iv_name: the instance-visible name of the
        device, as in objects.Disk.iv_name

    @rtype: None

    """
    if dev_path is None:
      logging.error("DevCacheManager.UpdateCache got a None dev_path")
      return
    fpath = cls._ConvertPath(dev_path)
    if on_primary:
      state = "primary"
    else:
      state = "secondary"
    if iv_name is None:
      iv_name = "not_visible"
    fdata = "%s %s %s\n" % (str(owner), state, iv_name)
    try:
      utils.WriteFile(fpath, data=fdata)
    except EnvironmentError, err:
      logging.exception("Can't update bdev cache for %s: %s", dev_path, err)

  @classmethod
  def RemoveCache(cls, dev_path):
    """Remove data for a dev_path.

    This is just a wrapper over L{utils.io.RemoveFile} with a converted
    path name and logging.

    @type dev_path: str
    @param dev_path: the pathname of the device

    @rtype: None

    """
    if dev_path is None:
      logging.error("DevCacheManager.RemoveCache got a None dev_path")
      return
    fpath = cls._ConvertPath(dev_path)
    try:
      utils.RemoveFile(fpath)
    except EnvironmentError, err:
      logging.exception("Can't remove bdev cache for %s: %s", dev_path, err)