lib/backend.py @ 94ab995a
1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Functions used by the node daemon
23

24
@var _ALLOWED_UPLOAD_FILES: denotes which files are accepted in
25
     the L{UploadFile} function
26
@var _ALLOWED_CLEAN_DIRS: denotes which directories are accepted
27
     in the L{_CleanDirectory} function
28

29
"""
30

    
31
# pylint: disable=E1103,C0302
32

    
33
# E1103: %s %r has no %r member (but some types could not be
34
# inferred), because the _TryOSFromDisk returns either (True, os_obj)
35
# or (False, "string") which confuses pylint
36

    
37
# C0302: This module has become too big and should be split up
38

    
39

    
40
import os
41
import os.path
42
import shutil
43
import time
44
import stat
45
import errno
46
import re
47
import random
48
import logging
49
import tempfile
50
import zlib
51
import base64
52
import signal
53

    
54
from ganeti import errors
55
from ganeti import utils
56
from ganeti import ssh
57
from ganeti import hypervisor
58
from ganeti import constants
59
from ganeti.storage import bdev
60
from ganeti.storage import drbd
61
from ganeti.storage import filestorage
62
from ganeti import objects
63
from ganeti import ssconf
64
from ganeti import serializer
65
from ganeti import netutils
66
from ganeti import runtime
67
from ganeti import compat
68
from ganeti import pathutils
69
from ganeti import vcluster
70
from ganeti import ht
71
from ganeti.storage.base import BlockDev
72
from ganeti.storage.drbd import DRBD8
73
from ganeti import hooksmaster
74

    
75

    
76
_BOOT_ID_PATH = "/proc/sys/kernel/random/boot_id"
77
_ALLOWED_CLEAN_DIRS = compat.UniqueFrozenset([
78
  pathutils.DATA_DIR,
79
  pathutils.JOB_QUEUE_ARCHIVE_DIR,
80
  pathutils.QUEUE_DIR,
81
  pathutils.CRYPTO_KEYS_DIR,
82
  ])
83
_MAX_SSL_CERT_VALIDITY = 7 * 24 * 60 * 60
84
_X509_KEY_FILE = "key"
85
_X509_CERT_FILE = "cert"
86
_IES_STATUS_FILE = "status"
87
_IES_PID_FILE = "pid"
88
_IES_CA_FILE = "ca"
89

    
90
#: Valid LVS output line regex
91
_LVSLINE_REGEX = re.compile(r"^ *([^|]+)\|([^|]+)\|([0-9.]+)\|([^|]{6,})\|?$")
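# Illustrative note (not part of the original module): the regex above is meant
# to match one line of the "lvs ... --separator=| -ovg_name,lv_name,lv_size,lv_attr"
# output parsed in GetVolumeList below, e.g.
#
#   "  xenvg|test1|20.06|-wi-a-----"
#
# capturing the VG name, LV name, size and the attribute string.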
92

    
93
# Actions for the master setup script
94
_MASTER_START = "start"
95
_MASTER_STOP = "stop"
96

    
97
#: Maximum file permissions for restricted command directory and executables
98
_RCMD_MAX_MODE = (stat.S_IRWXU |
99
                  stat.S_IRGRP | stat.S_IXGRP |
100
                  stat.S_IROTH | stat.S_IXOTH)
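# For reference: the bit combination above corresponds to mode 0755
# (rwxr-xr-x), i.e. group and others may at most read and execute.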
101

    
102
#: Delay before returning an error for restricted commands
103
_RCMD_INVALID_DELAY = 10
104

    
105
#: How long to wait to acquire lock for restricted commands (shorter than
106
#: L{_RCMD_INVALID_DELAY}) to reduce blockage of noded forks when many
107
#: command requests arrive
108
_RCMD_LOCK_TIMEOUT = _RCMD_INVALID_DELAY * 0.8
109

    
110

    
111
class RPCFail(Exception):
112
  """Class denoting RPC failure.
113

114
  Its argument is the error message.
115

116
  """
117

    
118

    
119
def _GetInstReasonFilename(instance_name):
120
  """Path of the file containing the reason of the instance status change.
121

122
  @type instance_name: string
123
  @param instance_name: The name of the instance
124
  @rtype: string
125
  @return: The path of the file
126

127
  """
128
  return utils.PathJoin(pathutils.INSTANCE_REASON_DIR, instance_name)
129

    
130

    
131
def _StoreInstReasonTrail(instance_name, trail):
132
  """Serialize a reason trail related to an instance change of state to file.
133

134
  The exact location of the file depends on the name of the instance and on
135
  the configuration of the Ganeti cluster defined at deploy time.
136

137
  @type instance_name: string
138
  @param instance_name: The name of the instance
139
  @rtype: None
140

141
  """
142
  json = serializer.DumpJson(trail)
143
  filename = _GetInstReasonFilename(instance_name)
144
  utils.WriteFile(filename, data=json)
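# Illustrative call (not part of the original module): the trail is any
# JSON-serializable object; the instance name and trail entries below are
# made-up values for demonstration only.
#
#   _StoreInstReasonTrail("instance1.example.com",
#                         [["gnt-instance", "user requested shutdown"]])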
145

    
146

    
147
def _Fail(msg, *args, **kwargs):
148
  """Log an error and then raise an RPCFail exception.
149

150
  This exception is then handled specially in the ganeti daemon and
151
  turned into a 'failed' return type. As such, this function is a
152
  useful shortcut for logging the error and returning it to the master
153
  daemon.
154

155
  @type msg: string
156
  @param msg: the text of the exception
157
  @raise RPCFail
158

159
  """
160
  if args:
161
    msg = msg % args
162
  if "log" not in kwargs or kwargs["log"]: # if we should log this error
163
    if "exc" in kwargs and kwargs["exc"]:
164
      logging.exception(msg)
165
    else:
166
      logging.error(msg)
167
  raise RPCFail(msg)
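# Illustrative usage sketch (not part of the original module): the message is
# %-formatted with the positional arguments, and exc=True additionally logs
# the current exception before RPCFail is raised; the path is made up.
#
#   try:
#     utils.ReadFile("/nonexistent/path")
#   except EnvironmentError, err:
#     _Fail("Cannot read file: %s", err, exc=True)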
168

    
169

    
170
def _GetConfig():
171
  """Simple wrapper to return a SimpleStore.
172

173
  @rtype: L{ssconf.SimpleStore}
174
  @return: a SimpleStore instance
175

176
  """
177
  return ssconf.SimpleStore()
178

    
179

    
180
def _GetSshRunner(cluster_name):
181
  """Simple wrapper to return an SshRunner.
182

183
  @type cluster_name: str
184
  @param cluster_name: the cluster name, which is needed
185
      by the SshRunner constructor
186
  @rtype: L{ssh.SshRunner}
187
  @return: an SshRunner instance
188

189
  """
190
  return ssh.SshRunner(cluster_name)
191

    
192

    
193
def _Decompress(data):
194
  """Unpacks data compressed by the RPC client.
195

196
  @type data: list or tuple
197
  @param data: Data sent by RPC client
198
  @rtype: str
199
  @return: Decompressed data
200

201
  """
202
  assert isinstance(data, (list, tuple))
203
  assert len(data) == 2
204
  (encoding, content) = data
205
  if encoding == constants.RPC_ENCODING_NONE:
206
    return content
207
  elif encoding == constants.RPC_ENCODING_ZLIB_BASE64:
208
    return zlib.decompress(base64.b64decode(content))
209
  else:
210
    raise AssertionError("Unknown data encoding")
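# Illustrative round-trip (not part of the original module) showing the two
# encodings an RPC client may use:
#
#   assert _Decompress((constants.RPC_ENCODING_NONE, "abc")) == "abc"
#   packed = (constants.RPC_ENCODING_ZLIB_BASE64,
#             base64.b64encode(zlib.compress("abc")))
#   assert _Decompress(packed) == "abc"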
211

    
212

    
213
def _CleanDirectory(path, exclude=None):
214
  """Removes all regular files in a directory.
215

216
  @type path: str
217
  @param path: the directory to clean
218
  @type exclude: list
219
  @param exclude: list of files to be excluded, defaults
220
      to the empty list
221

222
  """
223
  if path not in _ALLOWED_CLEAN_DIRS:
224
    _Fail("Path passed to _CleanDirectory not in allowed clean targets: '%s'",
225
          path)
226

    
227
  if not os.path.isdir(path):
228
    return
229
  if exclude is None:
230
    exclude = []
231
  else:
232
    # Normalize excluded paths
233
    exclude = [os.path.normpath(i) for i in exclude]
234

    
235
  for rel_name in utils.ListVisibleFiles(path):
236
    full_name = utils.PathJoin(path, rel_name)
237
    if full_name in exclude:
238
      continue
239
    if os.path.isfile(full_name) and not os.path.islink(full_name):
240
      utils.RemoveFile(full_name)
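# Illustrative call (not part of the original module): clean the job queue
# directory while keeping the lock file, as JobQueuePurge below does.
#
#   _CleanDirectory(pathutils.QUEUE_DIR,
#                   exclude=[pathutils.JOB_QUEUE_LOCK_FILE])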
241

    
242

    
243
def _BuildUploadFileList():
244
  """Build the list of allowed upload files.
245

246
  This is abstracted so that it's built only once at module import time.
247

248
  """
249
  allowed_files = set([
250
    pathutils.CLUSTER_CONF_FILE,
251
    pathutils.ETC_HOSTS,
252
    pathutils.SSH_KNOWN_HOSTS_FILE,
253
    pathutils.VNC_PASSWORD_FILE,
254
    pathutils.RAPI_CERT_FILE,
255
    pathutils.SPICE_CERT_FILE,
256
    pathutils.SPICE_CACERT_FILE,
257
    pathutils.RAPI_USERS_FILE,
258
    pathutils.CONFD_HMAC_KEY,
259
    pathutils.CLUSTER_DOMAIN_SECRET_FILE,
260
    ])
261

    
262
  for hv_name in constants.HYPER_TYPES:
263
    hv_class = hypervisor.GetHypervisorClass(hv_name)
264
    allowed_files.update(hv_class.GetAncillaryFiles()[0])
265

    
266
  assert pathutils.FILE_STORAGE_PATHS_FILE not in allowed_files, \
267
    "Allowed file storage paths should never be uploaded via RPC"
268

    
269
  return frozenset(allowed_files)
270

    
271

    
272
_ALLOWED_UPLOAD_FILES = _BuildUploadFileList()
273

    
274

    
275
def JobQueuePurge():
276
  """Removes job queue files and archived jobs.
277

278
  @rtype: tuple
279
  @return: True, None
280

281
  """
282
  _CleanDirectory(pathutils.QUEUE_DIR, exclude=[pathutils.JOB_QUEUE_LOCK_FILE])
283
  _CleanDirectory(pathutils.JOB_QUEUE_ARCHIVE_DIR)
284

    
285

    
286
def GetMasterInfo():
287
  """Returns master information.
288

289
  This is a utility function to compute master information, either
290
  for consumption here or from the node daemon.
291

292
  @rtype: tuple
293
  @return: master_netdev, master_ip, master_name, primary_ip_family,
294
    master_netmask
295
  @raise RPCFail: in case of errors
296

297
  """
298
  try:
299
    cfg = _GetConfig()
300
    master_netdev = cfg.GetMasterNetdev()
301
    master_ip = cfg.GetMasterIP()
302
    master_netmask = cfg.GetMasterNetmask()
303
    master_node = cfg.GetMasterNode()
304
    primary_ip_family = cfg.GetPrimaryIPFamily()
305
  except errors.ConfigurationError, err:
306
    _Fail("Cluster configuration incomplete: %s", err, exc=True)
307
  return (master_netdev, master_ip, master_node, primary_ip_family,
308
          master_netmask)
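# Illustrative usage sketch (not part of the original module):
#
#   (master_netdev, master_ip, master_node,
#    primary_ip_family, master_netmask) = GetMasterInfo()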
309

    
310

    
311
def RunLocalHooks(hook_opcode, hooks_path, env_builder_fn):
312
  """Decorator that runs hooks before and after the decorated function.
313

314
  @type hook_opcode: string
315
  @param hook_opcode: opcode of the hook
316
  @type hooks_path: string
317
  @param hooks_path: path of the hooks
318
  @type env_builder_fn: function
319
  @param env_builder_fn: function that returns a dictionary containing the
320
    environment variables for the hooks. Will get all the parameters of the
321
    decorated function.
322
  @raise RPCFail: in case of pre-hook failure
323

324
  """
325
  def decorator(fn):
326
    def wrapper(*args, **kwargs):
327
      _, myself = ssconf.GetMasterAndMyself()
328
      nodes = ([myself], [myself])  # these hooks run locally
329

    
330
      env_fn = compat.partial(env_builder_fn, *args, **kwargs)
331

    
332
      cfg = _GetConfig()
333
      hr = HooksRunner()
334
      hm = hooksmaster.HooksMaster(hook_opcode, hooks_path, nodes,
335
                                   hr.RunLocalHooks, None, env_fn,
336
                                   logging.warning, cfg.GetClusterName(),
337
                                   cfg.GetMasterNode())
338
      hm.RunPhase(constants.HOOKS_PHASE_PRE)
339
      result = fn(*args, **kwargs)
340
      hm.RunPhase(constants.HOOKS_PHASE_POST)
341

    
342
      return result
343
    return wrapper
344
  return decorator
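# Illustrative sketch (not part of the original module) of how the decorator is
# applied, mirroring ActivateMasterIp further below: the hook environment is
# built by calling the env_builder_fn with the decorated function's arguments.
#
#   @RunLocalHooks(constants.FAKE_OP_MASTER_TURNUP, "master-ip-turnup",
#                  _BuildMasterIpEnv)
#   def ActivateMasterIp(master_params, use_external_mip_script):
#     ...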
345

    
346

    
347
def _BuildMasterIpEnv(master_params, use_external_mip_script=None):
348
  """Builds environment variables for master IP hooks.
349

350
  @type master_params: L{objects.MasterNetworkParameters}
351
  @param master_params: network parameters of the master
352
  @type use_external_mip_script: boolean
353
  @param use_external_mip_script: whether to use an external master IP
354
    address setup script (unused, but necessary per the implementation of the
355
    L{RunLocalHooks} decorator)
356

357
  """
358
  # pylint: disable=W0613
359
  ver = netutils.IPAddress.GetVersionFromAddressFamily(master_params.ip_family)
360
  env = {
361
    "MASTER_NETDEV": master_params.netdev,
362
    "MASTER_IP": master_params.ip,
363
    "MASTER_NETMASK": str(master_params.netmask),
364
    "CLUSTER_IP_VERSION": str(ver),
365
  }
366

    
367
  return env
368

    
369

    
370
def _RunMasterSetupScript(master_params, action, use_external_mip_script):
371
  """Execute the master IP address setup script.
372

373
  @type master_params: L{objects.MasterNetworkParameters}
374
  @param master_params: network parameters of the master
375
  @type action: string
376
  @param action: action to pass to the script. Must be one of
377
    L{backend._MASTER_START} or L{backend._MASTER_STOP}
378
  @type use_external_mip_script: boolean
379
  @param use_external_mip_script: whether to use an external master IP
380
    address setup script
381
  @raise backend.RPCFail: if there are errors during the execution of the
382
    script
383

384
  """
385
  env = _BuildMasterIpEnv(master_params)
386

    
387
  if use_external_mip_script:
388
    setup_script = pathutils.EXTERNAL_MASTER_SETUP_SCRIPT
389
  else:
390
    setup_script = pathutils.DEFAULT_MASTER_SETUP_SCRIPT
391

    
392
  result = utils.RunCmd([setup_script, action], env=env, reset_env=True)
393

    
394
  if result.failed:
395
    _Fail("Failed to %s the master IP. Script return value: %s, output: '%s'" %
396
          (action, result.exit_code, result.output), log=True)
397

    
398

    
399
@RunLocalHooks(constants.FAKE_OP_MASTER_TURNUP, "master-ip-turnup",
400
               _BuildMasterIpEnv)
401
def ActivateMasterIp(master_params, use_external_mip_script):
402
  """Activate the IP address of the master daemon.
403

404
  @type master_params: L{objects.MasterNetworkParameters}
405
  @param master_params: network parameters of the master
406
  @type use_external_mip_script: boolean
407
  @param use_external_mip_script: whether to use an external master IP
408
    address setup script
409
  @raise RPCFail: in case of errors during the IP startup
410

411
  """
412
  _RunMasterSetupScript(master_params, _MASTER_START,
413
                        use_external_mip_script)
414

    
415

    
416
def StartMasterDaemons(no_voting):
417
  """Activate local node as master node.
418

419
  The function will start the master daemons (ganeti-masterd and ganeti-rapi).
420

421
  @type no_voting: boolean
422
  @param no_voting: whether to start ganeti-masterd without a node vote
423
      but still non-interactively
424
  @rtype: None
425

426
  """
427

    
428
  if no_voting:
429
    masterd_args = "--no-voting --yes-do-it"
430
  else:
431
    masterd_args = ""
432

    
433
  env = {
434
    "EXTRA_MASTERD_ARGS": masterd_args,
435
    }
436

    
437
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-master"], env=env)
438
  if result.failed:
439
    msg = "Can't start Ganeti master: %s" % result.output
440
    logging.error(msg)
441
    _Fail(msg)
442

    
443

    
444
@RunLocalHooks(constants.FAKE_OP_MASTER_TURNDOWN, "master-ip-turndown",
445
               _BuildMasterIpEnv)
446
def DeactivateMasterIp(master_params, use_external_mip_script):
447
  """Deactivate the master IP on this node.
448

449
  @type master_params: L{objects.MasterNetworkParameters}
450
  @param master_params: network parameters of the master
451
  @type use_external_mip_script: boolean
452
  @param use_external_mip_script: whether to use an external master IP
453
    address setup script
454
  @raise RPCFail: in case of errors during the IP turndown
455

456
  """
457
  _RunMasterSetupScript(master_params, _MASTER_STOP,
458
                        use_external_mip_script)
459

    
460

    
461
def StopMasterDaemons():
462
  """Stop the master daemons on this node.
463

464
  Stop the master daemons (ganeti-masterd and ganeti-rapi) on this node.
465

466
  @rtype: None
467

468
  """
469
  # TODO: log and report back to the caller the error failures; we
470
  # need to decide in which case we fail the RPC for this
471

    
472
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-master"])
473
  if result.failed:
474
    logging.error("Could not stop Ganeti master, command %s had exitcode %s"
475
                  " and error %s",
476
                  result.cmd, result.exit_code, result.output)
477

    
478

    
479
def ChangeMasterNetmask(old_netmask, netmask, master_ip, master_netdev):
480
  """Change the netmask of the master IP.
481

482
  @param old_netmask: the old value of the netmask
483
  @param netmask: the new value of the netmask
484
  @param master_ip: the master IP
485
  @param master_netdev: the master network device
486

487
  """
488
  if old_netmask == netmask:
489
    return
490

    
491
  if not netutils.IPAddress.Own(master_ip):
492
    _Fail("The master IP address is not up, not attempting to change its"
493
          " netmask")
494

    
495
  result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "add",
496
                         "%s/%s" % (master_ip, netmask),
497
                         "dev", master_netdev, "label",
498
                         "%s:0" % master_netdev])
499
  if result.failed:
500
    _Fail("Could not set the new netmask on the master IP address")
501

    
502
  result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "del",
503
                         "%s/%s" % (master_ip, old_netmask),
504
                         "dev", master_netdev, "label",
505
                         "%s:0" % master_netdev])
506
  if result.failed:
507
    _Fail("Could not bring down the master IP address with the old netmask")
508

    
509

    
510
def EtcHostsModify(mode, host, ip):
511
  """Modify a host entry in /etc/hosts.
512

513
  @param mode: The mode of operation; either add or remove an entry
514
  @param host: The host to operate on
515
  @param ip: The ip associated with the entry
516

517
  """
518
  if mode == constants.ETC_HOSTS_ADD:
519
    if not ip:
520
      _Fail("Mode 'add' needs 'ip' parameter, but parameter not"
521
            " present")
522
    utils.AddHostToEtcHosts(host, ip)
523
  elif mode == constants.ETC_HOSTS_REMOVE:
524
    if ip:
525
      _Fail("Mode 'remove' does not allow 'ip' parameter, but"
526
            " parameter is present")
527
    utils.RemoveHostFromEtcHosts(host)
528
  else:
529
    _Fail("Mode not supported")
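# Illustrative calls (not part of the original module); host name and address
# are made-up values:
#
#   EtcHostsModify(constants.ETC_HOSTS_ADD, "node1.example.com", "192.0.2.10")
#   EtcHostsModify(constants.ETC_HOSTS_REMOVE, "node1.example.com", None)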
530

    
531

    
532
def LeaveCluster(modify_ssh_setup):
533
  """Cleans up and removes the current node.
534

535
  This function cleans up and prepares the current node to be removed
536
  from the cluster.
537

538
  If processing is successful, then it raises an
539
  L{errors.QuitGanetiException} which is used as a special case to
540
  shutdown the node daemon.
541

542
  @type modify_ssh_setup: boolean
543

544
  """
545
  _CleanDirectory(pathutils.DATA_DIR)
546
  _CleanDirectory(pathutils.CRYPTO_KEYS_DIR)
547
  JobQueuePurge()
548

    
549
  if modify_ssh_setup:
550
    try:
551
      priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.SSH_LOGIN_USER)
552

    
553
      utils.RemoveAuthorizedKey(auth_keys, utils.ReadFile(pub_key))
554

    
555
      utils.RemoveFile(priv_key)
556
      utils.RemoveFile(pub_key)
557
    except errors.OpExecError:
558
      logging.exception("Error while processing ssh files")
559

    
560
  try:
561
    utils.RemoveFile(pathutils.CONFD_HMAC_KEY)
562
    utils.RemoveFile(pathutils.RAPI_CERT_FILE)
563
    utils.RemoveFile(pathutils.SPICE_CERT_FILE)
564
    utils.RemoveFile(pathutils.SPICE_CACERT_FILE)
565
    utils.RemoveFile(pathutils.NODED_CERT_FILE)
566
  except: # pylint: disable=W0702
567
    logging.exception("Error while removing cluster secrets")
568

    
569
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop", constants.CONFD])
570
  if result.failed:
571
    logging.error("Command %s failed with exitcode %s and error %s",
572
                  result.cmd, result.exit_code, result.output)
573

    
574
  # Raise a custom exception (handled in ganeti-noded)
575
  raise errors.QuitGanetiException(True, "Shutdown scheduled")
576

    
577

    
578
def _CheckStorageParams(params, num_params):
579
  """Performs sanity checks for storage parameters.
580

581
  @type params: list
582
  @param params: list of storage parameters
583
  @type num_params: int
584
  @param num_params: expected number of parameters
585

586
  """
587
  if params is None:
588
    raise errors.ProgrammerError("No storage parameters for storage"
589
                                 " reporting is provided.")
590
  if not isinstance(params, list):
591
    raise errors.ProgrammerError("The storage parameters are not of type"
592
                                 " list: '%s'" % params)
593
  if not len(params) == num_params:
594
    raise errors.ProgrammerError("Did not receive the expected number of"
595
                                 " storage parameters: expected %s,"
596
                                 " received '%s'" % (num_params, len(params)))
597

    
598

    
599
def _CheckLvmStorageParams(params):
600
  """Performs sanity check for the 'exclusive storage' flag.
601

602
  @see: C{_CheckStorageParams}
603

604
  """
605
  _CheckStorageParams(params, 1)
606
  excl_stor = params[0]
607
  if not isinstance(params[0], bool):
608
    raise errors.ProgrammerError("Exclusive storage parameter is not"
609
                                 " boolean: '%s'." % excl_stor)
610
  return excl_stor
611

    
612

    
613
def _GetLvmVgSpaceInfo(name, params):
614
  """Wrapper around C{_GetVgInfo} which checks the storage parameters.
615

616
  @type name: string
617
  @param name: name of the volume group
618
  @type params: list
619
  @param params: list of storage parameters, which in this case should be
620
    containing only one for exclusive storage
621

622
  """
623
  excl_stor = _CheckLvmStorageParams(params)
624
  return _GetVgInfo(name, excl_stor)
625

    
626

    
627
def _GetVgInfo(
628
    name, excl_stor, info_fn=bdev.LogicalVolume.GetVGInfo):
630
  """Retrieves information about an LVM volume group.
630

631
  """
632
  # TODO: GetVGInfo supports returning information for multiple VGs at once
633
  vginfo = info_fn([name], excl_stor)
634
  if vginfo:
635
    vg_free = int(round(vginfo[0][0], 0))
636
    vg_size = int(round(vginfo[0][1], 0))
637
  else:
638
    vg_free = None
639
    vg_size = None
640

    
641
  return {
642
    "type": constants.ST_LVM_VG,
643
    "name": name,
644
    "storage_free": vg_free,
645
    "storage_size": vg_size,
646
    }
647

    
648

    
649
def _GetLvmPvSpaceInfo(name, params):
650
  """Wrapper around C{_GetVgSpindlesInfo} with sanity checks.
651

652
  @see: C{_GetLvmVgSpaceInfo}
653

654
  """
655
  excl_stor = _CheckLvmStorageParams(params)
656
  return _GetVgSpindlesInfo(name, excl_stor)
657

    
658

    
659
def _GetVgSpindlesInfo(
660
    name, excl_stor, info_fn=bdev.LogicalVolume.GetVgSpindlesInfo):
661
  """Retrieves information about spindles in an LVM volume group.
662

663
  @type name: string
664
  @param name: VG name
665
  @type excl_stor: bool
666
  @param excl_stor: exclusive storage
667
  @rtype: dict
668
  @return: dictionary with keys "type", "name", "storage_free" and
669
      "storage_size" for the storage type, VG name, free spindles and
      total spindles respectively
670

671
  """
672
  if excl_stor:
673
    (vg_free, vg_size) = info_fn(name)
674
  else:
675
    vg_free = 0
676
    vg_size = 0
677
  return {
678
    "type": constants.ST_LVM_PV,
679
    "name": name,
680
    "storage_free": vg_free,
681
    "storage_size": vg_size,
682
    }
683

    
684

    
685
def _GetHvInfo(name, hvparams, get_hv_fn=hypervisor.GetHypervisor):
686
  """Retrieves node information from a hypervisor.
687

688
  The information returned depends on the hypervisor. Common items:
689

690
    - vg_size is the size of the configured volume group in MiB
691
    - vg_free is the free size of the volume group in MiB
692
    - memory_dom0 is the memory allocated for domain0 in MiB
693
    - memory_free is the currently available (free) ram in MiB
694
    - memory_total is the total amount of ram in MiB
695
    - hv_version: the hypervisor version, if available
696

697
  @type hvparams: dict of string
698
  @param hvparams: the hypervisor's hvparams
699

700
  """
701
  return get_hv_fn(name).GetNodeInfo(hvparams=hvparams)
702

    
703

    
704
def _GetHvInfoAll(hv_specs, get_hv_fn=hypervisor.GetHypervisor):
705
  """Retrieves node information for all hypervisors.
706

707
  See C{_GetHvInfo} for information on the output.
708

709
  @type hv_specs: list of pairs (string, dict of strings)
710
  @param hv_specs: list of pairs of a hypervisor's name and its hvparams
711

712
  """
713
  if hv_specs is None:
714
    return None
715

    
716
  result = []
717
  for hvname, hvparams in hv_specs:
718
    result.append(_GetHvInfo(hvname, hvparams, get_hv_fn))
719
  return result
720

    
721

    
722
def _GetNamedNodeInfo(names, fn):
723
  """Calls C{fn} for all names in C{names} and returns the list of results.
724

725
  @rtype: None or list
726

727
  """
728
  if names is None:
729
    return None
730
  else:
731
    return map(fn, names)
732

    
733

    
734
def GetNodeInfo(storage_units, hv_specs):
735
  """Gives back a hash with different information about the node.
736

737
  @type storage_units: list of tuples (string, string, list)
738
  @param storage_units: List of tuples (storage unit, identifier, parameters) to
739
    ask for disk space information. In case of lvm-vg, the identifier is
740
    the VG name. The parameters can contain additional, storage-type-specific
741
    parameters, for example exclusive storage for lvm storage.
742
  @type hv_specs: list of pairs (string, dict of strings)
743
  @param hv_specs: list of pairs of a hypervisor's name and its hvparams
744
  @rtype: tuple; (string, None/dict, None/dict)
745
  @return: Tuple containing boot ID, volume group information and hypervisor
746
    information
747

748
  """
749
  bootid = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n")
750
  storage_info = _GetNamedNodeInfo(
751
    storage_units,
752
    (lambda (storage_type, storage_key, storage_params):
753
        _ApplyStorageInfoFunction(storage_type, storage_key, storage_params)))
754
  hv_info = _GetHvInfoAll(hv_specs)
755
  return (bootid, storage_info, hv_info)
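# Illustrative arguments (not part of the original module); the VG name,
# hypervisor name and empty hvparams are made-up/simplified values:
#
#   storage_units = [(constants.ST_LVM_VG, "xenvg", [True]),
#                    (constants.ST_LVM_PV, "xenvg", [True])]
#   hv_specs = [(constants.HT_XEN_PVM, {})]
#   (bootid, storage_info, hv_info) = GetNodeInfo(storage_units, hv_specs)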
756

    
757

    
758
def _GetFileStorageSpaceInfo(path, params):
759
  """Wrapper around filestorage.GetSpaceInfo.
760

761
  The purpose of this wrapper is to call filestorage.GetFileStorageSpaceInfo
762
  and ignore the *args parameter to not leak it into the filestorage
763
  module's code.
764

765
  @see: C{filestorage.GetFileStorageSpaceInfo} for description of the
766
    parameters.
767

768
  """
769
  _CheckStorageParams(params, 0)
770
  return filestorage.GetFileStorageSpaceInfo(path)
771

    
772

    
773
# FIXME: implement storage reporting for all missing storage types.
774
_STORAGE_TYPE_INFO_FN = {
775
  constants.ST_BLOCK: None,
776
  constants.ST_DISKLESS: None,
777
  constants.ST_EXT: None,
778
  constants.ST_FILE: _GetFileStorageSpaceInfo,
779
  constants.ST_LVM_PV: _GetLvmPvSpaceInfo,
780
  constants.ST_LVM_VG: _GetLvmVgSpaceInfo,
781
  constants.ST_RADOS: None,
782
}
783

    
784

    
785
def _ApplyStorageInfoFunction(storage_type, storage_key, *args):
786
  """Looks up and applies the correct function to calculate free and total
787
  storage for the given storage type.
788

789
  @type storage_type: string
790
  @param storage_type: the storage type for which the storage shall be reported.
791
  @type storage_key: string
792
  @param storage_key: identifier of a storage unit, e.g. the volume group name
793
    of an LVM storage unit
794
  @type args: any
795
  @param args: various parameters that can be used for storage reporting. These
796
    parameters and their semantics vary from storage type to storage type and
797
    are just propagated in this function.
798
  @return: the results of the application of the storage space function (see
799
    _STORAGE_TYPE_INFO_FN) if storage space reporting is implemented for that
800
    storage type
801
  @raise NotImplementedError: for storage types that don't support space
802
    reporting yet
803
  """
804
  fn = _STORAGE_TYPE_INFO_FN[storage_type]
805
  if fn is not None:
806
    return fn(storage_key, *args)
807
  else:
808
    raise NotImplementedError
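# Illustrative dispatch (not part of the original module): for an LVM volume
# group the lookup above ends up in _GetLvmVgSpaceInfo, while types mapped to
# None (e.g. constants.ST_RADOS) currently raise NotImplementedError.
#
#   info = _ApplyStorageInfoFunction(constants.ST_LVM_VG, "xenvg", [True])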
809

    
810

    
811
def _CheckExclusivePvs(pvi_list):
812
  """Check that PVs are not shared among LVs
813

814
  @type pvi_list: list of L{objects.LvmPvInfo} objects
815
  @param pvi_list: information about the PVs
816

817
  @rtype: list of tuples (string, list of strings)
818
  @return: offending volumes, as tuples: (pv_name, [lv1_name, lv2_name...])
819

820
  """
821
  res = []
822
  for pvi in pvi_list:
823
    if len(pvi.lv_list) > 1:
824
      res.append((pvi.name, pvi.lv_list))
825
  return res
826

    
827

    
828
def _VerifyHypervisors(what, vm_capable, result, all_hvparams,
829
                       get_hv_fn=hypervisor.GetHypervisor):
830
  """Verifies the hypervisor. Adds the results to the 'result' dictionary.
831

832
  @type what: C{dict}
833
  @param what: a dictionary of things to check
834
  @type vm_capable: boolean
835
  @param vm_capable: whether or not this node is vm capable
836
  @type result: dict
837
  @param result: dictionary of verification results; results of the
838
    verifications in this function will be added here
839
  @type all_hvparams: dict of dict of string
840
  @param all_hvparams: dictionary mapping hypervisor names to hvparams
841
  @type get_hv_fn: function
842
  @param get_hv_fn: function to retrieve the hypervisor, to improve testability
843

844
  """
845
  if not vm_capable:
846
    return
847

    
848
  if constants.NV_HYPERVISOR in what:
849
    result[constants.NV_HYPERVISOR] = {}
850
    for hv_name in what[constants.NV_HYPERVISOR]:
851
      hvparams = all_hvparams[hv_name]
852
      try:
853
        val = get_hv_fn(hv_name).Verify(hvparams=hvparams)
854
      except errors.HypervisorError, err:
855
        val = "Error while checking hypervisor: %s" % str(err)
856
      result[constants.NV_HYPERVISOR][hv_name] = val
857

    
858

    
859
def _VerifyHvparams(what, vm_capable, result,
860
                    get_hv_fn=hypervisor.GetHypervisor):
861
  """Verifies the hvparams. Adds the results to the 'result' dictionary.
862

863
  @type what: C{dict}
864
  @param what: a dictionary of things to check
865
  @type vm_capable: boolean
866
  @param vm_capable: whether or not this node is vm capable
867
  @type result: dict
868
  @param result: dictionary of verification results; results of the
869
    verifications in this function will be added here
870
  @type get_hv_fn: function
871
  @param get_hv_fn: function to retrieve the hypervisor, to improve testability
872

873
  """
874
  if not vm_capable:
875
    return
876

    
877
  if constants.NV_HVPARAMS in what:
878
    result[constants.NV_HVPARAMS] = []
879
    for source, hv_name, hvparms in what[constants.NV_HVPARAMS]:
880
      try:
881
        logging.info("Validating hv %s, %s", hv_name, hvparms)
882
        get_hv_fn(hv_name).ValidateParameters(hvparms)
883
      except errors.HypervisorError, err:
884
        result[constants.NV_HVPARAMS].append((source, hv_name, str(err)))
885

    
886

    
887
def _VerifyInstanceList(what, vm_capable, result, all_hvparams):
888
  """Verifies the instance list.
889

890
  @type what: C{dict}
891
  @param what: a dictionary of things to check
892
  @type vm_capable: boolean
893
  @param vm_capable: whether or not this node is vm capable
894
  @type result: dict
895
  @param result: dictionary of verification results; results of the
896
    verifications in this function will be added here
897
  @type all_hvparams: dict of dict of string
898
  @param all_hvparams: dictionary mapping hypervisor names to hvparams
899

900
  """
901
  if constants.NV_INSTANCELIST in what and vm_capable:
902
    # GetInstanceList can fail
903
    try:
904
      val = GetInstanceList(what[constants.NV_INSTANCELIST],
905
                            all_hvparams=all_hvparams)
906
    except RPCFail, err:
907
      val = str(err)
908
    result[constants.NV_INSTANCELIST] = val
909

    
910

    
911
def _VerifyNodeInfo(what, vm_capable, result, all_hvparams):
912
  """Verifies the node info.
913

914
  @type what: C{dict}
915
  @param what: a dictionary of things to check
916
  @type vm_capable: boolean
917
  @param vm_capable: whether or not this node is vm capable
918
  @type result: dict
919
  @param result: dictionary of verification results; results of the
920
    verifications in this function will be added here
921
  @type all_hvparams: dict of dict of string
922
  @param all_hvparams: dictionary mapping hypervisor names to hvparams
923

924
  """
925
  if constants.NV_HVINFO in what and vm_capable:
926
    hvname = what[constants.NV_HVINFO]
927
    hyper = hypervisor.GetHypervisor(hvname)
928
    hvparams = all_hvparams[hvname]
929
    result[constants.NV_HVINFO] = hyper.GetNodeInfo(hvparams=hvparams)
930

    
931

    
932
def VerifyNode(what, cluster_name, all_hvparams):
933
  """Verify the status of the local node.
934

935
  Based on the input L{what} parameter, various checks are done on the
936
  local node.
937

938
  If the I{filelist} key is present, this list of
939
  files is checksummed and the file/checksum pairs are returned.
940

941
  If the I{nodelist} key is present, we check that we have
942
  connectivity via ssh with the target nodes (and check the hostname
943
  report).
944

945
  If the I{node-net-test} key is present, we check that we have
946
  connectivity to the given nodes via both primary IP and, if
947
  applicable, secondary IPs.
948

949
  @type what: C{dict}
950
  @param what: a dictionary of things to check:
951
      - filelist: list of files for which to compute checksums
952
      - nodelist: list of nodes we should check ssh communication with
953
      - node-net-test: list of nodes we should check node daemon port
954
        connectivity with
955
      - hypervisor: list with hypervisors to run the verify for
956
  @type cluster_name: string
957
  @param cluster_name: the cluster's name
958
  @type all_hvparams: dict of dict of strings
959
  @param all_hvparams: a dictionary mapping hypervisor names to hvparams
960
  @rtype: dict
961
  @return: a dictionary with the same keys as the input dict, and
962
      values representing the result of the checks
963

964
  """
965
  result = {}
966
  my_name = netutils.Hostname.GetSysName()
967
  port = netutils.GetDaemonPort(constants.NODED)
968
  vm_capable = my_name not in what.get(constants.NV_VMNODES, [])
969

    
970
  _VerifyHypervisors(what, vm_capable, result, all_hvparams)
971
  _VerifyHvparams(what, vm_capable, result)
972

    
973
  if constants.NV_FILELIST in what:
974
    fingerprints = utils.FingerprintFiles(map(vcluster.LocalizeVirtualPath,
975
                                              what[constants.NV_FILELIST]))
976
    result[constants.NV_FILELIST] = \
977
      dict((vcluster.MakeVirtualPath(key), value)
978
           for (key, value) in fingerprints.items())
979

    
980
  if constants.NV_NODELIST in what:
981
    (nodes, bynode) = what[constants.NV_NODELIST]
982

    
983
    # Add nodes from other groups (different for each node)
984
    try:
985
      nodes.extend(bynode[my_name])
986
    except KeyError:
987
      pass
988

    
989
    # Use a random order
990
    random.shuffle(nodes)
991

    
992
    # Try to contact all nodes
993
    val = {}
994
    for node in nodes:
995
      success, message = _GetSshRunner(cluster_name).VerifyNodeHostname(node)
996
      if not success:
997
        val[node] = message
998

    
999
    result[constants.NV_NODELIST] = val
1000

    
1001
  if constants.NV_NODENETTEST in what:
1002
    result[constants.NV_NODENETTEST] = tmp = {}
1003
    my_pip = my_sip = None
1004
    for name, pip, sip in what[constants.NV_NODENETTEST]:
1005
      if name == my_name:
1006
        my_pip = pip
1007
        my_sip = sip
1008
        break
1009
    if not my_pip:
1010
      tmp[my_name] = ("Can't find my own primary/secondary IP"
1011
                      " in the node list")
1012
    else:
1013
      for name, pip, sip in what[constants.NV_NODENETTEST]:
1014
        fail = []
1015
        if not netutils.TcpPing(pip, port, source=my_pip):
1016
          fail.append("primary")
1017
        if sip != pip:
1018
          if not netutils.TcpPing(sip, port, source=my_sip):
1019
            fail.append("secondary")
1020
        if fail:
1021
          tmp[name] = ("failure using the %s interface(s)" %
1022
                       " and ".join(fail))
1023

    
1024
  if constants.NV_MASTERIP in what:
1025
    # FIXME: add checks on incoming data structures (here and in the
1026
    # rest of the function)
1027
    master_name, master_ip = what[constants.NV_MASTERIP]
1028
    if master_name == my_name:
1029
      source = constants.IP4_ADDRESS_LOCALHOST
1030
    else:
1031
      source = None
1032
    result[constants.NV_MASTERIP] = netutils.TcpPing(master_ip, port,
1033
                                                     source=source)
1034

    
1035
  if constants.NV_USERSCRIPTS in what:
1036
    result[constants.NV_USERSCRIPTS] = \
1037
      [script for script in what[constants.NV_USERSCRIPTS]
1038
       if not utils.IsExecutable(script)]
1039

    
1040
  if constants.NV_OOB_PATHS in what:
1041
    result[constants.NV_OOB_PATHS] = tmp = []
1042
    for path in what[constants.NV_OOB_PATHS]:
1043
      try:
1044
        st = os.stat(path)
1045
      except OSError, err:
1046
        tmp.append("error stat()'ing out of band helper: %s" % err)
1047
      else:
1048
        if stat.S_ISREG(st.st_mode):
1049
          if stat.S_IMODE(st.st_mode) & stat.S_IXUSR:
1050
            tmp.append(None)
1051
          else:
1052
            tmp.append("out of band helper %s is not executable" % path)
1053
        else:
1054
          tmp.append("out of band helper %s is not a file" % path)
1055

    
1056
  if constants.NV_LVLIST in what and vm_capable:
1057
    try:
1058
      val = GetVolumeList(utils.ListVolumeGroups().keys())
1059
    except RPCFail, err:
1060
      val = str(err)
1061
    result[constants.NV_LVLIST] = val
1062

    
1063
  _VerifyInstanceList(what, vm_capable, result, all_hvparams)
1064

    
1065
  if constants.NV_VGLIST in what and vm_capable:
1066
    result[constants.NV_VGLIST] = utils.ListVolumeGroups()
1067

    
1068
  if constants.NV_PVLIST in what and vm_capable:
1069
    check_exclusive_pvs = constants.NV_EXCLUSIVEPVS in what
1070
    val = bdev.LogicalVolume.GetPVInfo(what[constants.NV_PVLIST],
1071
                                       filter_allocatable=False,
1072
                                       include_lvs=check_exclusive_pvs)
1073
    if check_exclusive_pvs:
1074
      result[constants.NV_EXCLUSIVEPVS] = _CheckExclusivePvs(val)
1075
      for pvi in val:
1076
        # Avoid sending useless data on the wire
1077
        pvi.lv_list = []
1078
    result[constants.NV_PVLIST] = map(objects.LvmPvInfo.ToDict, val)
1079

    
1080
  if constants.NV_VERSION in what:
1081
    result[constants.NV_VERSION] = (constants.PROTOCOL_VERSION,
1082
                                    constants.RELEASE_VERSION)
1083

    
1084
  _VerifyNodeInfo(what, vm_capable, result, all_hvparams)
1085

    
1086
  if constants.NV_DRBDVERSION in what and vm_capable:
1087
    try:
1088
      drbd_version = DRBD8.GetProcInfo().GetVersionString()
1089
    except errors.BlockDeviceError, err:
1090
      logging.warning("Can't get DRBD version", exc_info=True)
1091
      drbd_version = str(err)
1092
    result[constants.NV_DRBDVERSION] = drbd_version
1093

    
1094
  if constants.NV_DRBDLIST in what and vm_capable:
1095
    try:
1096
      used_minors = drbd.DRBD8.GetUsedDevs()
1097
    except errors.BlockDeviceError, err:
1098
      logging.warning("Can't get used minors list", exc_info=True)
1099
      used_minors = str(err)
1100
    result[constants.NV_DRBDLIST] = used_minors
1101

    
1102
  if constants.NV_DRBDHELPER in what and vm_capable:
1103
    status = True
1104
    try:
1105
      payload = drbd.DRBD8.GetUsermodeHelper()
1106
    except errors.BlockDeviceError, err:
1107
      logging.error("Can't get DRBD usermode helper: %s", str(err))
1108
      status = False
1109
      payload = str(err)
1110
    result[constants.NV_DRBDHELPER] = (status, payload)
1111

    
1112
  if constants.NV_NODESETUP in what:
1113
    result[constants.NV_NODESETUP] = tmpr = []
1114
    if not os.path.isdir("/sys/block") or not os.path.isdir("/sys/class/net"):
1115
      tmpr.append("The sysfs filesystem doesn't seem to be mounted"
1116
                  " under /sys, missing required directories /sys/block"
1117
                  " and /sys/class/net")
1118
    if (not os.path.isdir("/proc/sys") or
1119
        not os.path.isfile("/proc/sysrq-trigger")):
1120
      tmpr.append("The procfs filesystem doesn't seem to be mounted"
1121
                  " under /proc, missing required directory /proc/sys and"
1122
                  " the file /proc/sysrq-trigger")
1123

    
1124
  if constants.NV_TIME in what:
1125
    result[constants.NV_TIME] = utils.SplitTime(time.time())
1126

    
1127
  if constants.NV_OSLIST in what and vm_capable:
1128
    result[constants.NV_OSLIST] = DiagnoseOS()
1129

    
1130
  if constants.NV_BRIDGES in what and vm_capable:
1131
    result[constants.NV_BRIDGES] = [bridge
1132
                                    for bridge in what[constants.NV_BRIDGES]
1133
                                    if not utils.BridgeExists(bridge)]
1134

    
1135
  if what.get(constants.NV_ACCEPTED_STORAGE_PATHS) == my_name:
1136
    result[constants.NV_ACCEPTED_STORAGE_PATHS] = \
1137
        filestorage.ComputeWrongFileStoragePaths()
1138

    
1139
  if what.get(constants.NV_FILE_STORAGE_PATH):
1140
    pathresult = filestorage.CheckFileStoragePath(
1141
        what[constants.NV_FILE_STORAGE_PATH])
1142
    if pathresult:
1143
      result[constants.NV_FILE_STORAGE_PATH] = pathresult
1144

    
1145
  if what.get(constants.NV_SHARED_FILE_STORAGE_PATH):
1146
    pathresult = filestorage.CheckFileStoragePath(
1147
        what[constants.NV_SHARED_FILE_STORAGE_PATH])
1148
    if pathresult:
1149
      result[constants.NV_SHARED_FILE_STORAGE_PATH] = pathresult
1150

    
1151
  return result
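# Illustrative sketch (not part of the original module) of a minimal 'what'
# dictionary; file, node and cluster names are made-up values:
#
#   what = {
#     constants.NV_FILELIST: [pathutils.CLUSTER_CONF_FILE],
#     constants.NV_NODELIST: (["node2.example.com"], {}),
#     constants.NV_TIME: None,
#     }
#   result = VerifyNode(what, "cluster.example.com", all_hvparams={})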
1152

    
1153

    
1154
def GetBlockDevSizes(devices):
1155
  """Return the size of the given block devices
1156

1157
  @type devices: list
1158
  @param devices: list of block device nodes to query
1159
  @rtype: dict
1160
  @return:
1161
    dictionary of all block devices under /dev (key). The value is their
1162
    size in MiB.
1163

1164
    {'/dev/disk/by-uuid/123456-12321231-312312-312': 124}
1165

1166
  """
1167
  DEV_PREFIX = "/dev/"
1168
  blockdevs = {}
1169

    
1170
  for devpath in devices:
1171
    if not utils.IsBelowDir(DEV_PREFIX, devpath):
1172
      continue
1173

    
1174
    try:
1175
      st = os.stat(devpath)
1176
    except EnvironmentError, err:
1177
      logging.warning("Error stat()'ing device %s: %s", devpath, str(err))
1178
      continue
1179

    
1180
    if stat.S_ISBLK(st.st_mode):
1181
      result = utils.RunCmd(["blockdev", "--getsize64", devpath])
1182
      if result.failed:
1183
        # We don't want to fail, just do not list this device as available
1184
        logging.warning("Cannot get size for block device %s", devpath)
1185
        continue
1186

    
1187
      size = int(result.stdout) / (1024 * 1024)
1188
      blockdevs[devpath] = size
1189
  return blockdevs
1190

    
1191

    
1192
def GetVolumeList(vg_names):
1193
  """Compute list of logical volumes and their size.
1194

1195
  @type vg_names: list
1196
  @param vg_names: the volume groups whose LVs we should list, or
1197
      empty for all volume groups
1198
  @rtype: dict
1199
  @return:
1200
      dictionary of all partitions (key) with value being a tuple of
1201
      their size (in MiB), inactive and online status::
1202

1203
        {'xenvg/test1': ('20.06', True, True)}
1204

1205
      in case of errors, a string is returned with the error
1206
      details.
1207

1208
  """
1209
  lvs = {}
1210
  sep = "|"
1211
  if not vg_names:
1212
    vg_names = []
1213
  result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
1214
                         "--separator=%s" % sep,
1215
                         "-ovg_name,lv_name,lv_size,lv_attr"] + vg_names)
1216
  if result.failed:
1217
    _Fail("Failed to list logical volumes, lvs output: %s", result.output)
1218

    
1219
  for line in result.stdout.splitlines():
1220
    line = line.strip()
1221
    match = _LVSLINE_REGEX.match(line)
1222
    if not match:
1223
      logging.error("Invalid line returned from lvs output: '%s'", line)
1224
      continue
1225
    vg_name, name, size, attr = match.groups()
1226
    inactive = attr[4] == "-"
1227
    online = attr[5] == "o"
1228
    virtual = attr[0] == "v"
1229
    if virtual:
1230
      # we don't want to report such volumes as existing, since they
1231
      # don't really hold data
1232
      continue
1233
    lvs[vg_name + "/" + name] = (size, inactive, online)
1234

    
1235
  return lvs
1236

    
1237

    
1238
def ListVolumeGroups():
1239
  """List the volume groups and their size.
1240

1241
  @rtype: dict
1242
  @return: dictionary with keys volume name and values the
1243
      size of the volume
1244

1245
  """
1246
  return utils.ListVolumeGroups()
1247

    
1248

    
1249
def NodeVolumes():
1250
  """List all volumes on this node.
1251

1252
  @rtype: list
1253
  @return:
1254
    A list of dictionaries, each having four keys:
1255
      - name: the logical volume name,
1256
      - size: the size of the logical volume
1257
      - dev: the physical device on which the LV lives
1258
      - vg: the volume group to which it belongs
1259

1260
    In case of errors, we return an empty list and log the
1261
    error.
1262

1263
    Note that since a logical volume can live on multiple physical
1264
    volumes, the resulting list might include a logical volume
1265
    multiple times.
1266

1267
  """
1268
  result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
1269
                         "--separator=|",
1270
                         "--options=lv_name,lv_size,devices,vg_name"])
1271
  if result.failed:
1272
    _Fail("Failed to list logical volumes, lvs output: %s",
1273
          result.output)
1274

    
1275
  def parse_dev(dev):
1276
    return dev.split("(")[0]
1277

    
1278
  def handle_dev(dev):
1279
    return [parse_dev(x) for x in dev.split(",")]
1280

    
1281
  def map_line(line):
1282
    line = [v.strip() for v in line]
1283
    return [{"name": line[0], "size": line[1],
1284
             "dev": dev, "vg": line[3]} for dev in handle_dev(line[2])]
1285

    
1286
  all_devs = []
1287
  for line in result.stdout.splitlines():
1288
    if line.count("|") >= 3:
1289
      all_devs.extend(map_line(line.split("|")))
1290
    else:
1291
      logging.warning("Strange line in the output from lvs: '%s'", line)
1292
  return all_devs
1293

    
1294

    
1295
def BridgesExist(bridges_list):
1296
  """Check if a list of bridges exist on the current node.
1297

1298
  @rtype: boolean
1299
  @return: C{True} if all of them exist, C{False} otherwise
1300

1301
  """
1302
  missing = []
1303
  for bridge in bridges_list:
1304
    if not utils.BridgeExists(bridge):
1305
      missing.append(bridge)
1306

    
1307
  if missing:
1308
    _Fail("Missing bridges %s", utils.CommaJoin(missing))
1309

    
1310

    
1311
def GetInstanceListForHypervisor(hname, hvparams=None,
1312
                                 get_hv_fn=hypervisor.GetHypervisor):
1313
  """Provides a list of instances of the given hypervisor.
1314

1315
  @type hname: string
1316
  @param hname: name of the hypervisor
1317
  @type hvparams: dict of strings
1318
  @param hvparams: hypervisor parameters for the given hypervisor
1319
  @type get_hv_fn: function
1320
  @param get_hv_fn: function that returns a hypervisor for the given hypervisor
1321
    name; optional parameter to increase testability
1322

1323
  @rtype: list
1324
  @return: a list of all running instances on the current node
1325
    - instance1.example.com
1326
    - instance2.example.com
1327

1328
  """
1329
  results = []
1330
  try:
1331
    hv = get_hv_fn(hname)
1332
    names = hv.ListInstances(hvparams=hvparams)
1333
    results.extend(names)
1334
  except errors.HypervisorError, err:
1335
    _Fail("Error enumerating instances (hypervisor %s): %s",
1336
          hname, err, exc=True)
1337
  return results
1338

    
1339

    
1340
def GetInstanceList(hypervisor_list, all_hvparams=None,
1341
                    get_hv_fn=hypervisor.GetHypervisor):
1342
  """Provides a list of instances.
1343

1344
  @type hypervisor_list: list
1345
  @param hypervisor_list: the list of hypervisors to query information
1346
  @type all_hvparams: dict of dict of strings
1347
  @param all_hvparams: a dictionary mapping hypervisor types to respective
1348
    cluster-wide hypervisor parameters
1349
  @type get_hv_fn: function
1350
  @param get_hv_fn: function that returns a hypervisor for the given hypervisor
1351
    name; optional parameter to increase testability
1352

1353
  @rtype: list
1354
  @return: a list of all running instances on the current node
1355
    - instance1.example.com
1356
    - instance2.example.com
1357

1358
  """
1359
  results = []
1360
  for hname in hypervisor_list:
1361
    hvparams = all_hvparams[hname]
1362
    results.extend(GetInstanceListForHypervisor(hname, hvparams=hvparams,
1363
                                                get_hv_fn=get_hv_fn))
1364
  return results
1365

    
1366

    
1367
def GetInstanceInfo(instance, hname, hvparams=None):
1368
  """Gives back the information about an instance as a dictionary.
1369

1370
  @type instance: string
1371
  @param instance: the instance name
1372
  @type hname: string
1373
  @param hname: the hypervisor type of the instance
1374
  @type hvparams: dict of strings
1375
  @param hvparams: the instance's hvparams
1376

1377
  @rtype: dict
1378
  @return: dictionary with the following keys:
1379
      - memory: memory size of instance (int)
1380
      - state: xen state of instance (string)
1381
      - time: cpu time of instance (float)
1382
      - vcpus: the number of vcpus (int)
1383

1384
  """
1385
  output = {}
1386

    
1387
  iinfo = hypervisor.GetHypervisor(hname).GetInstanceInfo(instance,
1388
                                                          hvparams=hvparams)
1389
  if iinfo is not None:
1390
    output["memory"] = iinfo[2]
1391
    output["vcpus"] = iinfo[3]
1392
    output["state"] = iinfo[4]
1393
    output["time"] = iinfo[5]
1394

    
1395
  return output
1396

    
1397

    
1398
def GetInstanceMigratable(instance):
1399
  """Computes whether an instance can be migrated.
1400

1401
  @type instance: L{objects.Instance}
1402
  @param instance: object representing the instance to be checked.
1403

1404
  @rtype: None
1405
  @raise RPCFail: if the instance is not running on this node; missing
1406
      disk symlinks are only logged as warnings
1408

1409
  """
1410
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
1411
  iname = instance.name
1412
  if iname not in hyper.ListInstances(instance.hvparams):
1413
    _Fail("Instance %s is not running", iname)
1414

    
1415
  for idx in range(len(instance.disks)):
1416
    link_name = _GetBlockDevSymlinkPath(iname, idx)
1417
    if not os.path.islink(link_name):
1418
      logging.warning("Instance %s is missing symlink %s for disk %d",
1419
                      iname, link_name, idx)
1420

    
1421

    
1422
def GetAllInstancesInfo(hypervisor_list, all_hvparams):
1423
  """Gather data about all instances.
1424

1425
  This is the equivalent of L{GetInstanceInfo}, except that it
1426
  computes data for all instances at once, thus being faster if one
1427
  needs data about more than one instance.
1428

1429
  @type hypervisor_list: list
1430
  @param hypervisor_list: list of hypervisors to query for instance data
1431
  @type all_hvparams: dict of dict of strings
1432
  @param all_hvparams: mapping of hypervisor names to hvparams
1433

1434
  @rtype: dict
1435
  @return: dictionary of instance: data, with data having the following keys:
1436
      - memory: memory size of instance (int)
1437
      - state: xen state of instance (string)
1438
      - time: cpu time of instance (float)
1439
      - vcpus: the number of vcpus
1440

1441
  """
1442
  output = {}
1443
  for hname in hypervisor_list:
1444
    hvparams = all_hvparams[hname]
1445
    iinfo = hypervisor.GetHypervisor(hname).GetAllInstancesInfo(hvparams)
1446
    if iinfo:
1447
      for name, _, memory, vcpus, state, times in iinfo:
1448
        value = {
1449
          "memory": memory,
1450
          "vcpus": vcpus,
1451
          "state": state,
1452
          "time": times,
1453
          }
1454
        if name in output:
1455
          # we only check static parameters, like memory and vcpus,
1456
          # and not state and time which can change between the
1457
          # invocations of the different hypervisors
1458
          for key in "memory", "vcpus":
1459
            if value[key] != output[name][key]:
1460
              _Fail("Instance %s is running twice"
1461
                    " with different parameters", name)
1462
        output[name] = value
1463

    
1464
  return output
1465

    
1466

    
1467
def GetInstanceConsoleInfo(instance_param_dict,
1468
                           get_hv_fn=hypervisor.GetHypervisor):
1469
  """Gather data about the console access of a set of instances of this node.
1470

1471
  This function assumes that the caller already knows which instances are on
1472
  this node, by calling a function such as L{GetAllInstancesInfo} or
1473
  L{GetInstanceList}.
1474

1475
  For every instance, a large amount of configuration data needs to be
1476
  provided to the hypervisor interface in order to receive the console
1477
  information. Whether this could or should be cut down can be discussed.
1478
  The information is provided in a dictionary indexed by instance name,
1479
  allowing any number of instance queries to be done.
1480

1481
  @type instance_param_dict: dict of string to tuple of dictionaries, where the
1482
    dictionaries represent: L{objects.Instance}, L{objects.Node}, HvParams,
1483
    BeParams
1484
  @param instance_param_dict: mapping of instance name to parameters necessary
1485
    for console information retrieval
1486

1487
  @rtype: dict
1488
  @return: dictionary of instance: data, with data having the following keys:
1489
      - instance: instance name
1490
      - kind: console kind
1491
      - message: used with kind == CONS_MESSAGE, indicates console to be
1492
                 unavailable, supplies error message
1493
      - host: host to connect to
1494
      - port: port to use
1495
      - user: user for login
1496
      - command: the command, broken into parts as an array
1497
      - display: unknown, potentially unused?
1498

1499
  """
1500

    
1501
  output = {}
1502
  for inst_name in instance_param_dict:
1503
    instance = instance_param_dict[inst_name]["instance"]
1504
    pnode = instance_param_dict[inst_name]["node"]
1505
    hvparams = instance_param_dict[inst_name]["hvParams"]
1506
    beparams = instance_param_dict[inst_name]["beParams"]
1507

    
1508
    instance = objects.Instance.FromDict(instance)
1509
    pnode = objects.Node.FromDict(pnode)
1510

    
1511
    h = get_hv_fn(instance.hypervisor)
1512
    output[inst_name] = h.GetInstanceConsole(instance, pnode, hvparams,
1513
                                             beparams).ToDict()
1514

    
1515
  return output


def _InstanceLogName(kind, os_name, instance, component):
  """Compute the OS log filename for a given instance and operation.

  The instance name and os name are passed in as strings since not all
  operations have these as part of an instance object.

  @type kind: string
  @param kind: the operation type (e.g. add, import, etc.)
  @type os_name: string
  @param os_name: the os name
  @type instance: string
  @param instance: the name of the instance being imported/added/etc.
  @type component: string or None
  @param component: the name of the component of the instance being
      transferred

  """
  # TODO: Use tempfile.mkstemp to create unique filename
  if component:
    assert "/" not in component
    c_msg = "-%s" % component
  else:
    c_msg = ""
  base = ("%s-%s-%s%s-%s.log" %
          (kind, os_name, instance, c_msg, utils.TimestampForFilename()))
  return utils.PathJoin(pathutils.LOG_OS_DIR, base)
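
# For illustration (hypothetical values): _InstanceLogName("add",
# "debootstrap", "inst1.example.com", None) yields something like
#   <LOG_OS_DIR>/add-debootstrap-inst1.example.com-<timestamp>.log
# where LOG_OS_DIR is typically /var/log/ganeti/os.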


def InstanceOsAdd(instance, reinstall, debug):
  """Add an OS to an instance.

  @type instance: L{objects.Instance}
  @param instance: Instance whose OS is to be installed
  @type reinstall: boolean
  @param reinstall: whether this is an instance reinstall
  @type debug: integer
  @param debug: debug level, passed to the OS scripts
  @rtype: None

  """
  inst_os = OSFromDisk(instance.os)

  create_env = OSEnvironment(instance, inst_os, debug)
  if reinstall:
    create_env["INSTANCE_REINSTALL"] = "1"

  logfile = _InstanceLogName("add", instance.os, instance.name, None)

  result = utils.RunCmd([inst_os.create_script], env=create_env,
                        cwd=inst_os.path, output=logfile, reset_env=True)
  if result.failed:
    logging.error("os create command '%s' returned error: %s, logfile: %s,"
                  " output: %s", result.cmd, result.fail_reason, logfile,
                  result.output)
    lines = [utils.SafeEncode(val)
             for val in utils.TailFile(logfile, lines=20)]
    _Fail("OS create script failed (%s), last lines in the"
          " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)


def RunRenameInstance(instance, old_name, debug):
  """Run the OS rename script for an instance.

  @type instance: L{objects.Instance}
  @param instance: Instance whose OS rename script is to be run
  @type old_name: string
  @param old_name: previous instance name
  @type debug: integer
  @param debug: debug level, passed to the OS scripts
  @rtype: boolean
  @return: the success of the operation

  """
  inst_os = OSFromDisk(instance.os)

  rename_env = OSEnvironment(instance, inst_os, debug)
  rename_env["OLD_INSTANCE_NAME"] = old_name

  logfile = _InstanceLogName("rename", instance.os,
                             "%s-%s" % (old_name, instance.name), None)

  result = utils.RunCmd([inst_os.rename_script], env=rename_env,
                        cwd=inst_os.path, output=logfile, reset_env=True)

  if result.failed:
    logging.error("os rename command '%s' returned error: %s output: %s",
                  result.cmd, result.fail_reason, result.output)
    lines = [utils.SafeEncode(val)
             for val in utils.TailFile(logfile, lines=20)]
    _Fail("OS rename script failed (%s), last lines in the"
          " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)


def _GetBlockDevSymlinkPath(instance_name, idx, _dir=None):
  """Returns symlink path for block device.

  """
  if _dir is None:
    _dir = pathutils.DISK_LINKS_DIR

  return utils.PathJoin(_dir,
                        ("%s%s%s" %
                         (instance_name, constants.DISK_SEPARATOR, idx)))
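
# For illustration (hypothetical values): assuming the default
# pathutils.DISK_LINKS_DIR of /var/run/ganeti/instance-disks and the usual
# ":" disk separator, _GetBlockDevSymlinkPath("inst1.example.com", 0)
# would return something like
#   /var/run/ganeti/instance-disks/inst1.example.com:0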


def _SymlinkBlockDev(instance_name, device_path, idx):
  """Set up symlinks to an instance's block device.

  This is an auxiliary function run when an instance is started (on the
  primary node) or when an instance is migrated (on the target node).


  @param instance_name: the name of the target instance
  @param device_path: path of the physical block device, on the node
  @param idx: the disk index
  @return: absolute path to the disk's symlink

  """
  link_name = _GetBlockDevSymlinkPath(instance_name, idx)
  try:
    os.symlink(device_path, link_name)
  except OSError, err:
    if err.errno == errno.EEXIST:
      if (not os.path.islink(link_name) or
          os.readlink(link_name) != device_path):
        os.remove(link_name)
        os.symlink(device_path, link_name)
    else:
      raise

  return link_name


def _RemoveBlockDevLinks(instance_name, disks):
  """Remove the block device symlinks belonging to the given instance.

  """
  for idx, _ in enumerate(disks):
    link_name = _GetBlockDevSymlinkPath(instance_name, idx)
    if os.path.islink(link_name):
      try:
        os.remove(link_name)
      except OSError:
        logging.exception("Can't remove symlink '%s'", link_name)


def _GatherAndLinkBlockDevs(instance):
  """Set up an instance's block device(s).

  This is run on the primary node at instance startup. The block
  devices must be already assembled.

  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should assemble
  @rtype: list
  @return: list of (disk_object, link_name, device) tuples

  """
  block_devices = []
  for idx, disk in enumerate(instance.disks):
    device = _RecursiveFindBD(disk)
    if device is None:
      raise errors.BlockDeviceError("Block device '%s' is not set up." %
                                    str(disk))
    device.Open()
    try:
      link_name = _SymlinkBlockDev(instance.name, device.dev_path, idx)
    except OSError, e:
      raise errors.BlockDeviceError("Cannot create block device symlink: %s" %
                                    e.strerror)

    block_devices.append((disk, link_name, device))

  return block_devices


def StartInstance(instance, startup_paused, reason, store_reason=True):
  """Start an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance object
  @type startup_paused: bool
  @param startup_paused: pause instance at startup?
  @type reason: list of reasons
  @param reason: the reason trail for this startup
  @type store_reason: boolean
  @param store_reason: whether to store the startup reason trail on file
  @rtype: None

  """
  running_instances = GetInstanceListForHypervisor(instance.hypervisor,
                                                   instance.hvparams)

  if instance.name in running_instances:
    logging.info("Instance %s already running, not starting", instance.name)
    return

  try:
    block_devices = _GatherAndLinkBlockDevs(instance)
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
    hyper.StartInstance(instance, block_devices, startup_paused)
    if store_reason:
      _StoreInstReasonTrail(instance.name, reason)
  except errors.BlockDeviceError, err:
    _Fail("Block device error: %s", err, exc=True)
  except errors.HypervisorError, err:
    _RemoveBlockDevLinks(instance.name, instance.disks)
    _Fail("Hypervisor error: %s", err, exc=True)


def InstanceShutdown(instance, timeout, reason, store_reason=True):
  """Shut an instance down.

  @note: this function uses polling with a hardcoded timeout.

  @type instance: L{objects.Instance}
  @param instance: the instance object
  @type timeout: integer
  @param timeout: maximum timeout for soft shutdown
  @type reason: list of reasons
  @param reason: the reason trail for this shutdown
  @type store_reason: boolean
  @param store_reason: whether to store the shutdown reason trail on file
  @rtype: None

  """
  hv_name = instance.hypervisor
  hyper = hypervisor.GetHypervisor(hv_name)
  iname = instance.name

  if instance.name not in hyper.ListInstances(instance.hvparams):
    logging.info("Instance %s not running, doing nothing", iname)
    return

  class _TryShutdown:
    def __init__(self):
      self.tried_once = False

    def __call__(self):
      if iname not in hyper.ListInstances(instance.hvparams):
        return

      try:
        hyper.StopInstance(instance, retry=self.tried_once)
        if store_reason:
          _StoreInstReasonTrail(instance.name, reason)
      except errors.HypervisorError, err:
        if iname not in hyper.ListInstances(instance.hvparams):
          # if the instance is no longer existing, consider this a
          # success and go to cleanup
          return

        _Fail("Failed to stop instance %s: %s", iname, err)

      self.tried_once = True

      raise utils.RetryAgain()

  try:
    utils.Retry(_TryShutdown(), 5, timeout)
  except utils.RetryTimeout:
    # the shutdown did not succeed
    logging.error("Shutdown of '%s' unsuccessful, forcing", iname)

    try:
      hyper.StopInstance(instance, force=True)
    except errors.HypervisorError, err:
      if iname in hyper.ListInstances(instance.hvparams):
        # only raise an error if the instance still exists, otherwise
        # the error could simply be "instance ... unknown"!
        _Fail("Failed to force stop instance %s: %s", iname, err)

    time.sleep(1)

    if iname in hyper.ListInstances(instance.hvparams):
      _Fail("Could not shutdown instance %s even by destroy", iname)

  try:
    hyper.CleanupInstance(instance.name)
  except errors.HypervisorError, err:
    logging.warning("Failed to execute post-shutdown cleanup step: %s", err)

  _RemoveBlockDevLinks(iname, instance.disks)
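
# Note (added for clarity, not part of the original code path): utils.Retry
# above re-invokes _TryShutdown roughly every 5 seconds until either the
# instance disappears from the hypervisor's list or "timeout" seconds have
# passed; only then does InstanceShutdown fall back to a forced stop.
# A hedged usage sketch, with a made-up instance object:
#
#   inst = objects.Instance.FromDict(serialized_instance)
#   InstanceShutdown(inst, constants.DEFAULT_SHUTDOWN_TIMEOUT, reason=[])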


def InstanceReboot(instance, reboot_type, shutdown_timeout, reason):
  """Reboot an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance object to reboot
  @type reboot_type: str
  @param reboot_type: the type of reboot, one of the following
    constants:
      - L{constants.INSTANCE_REBOOT_SOFT}: only reboot the
        instance OS, do not recreate the VM
      - L{constants.INSTANCE_REBOOT_HARD}: tear down and
        restart the VM (at the hypervisor level)
      - the other reboot type (L{constants.INSTANCE_REBOOT_FULL}) is
        not accepted here, since that mode is handled differently, in
        cmdlib, and translates into full stop and start of the
        instance (instead of a call_instance_reboot RPC)
  @type shutdown_timeout: integer
  @param shutdown_timeout: maximum timeout for soft shutdown
  @type reason: list of reasons
  @param reason: the reason trail for this reboot
  @rtype: None

  """
  running_instances = GetInstanceListForHypervisor(instance.hypervisor,
                                                   instance.hvparams)

  if instance.name not in running_instances:
    _Fail("Cannot reboot instance %s that is not running", instance.name)

  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  if reboot_type == constants.INSTANCE_REBOOT_SOFT:
    try:
      hyper.RebootInstance(instance)
    except errors.HypervisorError, err:
      _Fail("Failed to soft reboot instance %s: %s", instance.name, err)
  elif reboot_type == constants.INSTANCE_REBOOT_HARD:
    try:
      InstanceShutdown(instance, shutdown_timeout, reason, store_reason=False)
      result = StartInstance(instance, False, reason, store_reason=False)
      _StoreInstReasonTrail(instance.name, reason)
      return result
    except errors.HypervisorError, err:
      _Fail("Failed to hard reboot instance %s: %s", instance.name, err)
  else:
    _Fail("Invalid reboot_type received: %s", reboot_type)


def InstanceBalloonMemory(instance, memory):
  """Resize an instance's memory.

  @type instance: L{objects.Instance}
  @param instance: the instance object
  @type memory: int
  @param memory: new memory amount in MB
  @rtype: None

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  running = hyper.ListInstances(instance.hvparams)
  if instance.name not in running:
    logging.info("Instance %s is not running, cannot balloon", instance.name)
    return
  try:
    hyper.BalloonInstanceMemory(instance, memory)
  except errors.HypervisorError, err:
    _Fail("Failed to balloon instance memory: %s", err, exc=True)


def MigrationInfo(instance):
  """Gather information about an instance to be migrated.

  @type instance: L{objects.Instance}
  @param instance: the instance definition

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  try:
    info = hyper.MigrationInfo(instance)
  except errors.HypervisorError, err:
    _Fail("Failed to fetch migration information: %s", err, exc=True)
  return info


def AcceptInstance(instance, info, target):
  """Prepare the node to accept an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance definition
  @type info: string/data (opaque)
  @param info: migration information, from the source node
  @type target: string
  @param target: target host (usually ip), on this node

  """
  # TODO: why is this required only for DTS_EXT_MIRROR?
  if instance.disk_template in constants.DTS_EXT_MIRROR:
    # Create the symlinks, as the disks are not active
    # in any way
    try:
      _GatherAndLinkBlockDevs(instance)
    except errors.BlockDeviceError, err:
      _Fail("Block device error: %s", err, exc=True)

  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  try:
    hyper.AcceptInstance(instance, info, target)
  except errors.HypervisorError, err:
    if instance.disk_template in constants.DTS_EXT_MIRROR:
      _RemoveBlockDevLinks(instance.name, instance.disks)
    _Fail("Failed to accept instance: %s", err, exc=True)


def FinalizeMigrationDst(instance, info, success):
  """Finalize any preparation to accept an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance definition
  @type info: string/data (opaque)
  @param info: migration information, from the source node
  @type success: boolean
  @param success: whether the migration was a success or a failure

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  try:
    hyper.FinalizeMigrationDst(instance, info, success)
  except errors.HypervisorError, err:
    _Fail("Failed to finalize migration on the target node: %s", err, exc=True)


def MigrateInstance(cluster_name, instance, target, live):
  """Migrates an instance to another node.

  @type cluster_name: string
  @param cluster_name: name of the cluster
  @type instance: L{objects.Instance}
  @param instance: the instance definition
  @type target: string
  @param target: the target node name
  @type live: boolean
  @param live: whether the migration should be done live or not (the
      interpretation of this parameter is left to the hypervisor)
  @raise RPCFail: if migration fails for some reason

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)

  try:
    hyper.MigrateInstance(cluster_name, instance, target, live)
  except errors.HypervisorError, err:
    _Fail("Failed to migrate instance: %s", err, exc=True)


def FinalizeMigrationSource(instance, success, live):
  """Finalize the instance migration on the source node.

  @type instance: L{objects.Instance}
  @param instance: the instance definition of the migrated instance
  @type success: bool
  @param success: whether the migration succeeded or not
  @type live: bool
  @param live: whether the user requested a live migration or not
  @raise RPCFail: If the execution fails for some reason

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)

  try:
    hyper.FinalizeMigrationSource(instance, success, live)
  except Exception, err:  # pylint: disable=W0703
    _Fail("Failed to finalize the migration on the source node: %s", err,
          exc=True)


def GetMigrationStatus(instance):
  """Get the migration status

  @type instance: L{objects.Instance}
  @param instance: the instance that is being migrated
  @rtype: L{objects.MigrationStatus}
  @return: the status of the current migration (one of
           L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
           progress info that can be retrieved from the hypervisor
  @raise RPCFail: If the migration status cannot be retrieved

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  try:
    return hyper.GetMigrationStatus(instance)
  except Exception, err:  # pylint: disable=W0703
    _Fail("Failed to get migration status: %s", err, exc=True)
1994

    
1995

    
1996
def BlockdevCreate(disk, size, owner, on_primary, info, excl_stor):
1997
  """Creates a block device for an instance.
1998

1999
  @type disk: L{objects.Disk}
2000
  @param disk: the object describing the disk we should create
2001
  @type size: int
2002
  @param size: the size of the physical underlying device, in MiB
2003
  @type owner: str
2004
  @param owner: the name of the instance for which disk is created,
2005
      used for device cache data
2006
  @type on_primary: boolean
2007
  @param on_primary:  indicates if it is the primary node or not
2008
  @type info: string
2009
  @param info: string that will be sent to the physical device
2010
      creation, used for example to set (LVM) tags on LVs
2011
  @type excl_stor: boolean
2012
  @param excl_stor: Whether exclusive_storage is active
2013

2014
  @return: the new unique_id of the device (this can sometime be
2015
      computed only after creation), or None. On secondary nodes,
2016
      it's not required to return anything.
2017

2018
  """
2019
  # TODO: remove the obsolete "size" argument
2020
  # pylint: disable=W0613
2021
  clist = []
2022
  if disk.children:
2023
    for child in disk.children:
2024
      try:
2025
        crdev = _RecursiveAssembleBD(child, owner, on_primary)
2026
      except errors.BlockDeviceError, err:
2027
        _Fail("Can't assemble device %s: %s", child, err)
2028
      if on_primary or disk.AssembleOnSecondary():
2029
        # we need the children open in case the device itself has to
2030
        # be assembled
2031
        try:
2032
          # pylint: disable=E1103
2033
          crdev.Open()
2034
        except errors.BlockDeviceError, err:
2035
          _Fail("Can't make child '%s' read-write: %s", child, err)
2036
      clist.append(crdev)
2037

    
2038
  try:
2039
    device = bdev.Create(disk, clist, excl_stor)
2040
  except errors.BlockDeviceError, err:
2041
    _Fail("Can't create block device: %s", err)
2042

    
2043
  if on_primary or disk.AssembleOnSecondary():
2044
    try:
2045
      device.Assemble()
2046
    except errors.BlockDeviceError, err:
2047
      _Fail("Can't assemble device after creation, unusual event: %s", err)
2048
    if on_primary or disk.OpenOnSecondary():
2049
      try:
2050
        device.Open(force=True)
2051
      except errors.BlockDeviceError, err:
2052
        _Fail("Can't make device r/w after creation, unusual event: %s", err)
2053
    DevCacheManager.UpdateCache(device.dev_path, owner,
2054
                                on_primary, disk.iv_name)
2055

    
2056
  device.SetInfo(info)
2057

    
2058
  return device.unique_id
2059

    
2060

    
2061
def _WipeDevice(path, offset, size):
2062
  """This function actually wipes the device.
2063

2064
  @param path: The path to the device to wipe
2065
  @param offset: The offset in MiB in the file
2066
  @param size: The size in MiB to write
2067

2068
  """
2069
  # Internal sizes are always in Mebibytes; if the following "dd" command
2070
  # should use a different block size the offset and size given to this
2071
  # function must be adjusted accordingly before being passed to "dd".
2072
  block_size = 1024 * 1024
2073

    
2074
  cmd = [constants.DD_CMD, "if=/dev/zero", "seek=%d" % offset,
2075
         "bs=%s" % block_size, "oflag=direct", "of=%s" % path,
2076
         "count=%d" % size]
2077
  result = utils.RunCmd(cmd)
2078

    
2079
  if result.failed:
2080
    _Fail("Wipe command '%s' exited with error: %s; output: %s", result.cmd,
2081
          result.fail_reason, result.output)
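
# For illustration (hypothetical device path): _WipeDevice("/dev/xenvg/disk0",
# offset=0, size=1024) runs approximately
#   dd if=/dev/zero seek=0 bs=1048576 oflag=direct of=/dev/xenvg/disk0 count=1024
# i.e. offset and size are interpreted in MiB with a 1 MiB block size.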
2082

    
2083

    
2084
def BlockdevWipe(disk, offset, size):
2085
  """Wipes a block device.
2086

2087
  @type disk: L{objects.Disk}
2088
  @param disk: the disk object we want to wipe
2089
  @type offset: int
2090
  @param offset: The offset in MiB in the file
2091
  @type size: int
2092
  @param size: The size in MiB to write
2093

2094
  """
2095
  try:
2096
    rdev = _RecursiveFindBD(disk)
2097
  except errors.BlockDeviceError:
2098
    rdev = None
2099

    
2100
  if not rdev:
2101
    _Fail("Cannot execute wipe for device %s: device not found", disk.iv_name)
2102

    
2103
  # Do cross verify some of the parameters
2104
  if offset < 0:
2105
    _Fail("Negative offset")
2106
  if size < 0:
2107
    _Fail("Negative size")
2108
  if offset > rdev.size:
2109
    _Fail("Offset is bigger than device size")
2110
  if (offset + size) > rdev.size:
2111
    _Fail("The provided offset and size to wipe is bigger than device size")
2112

    
2113
  _WipeDevice(rdev.dev_path, offset, size)
2114

    
2115

    
2116
def BlockdevPauseResumeSync(disks, pause):
2117
  """Pause or resume the sync of the block device.
2118

2119
  @type disks: list of L{objects.Disk}
2120
  @param disks: the disks object we want to pause/resume
2121
  @type pause: bool
2122
  @param pause: Whether to pause or resume
2123

2124
  """
2125
  success = []
2126
  for disk in disks:
2127
    try:
2128
      rdev = _RecursiveFindBD(disk)
2129
    except errors.BlockDeviceError:
2130
      rdev = None
2131

    
2132
    if not rdev:
2133
      success.append((False, ("Cannot change sync for device %s:"
2134
                              " device not found" % disk.iv_name)))
2135
      continue
2136

    
2137
    result = rdev.PauseResumeSync(pause)
2138

    
2139
    if result:
2140
      success.append((result, None))
2141
    else:
2142
      if pause:
2143
        msg = "Pause"
2144
      else:
2145
        msg = "Resume"
2146
      success.append((result, "%s for device %s failed" % (msg, disk.iv_name)))
2147

    
2148
  return success
2149

    
2150

    
2151
def BlockdevRemove(disk):
2152
  """Remove a block device.
2153

2154
  @note: This is intended to be called recursively.
2155

2156
  @type disk: L{objects.Disk}
2157
  @param disk: the disk object we should remove
2158
  @rtype: boolean
2159
  @return: the success of the operation
2160

2161
  """
2162
  msgs = []
2163
  try:
2164
    rdev = _RecursiveFindBD(disk)
2165
  except errors.BlockDeviceError, err:
2166
    # probably can't attach
2167
    logging.info("Can't attach to device %s in remove", disk)
2168
    rdev = None
2169
  if rdev is not None:
2170
    r_path = rdev.dev_path
2171
    try:
2172
      rdev.Remove()
2173
    except errors.BlockDeviceError, err:
2174
      msgs.append(str(err))
2175
    if not msgs:
2176
      DevCacheManager.RemoveCache(r_path)
2177

    
2178
  if disk.children:
2179
    for child in disk.children:
2180
      try:
2181
        BlockdevRemove(child)
2182
      except RPCFail, err:
2183
        msgs.append(str(err))
2184

    
2185
  if msgs:
2186
    _Fail("; ".join(msgs))
2187

    
2188

    
2189
def _RecursiveAssembleBD(disk, owner, as_primary):
2190
  """Activate a block device for an instance.
2191

2192
  This is run on the primary and secondary nodes for an instance.
2193

2194
  @note: this function is called recursively.
2195

2196
  @type disk: L{objects.Disk}
2197
  @param disk: the disk we try to assemble
2198
  @type owner: str
2199
  @param owner: the name of the instance which owns the disk
2200
  @type as_primary: boolean
2201
  @param as_primary: if we should make the block device
2202
      read/write
2203

2204
  @return: the assembled device or None (in case no device
2205
      was assembled)
2206
  @raise errors.BlockDeviceError: in case there is an error
2207
      during the activation of the children or the device
2208
      itself
2209

2210
  """
2211
  children = []
2212
  if disk.children:
2213
    mcn = disk.ChildrenNeeded()
2214
    if mcn == -1:
2215
      mcn = 0 # max number of Nones allowed
2216
    else:
2217
      mcn = len(disk.children) - mcn # max number of Nones
2218
    for chld_disk in disk.children:
2219
      try:
2220
        cdev = _RecursiveAssembleBD(chld_disk, owner, as_primary)
2221
      except errors.BlockDeviceError, err:
2222
        if children.count(None) >= mcn:
2223
          raise
2224
        cdev = None
2225
        logging.error("Error in child activation (but continuing): %s",
2226
                      str(err))
2227
      children.append(cdev)
2228

    
2229
  if as_primary or disk.AssembleOnSecondary():
2230
    r_dev = bdev.Assemble(disk, children)
2231
    result = r_dev
2232
    if as_primary or disk.OpenOnSecondary():
2233
      r_dev.Open()
2234
    DevCacheManager.UpdateCache(r_dev.dev_path, owner,
2235
                                as_primary, disk.iv_name)
2236

    
2237
  else:
2238
    result = True
2239
  return result
2240

    
2241

    
2242
def BlockdevAssemble(disk, owner, as_primary, idx):
2243
  """Activate a block device for an instance.
2244

2245
  This is a wrapper over _RecursiveAssembleBD.
2246

2247
  @rtype: str or boolean
2248
  @return: a C{/dev/...} path for primary nodes, and
2249
      C{True} for secondary nodes
2250

2251
  """
2252
  try:
2253
    result = _RecursiveAssembleBD(disk, owner, as_primary)
2254
    if isinstance(result, BlockDev):
2255
      # pylint: disable=E1103
2256
      result = result.dev_path
2257
      if as_primary:
2258
        _SymlinkBlockDev(owner, result, idx)
2259
  except errors.BlockDeviceError, err:
2260
    _Fail("Error while assembling disk: %s", err, exc=True)
2261
  except OSError, err:
2262
    _Fail("Error while symlinking disk: %s", err, exc=True)
2263

    
2264
  return result
2265

    
2266

    
2267
def BlockdevShutdown(disk):
2268
  """Shut down a block device.
2269

2270
  First, if the device is assembled (Attach() is successful), then
2271
  the device is shutdown. Then the children of the device are
2272
  shutdown.
2273

2274
  This function is called recursively. Note that we don't cache the
2275
  children or such, as opposed to assemble, shutdown of different
2276
  devices doesn't require that the upper device was active.
2277

2278
  @type disk: L{objects.Disk}
2279
  @param disk: the description of the disk we should
2280
      shutdown
2281
  @rtype: None
2282

2283
  """
2284
  msgs = []
2285
  r_dev = _RecursiveFindBD(disk)
2286
  if r_dev is not None:
2287
    r_path = r_dev.dev_path
2288
    try:
2289
      r_dev.Shutdown()
2290
      DevCacheManager.RemoveCache(r_path)
2291
    except errors.BlockDeviceError, err:
2292
      msgs.append(str(err))
2293

    
2294
  if disk.children:
2295
    for child in disk.children:
2296
      try:
2297
        BlockdevShutdown(child)
2298
      except RPCFail, err:
2299
        msgs.append(str(err))
2300

    
2301
  if msgs:
2302
    _Fail("; ".join(msgs))
2303

    
2304

    
2305
def BlockdevAddchildren(parent_cdev, new_cdevs):
2306
  """Extend a mirrored block device.
2307

2308
  @type parent_cdev: L{objects.Disk}
2309
  @param parent_cdev: the disk to which we should add children
2310
  @type new_cdevs: list of L{objects.Disk}
2311
  @param new_cdevs: the list of children which we should add
2312
  @rtype: None
2313

2314
  """
2315
  parent_bdev = _RecursiveFindBD(parent_cdev)
2316
  if parent_bdev is None:
2317
    _Fail("Can't find parent device '%s' in add children", parent_cdev)
2318
  new_bdevs = [_RecursiveFindBD(disk) for disk in new_cdevs]
2319
  if new_bdevs.count(None) > 0:
2320
    _Fail("Can't find new device(s) to add: %s:%s", new_bdevs, new_cdevs)
2321
  parent_bdev.AddChildren(new_bdevs)
2322

    
2323

    
2324
def BlockdevRemovechildren(parent_cdev, new_cdevs):
2325
  """Shrink a mirrored block device.
2326

2327
  @type parent_cdev: L{objects.Disk}
2328
  @param parent_cdev: the disk from which we should remove children
2329
  @type new_cdevs: list of L{objects.Disk}
2330
  @param new_cdevs: the list of children which we should remove
2331
  @rtype: None
2332

2333
  """
2334
  parent_bdev = _RecursiveFindBD(parent_cdev)
2335
  if parent_bdev is None:
2336
    _Fail("Can't find parent device '%s' in remove children", parent_cdev)
2337
  devs = []
2338
  for disk in new_cdevs:
2339
    rpath = disk.StaticDevPath()
2340
    if rpath is None:
2341
      bd = _RecursiveFindBD(disk)
2342
      if bd is None:
2343
        _Fail("Can't find device %s while removing children", disk)
2344
      else:
2345
        devs.append(bd.dev_path)
2346
    else:
2347
      if not utils.IsNormAbsPath(rpath):
2348
        _Fail("Strange path returned from StaticDevPath: '%s'", rpath)
2349
      devs.append(rpath)
2350
  parent_bdev.RemoveChildren(devs)
2351

    
2352

    
2353
def BlockdevGetmirrorstatus(disks):
2354
  """Get the mirroring status of a list of devices.
2355

2356
  @type disks: list of L{objects.Disk}
2357
  @param disks: the list of disks which we should query
2358
  @rtype: disk
2359
  @return: List of L{objects.BlockDevStatus}, one for each disk
2360
  @raise errors.BlockDeviceError: if any of the disks cannot be
2361
      found
2362

2363
  """
2364
  stats = []
2365
  for dsk in disks:
2366
    rbd = _RecursiveFindBD(dsk)
2367
    if rbd is None:
2368
      _Fail("Can't find device %s", dsk)
2369

    
2370
    stats.append(rbd.CombinedSyncStatus())
2371

    
2372
  return stats
2373

    
2374

    
2375
def BlockdevGetmirrorstatusMulti(disks):
2376
  """Get the mirroring status of a list of devices.
2377

2378
  @type disks: list of L{objects.Disk}
2379
  @param disks: the list of disks which we should query
2380
  @rtype: disk
2381
  @return: List of tuples, (bool, status), one for each disk; bool denotes
2382
    success/failure, status is L{objects.BlockDevStatus} on success, string
2383
    otherwise
2384

2385
  """
2386
  result = []
2387
  for disk in disks:
2388
    try:
2389
      rbd = _RecursiveFindBD(disk)
2390
      if rbd is None:
2391
        result.append((False, "Can't find device %s" % disk))
2392
        continue
2393

    
2394
      status = rbd.CombinedSyncStatus()
2395
    except errors.BlockDeviceError, err:
2396
      logging.exception("Error while getting disk status")
2397
      result.append((False, str(err)))
2398
    else:
2399
      result.append((True, status))
2400

    
2401
  assert len(disks) == len(result)
2402

    
2403
  return result
2404

    
2405

    
2406
def _RecursiveFindBD(disk):
2407
  """Check if a device is activated.
2408

2409
  If so, return information about the real device.
2410

2411
  @type disk: L{objects.Disk}
2412
  @param disk: the disk object we need to find
2413

2414
  @return: None if the device can't be found,
2415
      otherwise the device instance
2416

2417
  """
2418
  children = []
2419
  if disk.children:
2420
    for chdisk in disk.children:
2421
      children.append(_RecursiveFindBD(chdisk))
2422

    
2423
  return bdev.FindDevice(disk, children)
2424

    
2425

    
2426
def _OpenRealBD(disk):
2427
  """Opens the underlying block device of a disk.
2428

2429
  @type disk: L{objects.Disk}
2430
  @param disk: the disk object we want to open
2431

2432
  """
2433
  real_disk = _RecursiveFindBD(disk)
2434
  if real_disk is None:
2435
    _Fail("Block device '%s' is not set up", disk)
2436

    
2437
  real_disk.Open()
2438

    
2439
  return real_disk
2440

    
2441

    
2442
def BlockdevFind(disk):
2443
  """Check if a device is activated.
2444

2445
  If it is, return information about the real device.
2446

2447
  @type disk: L{objects.Disk}
2448
  @param disk: the disk to find
2449
  @rtype: None or objects.BlockDevStatus
2450
  @return: None if the disk cannot be found, otherwise the current
2451
           information
2452

2453
  """
2454
  try:
2455
    rbd = _RecursiveFindBD(disk)
2456
  except errors.BlockDeviceError, err:
2457
    _Fail("Failed to find device: %s", err, exc=True)
2458

    
2459
  if rbd is None:
2460
    return None
2461

    
2462
  return rbd.GetSyncStatus()
2463

    
2464

    
2465
def BlockdevGetdimensions(disks):
2466
  """Computes the size of the given disks.
2467

2468
  If a disk is not found, returns None instead.
2469

2470
  @type disks: list of L{objects.Disk}
2471
  @param disks: the list of disk to compute the size for
2472
  @rtype: list
2473
  @return: list with elements None if the disk cannot be found,
2474
      otherwise the pair (size, spindles), where spindles is None if the
2475
      device doesn't support that
2476

2477
  """
2478
  result = []
2479
  for cf in disks:
2480
    try:
2481
      rbd = _RecursiveFindBD(cf)
2482
    except errors.BlockDeviceError:
2483
      result.append(None)
2484
      continue
2485
    if rbd is None:
2486
      result.append(None)
2487
    else:
2488
      result.append(rbd.GetActualDimensions())
2489
  return result
2490

    
2491

    
2492
def UploadFile(file_name, data, mode, uid, gid, atime, mtime):
2493
  """Write a file to the filesystem.
2494

2495
  This allows the master to overwrite(!) a file. It will only perform
2496
  the operation if the file belongs to a list of configuration files.
2497

2498
  @type file_name: str
2499
  @param file_name: the target file name
2500
  @type data: str
2501
  @param data: the new contents of the file
2502
  @type mode: int
2503
  @param mode: the mode to give the file (can be None)
2504
  @type uid: string
2505
  @param uid: the owner of the file
2506
  @type gid: string
2507
  @param gid: the group of the file
2508
  @type atime: float
2509
  @param atime: the atime to set on the file (can be None)
2510
  @type mtime: float
2511
  @param mtime: the mtime to set on the file (can be None)
2512
  @rtype: None
2513

2514
  """
2515
  file_name = vcluster.LocalizeVirtualPath(file_name)
2516

    
2517
  if not os.path.isabs(file_name):
2518
    _Fail("Filename passed to UploadFile is not absolute: '%s'", file_name)
2519

    
2520
  if file_name not in _ALLOWED_UPLOAD_FILES:
2521
    _Fail("Filename passed to UploadFile not in allowed upload targets: '%s'",
2522
          file_name)
2523

    
2524
  raw_data = _Decompress(data)
2525

    
2526
  if not (isinstance(uid, basestring) and isinstance(gid, basestring)):
2527
    _Fail("Invalid username/groupname type")
2528

    
2529
  getents = runtime.GetEnts()
2530
  uid = getents.LookupUser(uid)
2531
  gid = getents.LookupGroup(gid)
2532

    
2533
  utils.SafeWriteFile(file_name, None,
2534
                      data=raw_data, mode=mode, uid=uid, gid=gid,
2535
                      atime=atime, mtime=mtime)
2536

    
2537

    
2538
def RunOob(oob_program, command, node, timeout):
2539
  """Executes oob_program with given command on given node.
2540

2541
  @param oob_program: The path to the executable oob_program
2542
  @param command: The command to invoke on oob_program
2543
  @param node: The node given as an argument to the program
2544
  @param timeout: Timeout after which we kill the oob program
2545

2546
  @return: stdout
2547
  @raise RPCFail: If execution fails for some reason
2548

2549
  """
2550
  result = utils.RunCmd([oob_program, command, node], timeout=timeout)
2551

    
2552
  if result.failed:
2553
    _Fail("'%s' failed with reason '%s'; output: %s", result.cmd,
2554
          result.fail_reason, result.output)
2555

    
2556
  return result.stdout
2557

    
2558

    
2559
def _OSOndiskAPIVersion(os_dir):
2560
  """Compute and return the API version of a given OS.
2561

2562
  This function will try to read the API version of the OS residing in
2563
  the 'os_dir' directory.
2564

2565
  @type os_dir: str
2566
  @param os_dir: the directory in which we should look for the OS
2567
  @rtype: tuple
2568
  @return: tuple (status, data) with status denoting the validity and
2569
      data holding either the valid versions or an error message
2570

2571
  """
2572
  api_file = utils.PathJoin(os_dir, constants.OS_API_FILE)
2573

    
2574
  try:
2575
    st = os.stat(api_file)
2576
  except EnvironmentError, err:
2577
    return False, ("Required file '%s' not found under path %s: %s" %
2578
                   (constants.OS_API_FILE, os_dir, utils.ErrnoOrStr(err)))
2579

    
2580
  if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
2581
    return False, ("File '%s' in %s is not a regular file" %
2582
                   (constants.OS_API_FILE, os_dir))
2583

    
2584
  try:
2585
    api_versions = utils.ReadFile(api_file).splitlines()
2586
  except EnvironmentError, err:
2587
    return False, ("Error while reading the API version file at %s: %s" %
2588
                   (api_file, utils.ErrnoOrStr(err)))
2589

    
2590
  try:
2591
    api_versions = [int(version.strip()) for version in api_versions]
2592
  except (TypeError, ValueError), err:
2593
    return False, ("API version(s) can't be converted to integer: %s" %
2594
                   str(err))
2595

    
2596
  return True, api_versions
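
# For illustration (hypothetical content): an OS definition shipping a
# "ganeti_api_version" file with the two lines
#   10
#   20
# would make this function return (True, [10, 20]); a missing or unreadable
# file yields (False, "<error message>") instead.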
2597

    
2598

    
2599
def DiagnoseOS(top_dirs=None):
2600
  """Compute the validity for all OSes.
2601

2602
  @type top_dirs: list
2603
  @param top_dirs: the list of directories in which to
2604
      search (if not given defaults to
2605
      L{pathutils.OS_SEARCH_PATH})
2606
  @rtype: list of L{objects.OS}
2607
  @return: a list of tuples (name, path, status, diagnose, variants,
2608
      parameters, api_version) for all (potential) OSes under all
2609
      search paths, where:
2610
          - name is the (potential) OS name
2611
          - path is the full path to the OS
2612
          - status True/False is the validity of the OS
2613
          - diagnose is the error message for an invalid OS, otherwise empty
2614
          - variants is a list of supported OS variants, if any
2615
          - parameters is a list of (name, help) parameters, if any
2616
          - api_version is a list of supported OS API versions
2617

2618
  """
2619
  if top_dirs is None:
2620
    top_dirs = pathutils.OS_SEARCH_PATH
2621

    
2622
  result = []
2623
  for dir_name in top_dirs:
2624
    if os.path.isdir(dir_name):
2625
      try:
2626
        f_names = utils.ListVisibleFiles(dir_name)
2627
      except EnvironmentError, err:
2628
        logging.exception("Can't list the OS directory %s: %s", dir_name, err)
2629
        break
2630
      for name in f_names:
2631
        os_path = utils.PathJoin(dir_name, name)
2632
        status, os_inst = _TryOSFromDisk(name, base_dir=dir_name)
2633
        if status:
2634
          diagnose = ""
2635
          variants = os_inst.supported_variants
2636
          parameters = os_inst.supported_parameters
2637
          api_versions = os_inst.api_versions
2638
        else:
2639
          diagnose = os_inst
2640
          variants = parameters = api_versions = []
2641
        result.append((name, os_path, status, diagnose, variants,
2642
                       parameters, api_versions))
2643

    
2644
  return result
2645

    
2646

    
2647
def _TryOSFromDisk(name, base_dir=None):
2648
  """Create an OS instance from disk.
2649

2650
  This function will return an OS instance if the given name is a
2651
  valid OS name.
2652

2653
  @type base_dir: string
2654
  @keyword base_dir: Base directory containing OS installations.
2655
                     Defaults to a search in all the OS_SEARCH_PATH dirs.
2656
  @rtype: tuple
2657
  @return: success and either the OS instance if we find a valid one,
2658
      or error message
2659

2660
  """
2661
  if base_dir is None:
2662
    os_dir = utils.FindFile(name, pathutils.OS_SEARCH_PATH, os.path.isdir)
2663
  else:
2664
    os_dir = utils.FindFile(name, [base_dir], os.path.isdir)
2665

    
2666
  if os_dir is None:
2667
    return False, "Directory for OS %s not found in search path" % name
2668

    
2669
  status, api_versions = _OSOndiskAPIVersion(os_dir)
2670
  if not status:
2671
    # push the error up
2672
    return status, api_versions
2673

    
2674
  if not constants.OS_API_VERSIONS.intersection(api_versions):
2675
    return False, ("API version mismatch for path '%s': found %s, want %s." %
2676
                   (os_dir, api_versions, constants.OS_API_VERSIONS))
2677

    
2678
  # OS Files dictionary, we will populate it with the absolute path
2679
  # names; if the value is True, then it is a required file, otherwise
2680
  # an optional one
2681
  os_files = dict.fromkeys(constants.OS_SCRIPTS, True)
2682

    
2683
  if max(api_versions) >= constants.OS_API_V15:
2684
    os_files[constants.OS_VARIANTS_FILE] = False
2685

    
2686
  if max(api_versions) >= constants.OS_API_V20:
2687
    os_files[constants.OS_PARAMETERS_FILE] = True
2688
  else:
2689
    del os_files[constants.OS_SCRIPT_VERIFY]
2690

    
2691
  for (filename, required) in os_files.items():
2692
    os_files[filename] = utils.PathJoin(os_dir, filename)
2693

    
2694
    try:
2695
      st = os.stat(os_files[filename])
2696
    except EnvironmentError, err:
2697
      if err.errno == errno.ENOENT and not required:
2698
        del os_files[filename]
2699
        continue
2700
      return False, ("File '%s' under path '%s' is missing (%s)" %
2701
                     (filename, os_dir, utils.ErrnoOrStr(err)))
2702

    
2703
    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
2704
      return False, ("File '%s' under path '%s' is not a regular file" %
2705
                     (filename, os_dir))
2706

    
2707
    if filename in constants.OS_SCRIPTS:
2708
      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
2709
        return False, ("File '%s' under path '%s' is not executable" %
2710
                       (filename, os_dir))
2711

    
2712
  variants = []
2713
  if constants.OS_VARIANTS_FILE in os_files:
2714
    variants_file = os_files[constants.OS_VARIANTS_FILE]
2715
    try:
2716
      variants = \
2717
        utils.FilterEmptyLinesAndComments(utils.ReadFile(variants_file))
2718
    except EnvironmentError, err:
2719
      # we accept missing files, but not other errors
2720
      if err.errno != errno.ENOENT:
2721
        return False, ("Error while reading the OS variants file at %s: %s" %
2722
                       (variants_file, utils.ErrnoOrStr(err)))
2723

    
2724
  parameters = []
2725
  if constants.OS_PARAMETERS_FILE in os_files:
2726
    parameters_file = os_files[constants.OS_PARAMETERS_FILE]
2727
    try:
2728
      parameters = utils.ReadFile(parameters_file).splitlines()
2729
    except EnvironmentError, err:
2730
      return False, ("Error while reading the OS parameters file at %s: %s" %
2731
                     (parameters_file, utils.ErrnoOrStr(err)))
2732
    parameters = [v.split(None, 1) for v in parameters]
2733

    
2734
  os_obj = objects.OS(name=name, path=os_dir,
2735
                      create_script=os_files[constants.OS_SCRIPT_CREATE],
2736
                      export_script=os_files[constants.OS_SCRIPT_EXPORT],
2737
                      import_script=os_files[constants.OS_SCRIPT_IMPORT],
2738
                      rename_script=os_files[constants.OS_SCRIPT_RENAME],
2739
                      verify_script=os_files.get(constants.OS_SCRIPT_VERIFY,
2740
                                                 None),
2741
                      supported_variants=variants,
2742
                      supported_parameters=parameters,
2743
                      api_versions=api_versions)
2744
  return True, os_obj
2745

    
2746

    
2747
def OSFromDisk(name, base_dir=None):
2748
  """Create an OS instance from disk.
2749

2750
  This function will return an OS instance if the given name is a
2751
  valid OS name. Otherwise, it will raise an appropriate
2752
  L{RPCFail} exception, detailing why this is not a valid OS.
2753

2754
  This is just a wrapper over L{_TryOSFromDisk}, which doesn't raise
2755
  an exception but returns true/false status data.
2756

2757
  @type base_dir: string
2758
  @keyword base_dir: Base directory containing OS installations.
2759
                     Defaults to a search in all the OS_SEARCH_PATH dirs.
2760
  @rtype: L{objects.OS}
2761
  @return: the OS instance if we find a valid one
2762
  @raise RPCFail: if we don't find a valid OS
2763

2764
  """
2765
  name_only = objects.OS.GetName(name)
2766
  status, payload = _TryOSFromDisk(name_only, base_dir)
2767

    
2768
  if not status:
2769
    _Fail(payload)
2770

    
2771
  return payload
2772

    
2773

    
2774
def OSCoreEnv(os_name, inst_os, os_params, debug=0):
2775
  """Calculate the basic environment for an os script.
2776

2777
  @type os_name: str
2778
  @param os_name: full operating system name (including variant)
2779
  @type inst_os: L{objects.OS}
2780
  @param inst_os: operating system for which the environment is being built
2781
  @type os_params: dict
2782
  @param os_params: the OS parameters
2783
  @type debug: integer
2784
  @param debug: debug level (0 or 1, for OS Api 10)
2785
  @rtype: dict
2786
  @return: dict of environment variables
2787
  @raise errors.BlockDeviceError: if the block device
2788
      cannot be found
2789

2790
  """
2791
  result = {}
2792
  api_version = \
2793
    max(constants.OS_API_VERSIONS.intersection(inst_os.api_versions))
2794
  result["OS_API_VERSION"] = "%d" % api_version
2795
  result["OS_NAME"] = inst_os.name
2796
  result["DEBUG_LEVEL"] = "%d" % debug
2797

    
2798
  # OS variants
2799
  if api_version >= constants.OS_API_V15 and inst_os.supported_variants:
2800
    variant = objects.OS.GetVariant(os_name)
2801
    if not variant:
2802
      variant = inst_os.supported_variants[0]
2803
  else:
2804
    variant = ""
2805
  result["OS_VARIANT"] = variant
2806

    
2807
  # OS params
2808
  for pname, pvalue in os_params.items():
2809
    result["OSP_%s" % pname.upper()] = pvalue
2810

    
2811
  # Set a default path otherwise programs called by OS scripts (or
2812
  # even hooks called from OS scripts) might break, and we don't want
2813
  # to have each script require setting a PATH variable
2814
  result["PATH"] = constants.HOOKS_PATH
2815

    
2816
  return result
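
# Sketch of the result (assumed values, for a hypothetical "debootstrap+default"
# OS with os_params of {"dhcp": "yes"}): OSCoreEnv would return roughly
#   {"OS_API_VERSION": "20", "OS_NAME": "debootstrap",
#    "DEBUG_LEVEL": "0", "OS_VARIANT": "default",
#    "OSP_DHCP": "yes", "PATH": constants.HOOKS_PATH}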
2817

    
2818

    
2819
def OSEnvironment(instance, inst_os, debug=0):
2820
  """Calculate the environment for an os script.
2821

2822
  @type instance: L{objects.Instance}
2823
  @param instance: target instance for the os script run
2824
  @type inst_os: L{objects.OS}
2825
  @param inst_os: operating system for which the environment is being built
2826
  @type debug: integer
2827
  @param debug: debug level (0 or 1, for OS Api 10)
2828
  @rtype: dict
2829
  @return: dict of environment variables
2830
  @raise errors.BlockDeviceError: if the block device
2831
      cannot be found
2832

2833
  """
2834
  result = OSCoreEnv(instance.os, inst_os, instance.osparams, debug=debug)
2835

    
2836
  for attr in ["name", "os", "uuid", "ctime", "mtime", "primary_node"]:
2837
    result["INSTANCE_%s" % attr.upper()] = str(getattr(instance, attr))
2838

    
2839
  result["HYPERVISOR"] = instance.hypervisor
2840
  result["DISK_COUNT"] = "%d" % len(instance.disks)
2841
  result["NIC_COUNT"] = "%d" % len(instance.nics)
2842
  result["INSTANCE_SECONDARY_NODES"] = \
2843
      ("%s" % " ".join(instance.secondary_nodes))
2844

    
2845
  # Disks
2846
  for idx, disk in enumerate(instance.disks):
2847
    real_disk = _OpenRealBD(disk)
2848
    result["DISK_%d_PATH" % idx] = real_disk.dev_path
2849
    result["DISK_%d_ACCESS" % idx] = disk.mode
2850
    result["DISK_%d_UUID" % idx] = disk.uuid
2851
    if disk.name:
2852
      result["DISK_%d_NAME" % idx] = disk.name
2853
    if constants.HV_DISK_TYPE in instance.hvparams:
2854
      result["DISK_%d_FRONTEND_TYPE" % idx] = \
2855
        instance.hvparams[constants.HV_DISK_TYPE]
2856
    if disk.dev_type in constants.DTS_BLOCK:
2857
      result["DISK_%d_BACKEND_TYPE" % idx] = "block"
2858
    elif disk.dev_type in [constants.DT_FILE, constants.DT_SHARED_FILE]:
2859
      result["DISK_%d_BACKEND_TYPE" % idx] = \
2860
        "file:%s" % disk.logical_id[0]
2861

    
2862
  # NICs
2863
  for idx, nic in enumerate(instance.nics):
2864
    result["NIC_%d_MAC" % idx] = nic.mac
2865
    result["NIC_%d_UUID" % idx] = nic.uuid
2866
    if nic.name:
2867
      result["NIC_%d_NAME" % idx] = nic.name
2868
    if nic.ip:
2869
      result["NIC_%d_IP" % idx] = nic.ip
2870
    result["NIC_%d_MODE" % idx] = nic.nicparams[constants.NIC_MODE]
2871
    if nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
2872
      result["NIC_%d_BRIDGE" % idx] = nic.nicparams[constants.NIC_LINK]
2873
    if nic.nicparams[constants.NIC_LINK]:
2874
      result["NIC_%d_LINK" % idx] = nic.nicparams[constants.NIC_LINK]
2875
    if nic.netinfo:
2876
      nobj = objects.Network.FromDict(nic.netinfo)
2877
      result.update(nobj.HooksDict("NIC_%d_" % idx))
2878
    if constants.HV_NIC_TYPE in instance.hvparams:
2879
      result["NIC_%d_FRONTEND_TYPE" % idx] = \
2880
        instance.hvparams[constants.HV_NIC_TYPE]
2881

    
2882
  # HV/BE params
2883
  for source, kind in [(instance.beparams, "BE"), (instance.hvparams, "HV")]:
2884
    for key, value in source.items():
2885
      result["INSTANCE_%s_%s" % (kind, key)] = str(value)
2886

    
2887
  return result
2888

    
2889

    
2890
def DiagnoseExtStorage(top_dirs=None):
2891
  """Compute the validity for all ExtStorage Providers.
2892

2893
  @type top_dirs: list
2894
  @param top_dirs: the list of directories in which to
2895
      search (if not given defaults to
2896
      L{pathutils.ES_SEARCH_PATH})
2897
  @rtype: list of L{objects.ExtStorage}
2898
  @return: a list of tuples (name, path, status, diagnose, parameters)
2899
      for all (potential) ExtStorage Providers under all
2900
      search paths, where:
2901
          - name is the (potential) ExtStorage Provider
2902
          - path is the full path to the ExtStorage Provider
2903
          - status True/False is the validity of the ExtStorage Provider
2904
          - diagnose is the error message for an invalid ExtStorage Provider,
2905
            otherwise empty
2906
          - parameters is a list of (name, help) parameters, if any
2907

2908
  """
2909
  if top_dirs is None:
2910
    top_dirs = pathutils.ES_SEARCH_PATH
2911

    
2912
  result = []
2913
  for dir_name in top_dirs:
2914
    if os.path.isdir(dir_name):
2915
      try:
2916
        f_names = utils.ListVisibleFiles(dir_name)
2917
      except EnvironmentError, err:
2918
        logging.exception("Can't list the ExtStorage directory %s: %s",
2919
                          dir_name, err)
2920
        break
2921
      for name in f_names:
2922
        es_path = utils.PathJoin(dir_name, name)
2923
        status, es_inst = bdev.ExtStorageFromDisk(name, base_dir=dir_name)
2924
        if status:
2925
          diagnose = ""
2926
          parameters = es_inst.supported_parameters
2927
        else:
2928
          diagnose = es_inst
2929
          parameters = []
2930
        result.append((name, es_path, status, diagnose, parameters))
2931

    
2932
  return result
2933

    
2934

    
2935
def BlockdevGrow(disk, amount, dryrun, backingstore, excl_stor):
2936
  """Grow a stack of block devices.
2937

2938
  This function is called recursively, with the children being the
2939
  first ones to resize.
2940

2941
  @type disk: L{objects.Disk}
2942
  @param disk: the disk to be grown
2943
  @type amount: integer
2944
  @param amount: the amount (in mebibytes) to grow with
2945
  @type dryrun: boolean
2946
  @param dryrun: whether to execute the operation in simulation mode
2947
      only, without actually increasing the size
2948
  @param backingstore: whether to execute the operation on backing storage
2949
      only, or on "logical" storage only; e.g. DRBD is logical storage,
2950
      whereas LVM, file, RBD are backing storage
2951
  @rtype: (status, result)
2952
  @type excl_stor: boolean
2953
  @param excl_stor: Whether exclusive_storage is active
2954
  @return: a tuple with the status of the operation (True/False), and
2955
      the errors message if status is False
2956

2957
  """
2958
  r_dev = _RecursiveFindBD(disk)
2959
  if r_dev is None:
2960
    _Fail("Cannot find block device %s", disk)
2961

    
2962
  try:
2963
    r_dev.Grow(amount, dryrun, backingstore, excl_stor)
2964
  except errors.BlockDeviceError, err:
2965
    _Fail("Failed to grow block device: %s", err, exc=True)
2966

    
2967

    
2968
def BlockdevSnapshot(disk):
2969
  """Create a snapshot copy of a block device.
2970

2971
  This function is called recursively, and the snapshot is actually created
2972
  just for the leaf lvm backend device.
2973

2974
  @type disk: L{objects.Disk}
2975
  @param disk: the disk to be snapshotted
2976
  @rtype: string
2977
  @return: snapshot disk ID as (vg, lv)
2978

2979
  """
2980
  if disk.dev_type == constants.DT_DRBD8:
2981
    if not disk.children:
2982
      _Fail("DRBD device '%s' without backing storage cannot be snapshotted",
2983
            disk.unique_id)
2984
    return BlockdevSnapshot(disk.children[0])
2985
  elif disk.dev_type == constants.DT_PLAIN:
2986
    r_dev = _RecursiveFindBD(disk)
2987
    if r_dev is not None:
2988
      # FIXME: choose a saner value for the snapshot size
2989
      # let's stay on the safe side and ask for the full size, for now
2990
      return r_dev.Snapshot(disk.size)
2991
    else:
2992
      _Fail("Cannot find block device %s", disk)
2993
  else:
2994
    _Fail("Cannot snapshot non-lvm block device '%s' of type '%s'",
2995
          disk.unique_id, disk.dev_type)
2996

    
2997

    
2998
def BlockdevSetInfo(disk, info):
2999
  """Sets 'metadata' information on block devices.
3000

3001
  This function sets 'info' metadata on block devices. Initial
3002
  information is set at device creation; this function should be used
3003
  for example after renames.
3004

3005
  @type disk: L{objects.Disk}
3006
  @param disk: the disk to be grown
3007
  @type info: string
3008
  @param info: new 'info' metadata
3009
  @rtype: (status, result)
3010
  @return: a tuple with the status of the operation (True/False), and
3011
      the errors message if status is False
3012

3013
  """
3014
  r_dev = _RecursiveFindBD(disk)
3015
  if r_dev is None:
3016
    _Fail("Cannot find block device %s", disk)
3017

    
3018
  try:
3019
    r_dev.SetInfo(info)
3020
  except errors.BlockDeviceError, err:
3021
    _Fail("Failed to set information on block device: %s", err, exc=True)
3022

    
3023

    
3024
def FinalizeExport(instance, snap_disks):
  """Write out the export configuration information.

  @type instance: L{objects.Instance}
  @param instance: the instance which we export, used for
      saving configuration
  @type snap_disks: list of L{objects.Disk}
  @param snap_disks: list of snapshot block devices, which
      will be used to get the actual name of the dump file

  @rtype: None

  """
  destdir = utils.PathJoin(pathutils.EXPORT_DIR, instance.name + ".new")
  finaldestdir = utils.PathJoin(pathutils.EXPORT_DIR, instance.name)

  config = objects.SerializableConfigParser()

  config.add_section(constants.INISECT_EXP)
  config.set(constants.INISECT_EXP, "version", "0")
  config.set(constants.INISECT_EXP, "timestamp", "%d" % int(time.time()))
  config.set(constants.INISECT_EXP, "source", instance.primary_node)
  config.set(constants.INISECT_EXP, "os", instance.os)
  config.set(constants.INISECT_EXP, "compression", "none")

  config.add_section(constants.INISECT_INS)
  config.set(constants.INISECT_INS, "name", instance.name)
  config.set(constants.INISECT_INS, "maxmem", "%d" %
             instance.beparams[constants.BE_MAXMEM])
  config.set(constants.INISECT_INS, "minmem", "%d" %
             instance.beparams[constants.BE_MINMEM])
  # "memory" is deprecated, but useful for exporting to old ganeti versions
  config.set(constants.INISECT_INS, "memory", "%d" %
             instance.beparams[constants.BE_MAXMEM])
  config.set(constants.INISECT_INS, "vcpus", "%d" %
             instance.beparams[constants.BE_VCPUS])
  config.set(constants.INISECT_INS, "disk_template", instance.disk_template)
  config.set(constants.INISECT_INS, "hypervisor", instance.hypervisor)
  config.set(constants.INISECT_INS, "tags", " ".join(instance.GetTags()))

  nic_total = 0
  for nic_count, nic in enumerate(instance.nics):
    nic_total += 1
    config.set(constants.INISECT_INS, "nic%d_mac" %
               nic_count, "%s" % nic.mac)
    config.set(constants.INISECT_INS, "nic%d_ip" % nic_count, "%s" % nic.ip)
    config.set(constants.INISECT_INS, "nic%d_network" % nic_count,
               "%s" % nic.network)
    for param in constants.NICS_PARAMETER_TYPES:
      config.set(constants.INISECT_INS, "nic%d_%s" % (nic_count, param),
                 "%s" % nic.nicparams.get(param, None))
  # TODO: redundant: on load can read nics until it doesn't exist
  config.set(constants.INISECT_INS, "nic_count", "%d" % nic_total)

  disk_total = 0
  for disk_count, disk in enumerate(snap_disks):
    if disk:
      disk_total += 1
      config.set(constants.INISECT_INS, "disk%d_ivname" % disk_count,
                 ("%s" % disk.iv_name))
      config.set(constants.INISECT_INS, "disk%d_dump" % disk_count,
                 ("%s" % disk.logical_id[1]))
      config.set(constants.INISECT_INS, "disk%d_size" % disk_count,
                 ("%d" % disk.size))

  config.set(constants.INISECT_INS, "disk_count", "%d" % disk_total)

  # New-style hypervisor/backend parameters

  config.add_section(constants.INISECT_HYP)
  for name, value in instance.hvparams.items():
    if name not in constants.HVC_GLOBALS:
      config.set(constants.INISECT_HYP, name, str(value))

  config.add_section(constants.INISECT_BEP)
  for name, value in instance.beparams.items():
    config.set(constants.INISECT_BEP, name, str(value))

  config.add_section(constants.INISECT_OSP)
  for name, value in instance.osparams.items():
    config.set(constants.INISECT_OSP, name, str(value))

  utils.WriteFile(utils.PathJoin(destdir, constants.EXPORT_CONF_FILE),
                  data=config.Dumps())
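  # Replace any previous export of this instance with the freshly
  # written one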
  shutil.rmtree(finaldestdir, ignore_errors=True)
  shutil.move(destdir, finaldestdir)


def ExportInfo(dest):
  """Get export configuration information.

  @type dest: str
  @param dest: directory containing the export

  @rtype: L{objects.SerializableConfigParser}
  @return: a serializable config file containing the
      export info

  """
  cff = utils.PathJoin(dest, constants.EXPORT_CONF_FILE)

  config = objects.SerializableConfigParser()
  config.read(cff)

  if (not config.has_section(constants.INISECT_EXP) or
      not config.has_section(constants.INISECT_INS)):
    _Fail("Export info file doesn't have the required fields")

  return config.Dumps()


def ListExports():
  """Return a list of exports currently available on this machine.

  @rtype: list
  @return: list of the exports

  """
  if os.path.isdir(pathutils.EXPORT_DIR):
    return sorted(utils.ListVisibleFiles(pathutils.EXPORT_DIR))
  else:
    _Fail("No exports directory")


def RemoveExport(export):
  """Remove an existing export from the node.

  @type export: str
  @param export: the name of the export to remove
  @rtype: None

  """
  target = utils.PathJoin(pathutils.EXPORT_DIR, export)

  try:
    shutil.rmtree(target)
  except EnvironmentError, err:
    _Fail("Error while removing the export: %s", err, exc=True)


def BlockdevRename(devlist):
  """Rename a list of block devices.

  @type devlist: list of tuples
  @param devlist: list of tuples of the form (disk, new_unique_id); disk is
      an L{objects.Disk} object describing the current disk, and new
      unique_id is the name we rename it to
  @rtype: boolean
  @return: True if all renames succeeded, False otherwise

  """
  msgs = []
  result = True
  for disk, unique_id in devlist:
    dev = _RecursiveFindBD(disk)
    if dev is None:
      msgs.append("Can't find device %s in rename" % str(disk))
      result = False
      continue
    try:
      old_rpath = dev.dev_path
      dev.Rename(unique_id)
      new_rpath = dev.dev_path
      if old_rpath != new_rpath:
        DevCacheManager.RemoveCache(old_rpath)
        # FIXME: we should add the new cache information here, like:
        # DevCacheManager.UpdateCache(new_rpath, owner, ...)
        # but we don't have the owner here - maybe parse from existing
        # cache? for now, we only lose lvm data when we rename, which
        # is less critical than DRBD or MD
    except errors.BlockDeviceError, err:
      msgs.append("Can't rename device '%s' to '%s': %s" %
                  (dev, unique_id, err))
      logging.exception("Can't rename device '%s' to '%s'", dev, unique_id)
      result = False
  if not result:
    _Fail("; ".join(msgs))


def _TransformFileStorageDir(fs_dir):
  """Checks whether given file_storage_dir is valid.

  Checks whether the given fs_dir is within the cluster-wide default
  file_storage_dir or the shared_file_storage_dir, which are stored in
  SimpleStore. Only paths under those directories are allowed.

  @type fs_dir: str
  @param fs_dir: the path to check

  @return: the normalized path if valid, None otherwise

  """
  filestorage.CheckFileStoragePath(fs_dir)

  return os.path.normpath(fs_dir)


def CreateFileStorageDir(file_storage_dir):
  """Create file storage directory.

  @type file_storage_dir: str
  @param file_storage_dir: directory to create

  @rtype: tuple
  @return: tuple with first element a boolean indicating whether dir
      creation was successful or not

  """
  file_storage_dir = _TransformFileStorageDir(file_storage_dir)
  if os.path.exists(file_storage_dir):
    if not os.path.isdir(file_storage_dir):
      _Fail("Specified storage dir '%s' is not a directory",
            file_storage_dir)
  else:
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      _Fail("Cannot create file storage directory '%s': %s",
            file_storage_dir, err, exc=True)


def RemoveFileStorageDir(file_storage_dir):
  """Remove file storage directory.

  Remove it only if it's empty. If not, log an error and return.

  @type file_storage_dir: str
  @param file_storage_dir: the directory we should cleanup
  @rtype: tuple (success,)
  @return: tuple of one element, C{success}, denoting
      whether the operation was successful

  """
  file_storage_dir = _TransformFileStorageDir(file_storage_dir)
  if os.path.exists(file_storage_dir):
    if not os.path.isdir(file_storage_dir):
      _Fail("Specified Storage directory '%s' is not a directory",
            file_storage_dir)
    # deletes dir only if empty, otherwise we want to fail the rpc call
    try:
      os.rmdir(file_storage_dir)
    except OSError, err:
      _Fail("Cannot remove file storage directory '%s': %s",
            file_storage_dir, err)


def RenameFileStorageDir(old_file_storage_dir, new_file_storage_dir):
  """Rename the file storage directory.

  @type old_file_storage_dir: str
  @param old_file_storage_dir: the current path
  @type new_file_storage_dir: str
  @param new_file_storage_dir: the name we should rename to
  @rtype: tuple (success,)
  @return: tuple of one element, C{success}, denoting
      whether the operation was successful

  """
  old_file_storage_dir = _TransformFileStorageDir(old_file_storage_dir)
  new_file_storage_dir = _TransformFileStorageDir(new_file_storage_dir)
  if not os.path.exists(new_file_storage_dir):
    if os.path.isdir(old_file_storage_dir):
      try:
        os.rename(old_file_storage_dir, new_file_storage_dir)
      except OSError, err:
        _Fail("Cannot rename '%s' to '%s': %s",
              old_file_storage_dir, new_file_storage_dir, err)
    else:
      _Fail("Specified storage dir '%s' is not a directory",
            old_file_storage_dir)
  else:
    if os.path.exists(old_file_storage_dir):
      _Fail("Cannot rename '%s' to '%s': both locations exist",
            old_file_storage_dir, new_file_storage_dir)


def _EnsureJobQueueFile(file_name):
  """Checks whether the given filename is in the queue directory.

  @type file_name: str
  @param file_name: the file name we should check
  @rtype: None
  @raises RPCFail: if the file is not valid

  """
  if not utils.IsBelowDir(pathutils.QUEUE_DIR, file_name):
    _Fail("Passed job queue file '%s' does not belong to"
          " the queue directory '%s'", file_name, pathutils.QUEUE_DIR)


def JobQueueUpdate(file_name, content):
  """Updates a file in the queue directory.

  This is just a wrapper over L{utils.io.WriteFile}, with proper
  checking.

  @type file_name: str
  @param file_name: the job file name
  @type content: str
  @param content: the new job contents
  @rtype: boolean
  @return: the success of the operation

  """
  file_name = vcluster.LocalizeVirtualPath(file_name)

  _EnsureJobQueueFile(file_name)
  getents = runtime.GetEnts()

  # Write and replace the file atomically
  utils.WriteFile(file_name, data=_Decompress(content), uid=getents.masterd_uid,
                  gid=getents.daemons_gid, mode=constants.JOB_QUEUE_FILES_PERMS)


def JobQueueRename(old, new):
  """Renames a job queue file.

  This is just a wrapper over os.rename with proper checking.

  @type old: str
  @param old: the old (actual) file name
  @type new: str
  @param new: the desired file name
  @rtype: tuple
  @return: the success of the operation and payload

  """
  old = vcluster.LocalizeVirtualPath(old)
  new = vcluster.LocalizeVirtualPath(new)

  _EnsureJobQueueFile(old)
  _EnsureJobQueueFile(new)

  getents = runtime.GetEnts()

  utils.RenameFile(old, new, mkdir=True, mkdir_mode=0750,
                   dir_uid=getents.masterd_uid, dir_gid=getents.daemons_gid)


def BlockdevClose(instance_name, disks):
  """Closes the given block devices.

  This means they will be switched to secondary mode (in case of
  DRBD).

  @param instance_name: if the argument is not empty, the symlinks
      of this instance will be removed
  @type disks: list of L{objects.Disk}
  @param disks: the list of disks to be closed
  @rtype: tuple (success, message)
  @return: a tuple of success and message, where success
      indicates the success of the operation, and message
      will contain the error details in case we
      failed

  """
  bdevs = []
  for cf in disks:
    rd = _RecursiveFindBD(cf)
    if rd is None:
      _Fail("Can't find device %s", cf)
    bdevs.append(rd)

  msg = []
  for rd in bdevs:
    try:
      rd.Close()
    except errors.BlockDeviceError, err:
      msg.append(str(err))
  if msg:
    _Fail("Can't make devices secondary: %s", ",".join(msg))
  else:
    if instance_name:
      _RemoveBlockDevLinks(instance_name, disks)


def ValidateHVParams(hvname, hvparams):
  """Validates the given hypervisor parameters.

  @type hvname: string
  @param hvname: the hypervisor name
  @type hvparams: dict
  @param hvparams: the hypervisor parameters to be validated
  @rtype: None

  """
  try:
    hv_type = hypervisor.GetHypervisor(hvname)
    hv_type.ValidateParameters(hvparams)
  except errors.HypervisorError, err:
    _Fail(str(err), log=False)


def _CheckOSPList(os_obj, parameters):
  """Check whether a list of parameters is supported by the OS.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type parameters: list
  @param parameters: the list of parameters to check

  """
  supported = [v[0] for v in os_obj.supported_parameters]
  delta = frozenset(parameters).difference(supported)
  if delta:
    _Fail("The following parameters are not supported"
          " by the OS %s: %s" % (os_obj.name, utils.CommaJoin(delta)))


def ValidateOS(required, osname, checks, osparams):
  """Validate the given OS' parameters.

  @type required: boolean
  @param required: whether absence of the OS should translate into
      failure or not
  @type osname: string
  @param osname: the OS to be validated
  @type checks: list
  @param checks: list of the checks to run (currently only 'parameters')
  @type osparams: dict
  @param osparams: dictionary with OS parameters
  @rtype: boolean
  @return: True if the validation passed, or False if the OS was not
      found and L{required} was false

  """
  if not constants.OS_VALIDATE_CALLS.issuperset(checks):
    _Fail("Unknown checks required for OS %s: %s", osname,
          set(checks).difference(constants.OS_VALIDATE_CALLS))

  name_only = objects.OS.GetName(osname)
  status, tbv = _TryOSFromDisk(name_only, None)

  if not status:
    if required:
      _Fail(tbv)
    else:
      return False

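  # OS definitions whose API is older than OS_API_V20 do not support
  # parameter validation, so they are accepted as-is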
  if max(tbv.api_versions) < constants.OS_API_V20:
    return True

  if constants.OS_VALIDATE_PARAMETERS in checks:
    _CheckOSPList(tbv, osparams.keys())

  validate_env = OSCoreEnv(osname, tbv, osparams)
  result = utils.RunCmd([tbv.verify_script] + checks, env=validate_env,
                        cwd=tbv.path, reset_env=True)
  if result.failed:
    logging.error("os validate command '%s' returned error: %s output: %s",
                  result.cmd, result.fail_reason, result.output)
    _Fail("OS validation script failed (%s), output: %s",
          result.fail_reason, result.output, log=False)

  return True


def DemoteFromMC():
  """Demotes the current node from master candidate role.

  """
  # try to ensure we're not the master by mistake
  master, myself = ssconf.GetMasterAndMyself()
  if master == myself:
    _Fail("ssconf status shows I'm the master node, will not demote")

  result = utils.RunCmd([pathutils.DAEMON_UTIL, "check", constants.MASTERD])
  if not result.failed:
    _Fail("The master daemon is running, will not demote")

  try:
    if os.path.isfile(pathutils.CLUSTER_CONF_FILE):
      utils.CreateBackup(pathutils.CLUSTER_CONF_FILE)
  except EnvironmentError, err:
    if err.errno != errno.ENOENT:
      _Fail("Error while backing up cluster file: %s", err, exc=True)

  utils.RemoveFile(pathutils.CLUSTER_CONF_FILE)


def _GetX509Filenames(cryptodir, name):
  """Returns the full paths for the private key and certificate.

  """
  return (utils.PathJoin(cryptodir, name),
          utils.PathJoin(cryptodir, name, _X509_KEY_FILE),
          utils.PathJoin(cryptodir, name, _X509_CERT_FILE))


def CreateX509Certificate(validity, cryptodir=pathutils.CRYPTO_KEYS_DIR):
  """Creates a new X509 certificate for SSL/TLS.

  @type validity: int
  @param validity: Validity in seconds
  @rtype: tuple; (string, string)
  @return: Certificate name and public part

  """
  (key_pem, cert_pem) = \
    utils.GenerateSelfSignedX509Cert(netutils.Hostname.GetSysName(),
                                     min(validity, _MAX_SSL_CERT_VALIDITY))

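  # The basename of the randomly named temporary directory doubles as the
  # certificate name returned to the caller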
  cert_dir = tempfile.mkdtemp(dir=cryptodir,
                              prefix="x509-%s-" % utils.TimestampForFilename())
  try:
    name = os.path.basename(cert_dir)
    assert len(name) > 5

    (_, key_file, cert_file) = _GetX509Filenames(cryptodir, name)

    utils.WriteFile(key_file, mode=0400, data=key_pem)
    utils.WriteFile(cert_file, mode=0400, data=cert_pem)

    # Never return private key as it shouldn't leave the node
    return (name, cert_pem)
  except Exception:
    shutil.rmtree(cert_dir, ignore_errors=True)
    raise


def RemoveX509Certificate(name, cryptodir=pathutils.CRYPTO_KEYS_DIR):
  """Removes an X509 certificate.

  @type name: string
  @param name: Certificate name

  """
  (cert_dir, key_file, cert_file) = _GetX509Filenames(cryptodir, name)

  utils.RemoveFile(key_file)
  utils.RemoveFile(cert_file)

  try:
    os.rmdir(cert_dir)
  except EnvironmentError, err:
    _Fail("Cannot remove certificate directory '%s': %s",
          cert_dir, err)


def _GetImportExportIoCommand(instance, mode, ieio, ieargs):
  """Returns the command for the requested input/output.

  @type instance: L{objects.Instance}
  @param instance: The instance object
  @param mode: Import/export mode
  @param ieio: Input/output type
  @param ieargs: Input/output arguments

  """
  assert mode in (constants.IEM_IMPORT, constants.IEM_EXPORT)

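  # The values built here are passed to the import/export daemon by
  # StartImportExportDaemon: prefix/suffix as --cmd-prefix/--cmd-suffix
  # shell fragments, and exp_size as --expected-size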
  env = None
  prefix = None
  suffix = None
  exp_size = None

  if ieio == constants.IEIO_FILE:
    (filename, ) = ieargs

    if not utils.IsNormAbsPath(filename):
      _Fail("Path '%s' is not normalized or absolute", filename)

    real_filename = os.path.realpath(filename)
    directory = os.path.dirname(real_filename)

    if not utils.IsBelowDir(pathutils.EXPORT_DIR, real_filename):
      _Fail("File '%s' is not under exports directory '%s': %s",
            filename, pathutils.EXPORT_DIR, real_filename)

    # Create directory
    utils.Makedirs(directory, mode=0750)

    quoted_filename = utils.ShellQuote(filename)

    if mode == constants.IEM_IMPORT:
      suffix = "> %s" % quoted_filename
    elif mode == constants.IEM_EXPORT:
      suffix = "< %s" % quoted_filename

      # Retrieve file size
      try:
        st = os.stat(filename)
      except EnvironmentError, err:
        logging.error("Can't stat(2) %s: %s", filename, err)
      else:
        exp_size = utils.BytesToMebibyte(st.st_size)

  elif ieio == constants.IEIO_RAW_DISK:
    (disk, ) = ieargs

    real_disk = _OpenRealBD(disk)

    if mode == constants.IEM_IMPORT:
      # we use nocreat to fail if the device is not already there or we pass a
      # wrong path; we use notrunc to not attempt to truncate an LV device
      suffix = utils.BuildShellCmd("| dd of=%s conv=nocreat,notrunc bs=%s",
                                   real_disk.dev_path,
                                   str(1024 * 1024)) # 1 MB

    elif mode == constants.IEM_EXPORT:
      # the block size on the read dd is 1MiB to match our units
      prefix = utils.BuildShellCmd("dd if=%s bs=%s count=%s |",
                                   real_disk.dev_path,
                                   str(1024 * 1024), # 1 MB
                                   str(disk.size))
      exp_size = disk.size

  elif ieio == constants.IEIO_SCRIPT:
    (disk, disk_index, ) = ieargs

    assert isinstance(disk_index, (int, long))

    inst_os = OSFromDisk(instance.os)
    env = OSEnvironment(instance, inst_os)

    if mode == constants.IEM_IMPORT:
      env["IMPORT_DEVICE"] = env["DISK_%d_PATH" % disk_index]
      env["IMPORT_INDEX"] = str(disk_index)
      script = inst_os.import_script

    elif mode == constants.IEM_EXPORT:
      real_disk = _OpenRealBD(disk)
      env["EXPORT_DEVICE"] = real_disk.dev_path
      env["EXPORT_INDEX"] = str(disk_index)
      script = inst_os.export_script

    # TODO: Pass special environment only to script
    script_cmd = utils.BuildShellCmd("( cd %s && %s; )", inst_os.path, script)

    if mode == constants.IEM_IMPORT:
      suffix = "| %s" % script_cmd

    elif mode == constants.IEM_EXPORT:
      prefix = "%s |" % script_cmd

    # Let script predict size
    exp_size = constants.IE_CUSTOM_SIZE

  else:
    _Fail("Invalid %s I/O mode %r", mode, ieio)

  return (env, prefix, suffix, exp_size)


def _CreateImportExportStatusDir(prefix):
  """Creates status directory for import/export.

  """
  return tempfile.mkdtemp(dir=pathutils.IMPORT_EXPORT_DIR,
                          prefix=("%s-%s-" %
                                  (prefix, utils.TimestampForFilename())))


def StartImportExportDaemon(mode, opts, host, port, instance, component,
                            ieio, ieioargs):
  """Starts an import or export daemon.

  @param mode: Import/export mode
  @type opts: L{objects.ImportExportOptions}
  @param opts: Daemon options
  @type host: string
  @param host: Remote host for export (None for import)
  @type port: int
  @param port: Remote port for export (None for import)
  @type instance: L{objects.Instance}
  @param instance: Instance object
  @type component: string
  @param component: which part of the instance is transferred now,
      e.g. 'disk/0'
  @param ieio: Input/output type
  @param ieioargs: Input/output arguments

  """
  if mode == constants.IEM_IMPORT:
    prefix = "import"

    if not (host is None and port is None):
      _Fail("Can not specify host or port on import")

  elif mode == constants.IEM_EXPORT:
    prefix = "export"

    if host is None or port is None:
      _Fail("Host and port must be specified for an export")

  else:
    _Fail("Invalid mode %r", mode)

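  # A user-supplied X509 key and its CA must be given together; when neither
  # is given, the node's own certificate (server.pem) is used for both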
  if (opts.key_name is None) ^ (opts.ca_pem is None):
    _Fail("Cluster certificate can only be used for both key and CA")

  (cmd_env, cmd_prefix, cmd_suffix, exp_size) = \
    _GetImportExportIoCommand(instance, mode, ieio, ieioargs)

  if opts.key_name is None:
    # Use server.pem
    key_path = pathutils.NODED_CERT_FILE
    cert_path = pathutils.NODED_CERT_FILE
    assert opts.ca_pem is None
  else:
    (_, key_path, cert_path) = _GetX509Filenames(pathutils.CRYPTO_KEYS_DIR,
                                                 opts.key_name)
    assert opts.ca_pem is not None

  for i in [key_path, cert_path]:
    if not os.path.exists(i):
      _Fail("File '%s' does not exist" % i)

  status_dir = _CreateImportExportStatusDir("%s-%s" % (prefix, component))
  try:
    status_file = utils.PathJoin(status_dir, _IES_STATUS_FILE)
    pid_file = utils.PathJoin(status_dir, _IES_PID_FILE)
    ca_file = utils.PathJoin(status_dir, _IES_CA_FILE)

    if opts.ca_pem is None:
      # Use server.pem
      ca = utils.ReadFile(pathutils.NODED_CERT_FILE)
    else:
      ca = opts.ca_pem

    # Write CA file
    utils.WriteFile(ca_file, data=ca, mode=0400)

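    # Build the command line for the import/export daemon; optional
    # arguments are appended below only when the corresponding option is set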
    cmd = [
      pathutils.IMPORT_EXPORT_DAEMON,
      status_file, mode,
      "--key=%s" % key_path,
      "--cert=%s" % cert_path,
      "--ca=%s" % ca_file,
      ]

    if host:
      cmd.append("--host=%s" % host)

    if port:
      cmd.append("--port=%s" % port)

    if opts.ipv6:
      cmd.append("--ipv6")
    else:
      cmd.append("--ipv4")

    if opts.compress:
      cmd.append("--compress=%s" % opts.compress)

    if opts.magic:
      cmd.append("--magic=%s" % opts.magic)

    if exp_size is not None:
      cmd.append("--expected-size=%s" % exp_size)

    if cmd_prefix:
      cmd.append("--cmd-prefix=%s" % cmd_prefix)

    if cmd_suffix:
      cmd.append("--cmd-suffix=%s" % cmd_suffix)

    if mode == constants.IEM_EXPORT:
      # Retry connection a few times when connecting to remote peer
      cmd.append("--connect-retries=%s" % constants.RIE_CONNECT_RETRIES)
      cmd.append("--connect-timeout=%s" % constants.RIE_CONNECT_ATTEMPT_TIMEOUT)
    elif opts.connect_timeout is not None:
      assert mode == constants.IEM_IMPORT
      # Overall timeout for establishing connection while listening
      cmd.append("--connect-timeout=%s" % opts.connect_timeout)

    logfile = _InstanceLogName(prefix, instance.os, instance.name, component)

    # TODO: Once _InstanceLogName uses tempfile.mkstemp, StartDaemon has
    # support for receiving a file descriptor for output
    utils.StartDaemon(cmd, env=cmd_env, pidfile=pid_file,
                      output=logfile)

    # The import/export name is simply the status directory name
    return os.path.basename(status_dir)

  except Exception:
    shutil.rmtree(status_dir, ignore_errors=True)
    raise


def GetImportExportStatus(names):
  """Returns import/export daemon status.

  @type names: sequence
  @param names: List of names
  @rtype: List of dicts
  @return: a list with the state of each named import/export, or C{None}
           if a status couldn't be read

  """
  result = []

  for name in names:
    status_file = utils.PathJoin(pathutils.IMPORT_EXPORT_DIR, name,
                                 _IES_STATUS_FILE)

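    # a missing or empty status file yields None for this import/export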
    try:
      data = utils.ReadFile(status_file)
    except EnvironmentError, err:
      if err.errno != errno.ENOENT:
        raise
      data = None

    if not data:
      result.append(None)
      continue

    result.append(serializer.LoadJson(data))

  return result


def AbortImportExport(name):
  """Sends SIGTERM to a running import/export daemon.

  """
  logging.info("Abort import/export %s", name)

  status_dir = utils.PathJoin(pathutils.IMPORT_EXPORT_DIR, name)
  pid = utils.ReadLockedPidFile(utils.PathJoin(status_dir, _IES_PID_FILE))

  if pid:
    logging.info("Import/export %s is running with PID %s, sending SIGTERM",
                 name, pid)
    utils.IgnoreProcessNotFound(os.kill, pid, signal.SIGTERM)


def CleanupImportExport(name):
  """Cleanup after an import or export.

  If the import/export daemon is still running it's killed. Afterwards the
  whole status directory is removed.

  """
  logging.info("Finalizing import/export %s", name)

  status_dir = utils.PathJoin(pathutils.IMPORT_EXPORT_DIR, name)

  pid = utils.ReadLockedPidFile(utils.PathJoin(status_dir, _IES_PID_FILE))

  if pid:
    logging.info("Import/export %s is still running with PID %s",
                 name, pid)
    utils.KillProcess(pid, waitpid=False)

  shutil.rmtree(status_dir, ignore_errors=True)


def _FindDisks(disks):
  """Finds attached L{BlockDev}s for the given disks.

  @type disks: list of L{objects.Disk}
  @param disks: the disk objects we need to find

  @return: list of L{BlockDev} objects or C{None} if a given disk
           was not found or was not attached.

  """
  bdevs = []

  for disk in disks:
    rd = _RecursiveFindBD(disk)
    if rd is None:
      _Fail("Can't find device %s", disk)
    bdevs.append(rd)
  return bdevs


def DrbdDisconnectNet(disks):
  """Disconnects the network on a list of drbd devices.

  """
  bdevs = _FindDisks(disks)

  # disconnect disks
  for rd in bdevs:
    try:
      rd.DisconnectNet()
    except errors.BlockDeviceError, err:
      _Fail("Can't change network configuration to standalone mode: %s",
            err, exc=True)


def DrbdAttachNet(disks, instance_name, multimaster):
  """Attaches the network on a list of drbd devices.

  """
  bdevs = _FindDisks(disks)

  if multimaster:
    for idx, rd in enumerate(bdevs):
      try:
        _SymlinkBlockDev(instance_name, rd.dev_path, idx)
      except EnvironmentError, err:
        _Fail("Can't create symlink: %s", err)
  # reconnect disks, switch to new master configuration and if
  # needed primary mode
  for rd in bdevs:
    try:
      rd.AttachNet(multimaster)
    except errors.BlockDeviceError, err:
      _Fail("Can't change network configuration: %s", err)

  # wait until the disks are connected; we need to retry the re-attach
  # if the device becomes standalone, as this might happen if the one
  # node disconnects and reconnects in a different mode before the
  # other node reconnects; in this case, one or both of the nodes will
  # decide it has wrong configuration and switch to standalone

  def _Attach():
    all_connected = True

    for rd in bdevs:
      stats = rd.GetProcStatus()

      all_connected = (all_connected and
                       (stats.is_connected or stats.is_in_resync))

      if stats.is_standalone:
        # peer had different config info and this node became
        # standalone, even though this should not happen with the
        # new staged way of changing disk configs
        try:
          rd.AttachNet(multimaster)
        except errors.BlockDeviceError, err:
          _Fail("Can't change network configuration: %s", err)

    if not all_connected:
      raise utils.RetryAgain()

  try:
    # Start with a delay of 100 milliseconds and go up to 5 seconds
    utils.Retry(_Attach, (0.1, 1.5, 5.0), 2 * 60)
  except utils.RetryTimeout:
    _Fail("Timeout in disk reconnecting")

  if multimaster:
    # change to primary mode
    for rd in bdevs:
      try:
        rd.Open()
      except errors.BlockDeviceError, err:
        _Fail("Can't change to primary mode: %s", err)


def DrbdWaitSync(disks):
  """Wait until DRBDs have synchronized.

  """
  def _helper(rd):
    stats = rd.GetProcStatus()
    if not (stats.is_connected or stats.is_in_resync):
      raise utils.RetryAgain()
    return stats

  bdevs = _FindDisks(disks)

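  # track whether every device has finished resyncing, and the lowest sync
  # percentage seen across all of them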
  min_resync = 100
  alldone = True
  for rd in bdevs:
    try:
      # poll each second for 15 seconds
      stats = utils.Retry(_helper, 1, 15, args=[rd])
    except utils.RetryTimeout:
      stats = rd.GetProcStatus()
      # last check
      if not (stats.is_connected or stats.is_in_resync):
        _Fail("DRBD device %s is not in sync: stats=%s", rd, stats)
    alldone = alldone and (not stats.is_in_resync)
    if stats.sync_percent is not None:
      min_resync = min(min_resync, stats.sync_percent)

  return (alldone, min_resync)


def DrbdNeedsActivation(disks):
  """Checks which of the passed disks needs activation and returns their UUIDs.

  """
  faulty_disks = []

  for disk in disks:
    rd = _RecursiveFindBD(disk)
    if rd is None:
      faulty_disks.append(disk)
      continue

    stats = rd.GetProcStatus()
    if stats.is_standalone or stats.is_diskless:
      faulty_disks.append(disk)

  return [disk.uuid for disk in faulty_disks]


def GetDrbdUsermodeHelper():
  """Returns DRBD usermode helper currently configured.

  """
  try:
    return drbd.DRBD8.GetUsermodeHelper()
  except errors.BlockDeviceError, err:
    _Fail(str(err))


def PowercycleNode(hypervisor_type, hvparams=None):
  """Hard-powercycle the node.

  Because we need to return first, and schedule the powercycle in the
  background, we won't be able to report failures nicely.

  """
  hyper = hypervisor.GetHypervisor(hypervisor_type)
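  # fork so that this RPC can return immediately; the child waits a few
  # seconds and then performs the actual powercycle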
  try:
    pid = os.fork()
  except OSError:
    # if we can't fork, we'll pretend that we're in the child process
    pid = 0
  if pid > 0:
    return "Reboot scheduled in 5 seconds"
  # ensure the child is running on ram
  try:
    utils.Mlockall()
  except Exception: # pylint: disable=W0703
    pass
  time.sleep(5)
  hyper.PowercycleNode(hvparams=hvparams)


def _VerifyRestrictedCmdName(cmd):
  """Verifies a restricted command name.

  @type cmd: string
  @param cmd: Command name
  @rtype: tuple; (boolean, string or None)
  @return: The tuple's first element is the status; if C{False}, the second
    element is an error message string, otherwise it's C{None}

  """
  if not cmd.strip():
    return (False, "Missing command name")

  if os.path.basename(cmd) != cmd:
    return (False, "Invalid command name")

  if not constants.EXT_PLUGIN_MASK.match(cmd):
    return (False, "Command name contains forbidden characters")

  return (True, None)


def _CommonRestrictedCmdCheck(path, owner):
  """Common checks for restricted command file system directories and files.

  @type path: string
  @param path: Path to check
  @param owner: C{None} or tuple containing UID and GID
  @rtype: tuple; (boolean, string or C{os.stat} result)
  @return: The tuple's first element is the status; if C{False}, the second
    element is an error message string, otherwise it's the result of C{os.stat}

  """
  if owner is None:
    # Default to root as owner
    owner = (0, 0)

  try:
    st = os.stat(path)
  except EnvironmentError, err:
    return (False, "Can't stat(2) '%s': %s" % (path, err))

  if stat.S_IMODE(st.st_mode) & (~_RCMD_MAX_MODE):
    return (False, "Permissions on '%s' are too permissive" % path)

  if (st.st_uid, st.st_gid) != owner:
    (owner_uid, owner_gid) = owner
    return (False, "'%s' is not owned by %s:%s" % (path, owner_uid, owner_gid))

  return (True, st)


def _VerifyRestrictedCmdDirectory(path, _owner=None):
  """Verifies restricted command directory.

  @type path: string
  @param path: Path to check
  @rtype: tuple; (boolean, string or None)
  @return: The tuple's first element is the status; if C{False}, the second
    element is an error message string, otherwise it's C{None}

  """
  (status, value) = _CommonRestrictedCmdCheck(path, _owner)

  if not status:
    return (False, value)

  if not stat.S_ISDIR(value.st_mode):
    return (False, "Path '%s' is not a directory" % path)

  return (True, None)


def _VerifyRestrictedCmd(path, cmd, _owner=None):
  """Verifies a whole restricted command and returns its executable filename.

  @type path: string
  @param path: Directory containing restricted commands
  @type cmd: string
  @param cmd: Command name
  @rtype: tuple; (boolean, string)
  @return: The tuple's first element is the status; if C{False}, the second
    element is an error message string, otherwise the second element is the
    absolute path to the executable

  """
  executable = utils.PathJoin(path, cmd)

  (status, msg) = _CommonRestrictedCmdCheck(executable, _owner)

  if not status:
    return (False, msg)

  if not utils.IsExecutable(executable):
    return (False, "access(2) thinks '%s' can't be executed" % executable)

  return (True, executable)


def _PrepareRestrictedCmd(path, cmd,
                          _verify_dir=_VerifyRestrictedCmdDirectory,
                          _verify_name=_VerifyRestrictedCmdName,
                          _verify_cmd=_VerifyRestrictedCmd):
  """Performs a number of tests on a restricted command.

  @type path: string
  @param path: Directory containing restricted commands
  @type cmd: string
  @param cmd: Command name
  @return: Same as L{_VerifyRestrictedCmd}

  """
  # Verify the directory first
  (status, msg) = _verify_dir(path)
  if status:
    # Check command if everything was alright
    (status, msg) = _verify_name(cmd)

  if not status:
    return (False, msg)

  # Check actual executable
  return _verify_cmd(path, cmd)


def RunRestrictedCmd(cmd,
                     _lock_timeout=_RCMD_LOCK_TIMEOUT,
                     _lock_file=pathutils.RESTRICTED_COMMANDS_LOCK_FILE,
                     _path=pathutils.RESTRICTED_COMMANDS_DIR,
                     _sleep_fn=time.sleep,
                     _prepare_fn=_PrepareRestrictedCmd,
                     _runcmd_fn=utils.RunCmd,
                     _enabled=constants.ENABLE_RESTRICTED_COMMANDS):
  """Executes a restricted command after performing strict tests.

  @type cmd: string
  @param cmd: Command name
  @rtype: string
  @return: Command output
  @raise RPCFail: In case of an error

  """
  logging.info("Preparing to run restricted command '%s'", cmd)

  if not _enabled:
    _Fail("Restricted commands disabled at configure time")

  lock = None
  try:
    cmdresult = None
    try:
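      # Only one restricted command may run at a time on this node; the
      # lock is released right after the command process is forked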
      lock = utils.FileLock.Open(_lock_file)
      lock.Exclusive(blocking=True, timeout=_lock_timeout)

      (status, value) = _prepare_fn(_path, cmd)

      if status:
        cmdresult = _runcmd_fn([value], env={}, reset_env=True,
                               postfork_fn=lambda _: lock.Unlock())
      else:
        logging.error(value)
    except Exception: # pylint: disable=W0703
      # Keep original error in log
      logging.exception("Caught exception")

    if cmdresult is None:
      logging.info("Sleeping for %0.1f seconds before returning",
                   _RCMD_INVALID_DELAY)
      _sleep_fn(_RCMD_INVALID_DELAY)

      # Do not include original error message in returned error
      _Fail("Executing command '%s' failed" % cmd)
    elif cmdresult.failed or cmdresult.fail_reason:
      _Fail("Restricted command '%s' failed: %s; output: %s",
            cmd, cmdresult.fail_reason, cmdresult.output)
    else:
      return cmdresult.output
  finally:
    if lock is not None:
      # Release lock at last
      lock.Close()
      lock = None


def SetWatcherPause(until, _filename=pathutils.WATCHER_PAUSEFILE):
  """Creates or removes the watcher pause file.

  @type until: None or number
  @param until: Unix timestamp saying until when the watcher shouldn't run

  """
  if until is None:
    logging.info("Received request to no longer pause watcher")
    utils.RemoveFile(_filename)
  else:
    logging.info("Received request to pause watcher until %s", until)

    if not ht.TNumber(until):
      _Fail("Duration must be numeric")

    utils.WriteFile(_filename, data="%d\n" % (until, ), mode=0644)


def ConfigureOVS(ovs_name, ovs_link):
  """Creates an OpenvSwitch on the node.

  This function sets up an OpenvSwitch on the node with the given name and
  connects it via a given eth device.

  @type ovs_name: string
  @param ovs_name: Name of the OpenvSwitch to create.
  @type ovs_link: None or string
  @param ovs_link: Ethernet device for outside connection (can be missing)

  """
  # Initialize the OpenvSwitch
  result = utils.RunCmd(["ovs-vsctl", "add-br", ovs_name])
  if result.failed:
    _Fail("Failed to create openvswitch %s. Script return value: %s, output:"
          " '%s'", ovs_name, result.exit_code, result.output, log=True)

  # And connect it to a physical interface, if given
  if ovs_link:
    result = utils.RunCmd(["ovs-vsctl", "add-port", ovs_name, ovs_link])
    if result.failed:
      _Fail("Failed to connect openvswitch to interface %s. Script return"
            " value: %s, output: '%s'", ovs_link, result.exit_code,
            result.output, log=True)


class HooksRunner(object):
  """Hook runner.

  This class is instantiated on the node side (ganeti-noded) and not
  on the master side.

  """
  def __init__(self, hooks_base_dir=None):
    """Constructor for hooks runner.

    @type hooks_base_dir: str or None
    @param hooks_base_dir: if not None, this overrides the
        L{pathutils.HOOKS_BASE_DIR} (useful for unittests)

    """
    if hooks_base_dir is None:
      hooks_base_dir = pathutils.HOOKS_BASE_DIR
    # yeah, _BASE_DIR is not valid for attributes, we use it like a
    # constant
    self._BASE_DIR = hooks_base_dir # pylint: disable=C0103

  def RunLocalHooks(self, node_list, hpath, phase, env):
    """Check that the hooks will be run only locally and then run them.

    """
    assert len(node_list) == 1
    node = node_list[0]
    _, myself = ssconf.GetMasterAndMyself()
    assert node == myself

    results = self.RunHooks(hpath, phase, env)

    # Return values in the form expected by HooksMaster
    return {node: (None, False, results)}

  def RunHooks(self, hpath, phase, env):
    """Run the scripts in the hooks directory.

    @type hpath: str
    @param hpath: the path to the hooks directory which
        holds the scripts
    @type phase: str
    @param phase: either L{constants.HOOKS_PHASE_PRE} or
        L{constants.HOOKS_PHASE_POST}
    @type env: dict
    @param env: dictionary with the environment for the hook
    @rtype: list
    @return: list of 3-element tuples:
      - script path
      - script result, either L{constants.HKR_SUCCESS} or
        L{constants.HKR_FAIL}
      - output of the script

    @raise errors.ProgrammerError: for invalid input
        parameters

    """
    if phase == constants.HOOKS_PHASE_PRE:
      suffix = "pre"
    elif phase == constants.HOOKS_PHASE_POST:
      suffix = "post"
    else:
      _Fail("Unknown hooks phase '%s'", phase)

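    # hooks for a given path and phase live in a "<hpath>-<pre|post>.d"
    # subdirectory of the hooks base directory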
    subdir = "%s-%s.d" % (hpath, suffix)
    dir_name = utils.PathJoin(self._BASE_DIR, subdir)

    results = []

    if not os.path.isdir(dir_name):
      # for non-existing/non-dirs, we simply exit instead of logging a
      # warning at every operation
      return results

    runparts_results = utils.RunParts(dir_name, env=env, reset_env=True)

    for (relname, relstatus, runresult) in runparts_results:
      if relstatus == constants.RUNPARTS_SKIP:
        rrval = constants.HKR_SKIP
        output = ""
      elif relstatus == constants.RUNPARTS_ERR:
        rrval = constants.HKR_FAIL
        output = "Hook script execution error: %s" % runresult
      elif relstatus == constants.RUNPARTS_RUN:
        if runresult.failed:
          rrval = constants.HKR_FAIL
        else:
          rrval = constants.HKR_SUCCESS
        output = utils.SafeEncode(runresult.output.strip())
      results.append(("%s/%s" % (subdir, relname), rrval, output))

    return results


class IAllocatorRunner(object):
  """IAllocator runner.

  This class is instantiated on the node side (ganeti-noded) and not on
  the master side.

  """
  @staticmethod
  def Run(name, idata):
    """Run an iallocator script.

    @type name: str
    @param name: the iallocator script name
    @type idata: str
    @param idata: the allocator input data

    @rtype: tuple
    @return: two element tuple of:
       - status
       - either error message or stdout of allocator (for success)

    """
    alloc_script = utils.FindFile(name, constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      _Fail("iallocator module '%s' not found in the search path", name)

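    # the allocator input is handed over via a temporary file, which is
    # removed once the script has finished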
    fd, fin_name = tempfile.mkstemp(prefix="ganeti-iallocator.")
    try:
      os.write(fd, idata)
      os.close(fd)
      result = utils.RunCmd([alloc_script, fin_name])
      if result.failed:
        _Fail("iallocator module '%s' failed: %s, output '%s'",
              name, result.fail_reason, result.output)
    finally:
      os.unlink(fin_name)

    return result.stdout


class DevCacheManager(object):
  """Simple class for managing a cache of block device information.

  """
  _DEV_PREFIX = "/dev/"
  _ROOT_DIR = pathutils.BDEV_CACHE_DIR

  @classmethod
  def _ConvertPath(cls, dev_path):
    """Converts a /dev/name path to the cache file name.

    This replaces slashes with underscores and strips the /dev
    prefix. It then returns the full path to the cache file.

    @type dev_path: str
    @param dev_path: the C{/dev/} path name
    @rtype: str
    @return: the converted path name

    """
    if dev_path.startswith(cls._DEV_PREFIX):
      dev_path = dev_path[len(cls._DEV_PREFIX):]
    dev_path = dev_path.replace("/", "_")
    fpath = utils.PathJoin(cls._ROOT_DIR, "bdev_%s" % dev_path)
    return fpath

  @classmethod
  def UpdateCache(cls, dev_path, owner, on_primary, iv_name):
    """Updates the cache information for a given device.

    @type dev_path: str
    @param dev_path: the pathname of the device
    @type owner: str
    @param owner: the owner (instance name) of the device
    @type on_primary: bool
    @param on_primary: whether this is the primary
        node or not
    @type iv_name: str
    @param iv_name: the instance-visible name of the
        device, as in objects.Disk.iv_name

    @rtype: None

    """
    if dev_path is None:
      logging.error("DevCacheManager.UpdateCache got a None dev_path")
      return
    fpath = cls._ConvertPath(dev_path)
    if on_primary:
      state = "primary"
    else:
      state = "secondary"
    if iv_name is None:
      iv_name = "not_visible"
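    # the cache file holds a single line:
    # "<owner> <primary|secondary> <iv_name>"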
    fdata = "%s %s %s\n" % (str(owner), state, iv_name)
    try:
      utils.WriteFile(fpath, data=fdata)
    except EnvironmentError, err:
      logging.exception("Can't update bdev cache for %s: %s", dev_path, err)

  @classmethod
  def RemoveCache(cls, dev_path):
    """Remove data for a dev_path.

    This is just a wrapper over L{utils.io.RemoveFile} with a converted
    path name and logging.

    @type dev_path: str
    @param dev_path: the pathname of the device

    @rtype: None

    """
    if dev_path is None:
      logging.error("DevCacheManager.RemoveCache got a None dev_path")
      return
    fpath = cls._ConvertPath(dev_path)
    try:
      utils.RemoveFile(fpath)
    except EnvironmentError, err:
      logging.exception("Can't remove the bdev cache for %s: %s",
                        dev_path, err)