lib/rpc.py @ e78667fe
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Inter-node RPC library.

"""

# pylint: disable=C0103,R0201,R0904
# C0103: Invalid name, since call_ names are not valid
# R0201: Method could be a function; we keep all RPCs as instance methods
# so as not to change them back and forth between static/instance methods
# if they need to start using instance attributes
# R0904: Too many public methods

import os
import logging
import zlib
import base64
import pycurl
import threading

from ganeti import utils
from ganeti import objects
from ganeti import http
from ganeti import serializer
from ganeti import constants
from ganeti import errors
from ganeti import netutils
from ganeti import ssconf
from ganeti import runtime
from ganeti import compat
from ganeti import rpc_defs

# Special module generated at build time
from ganeti import _generated_rpc

# pylint has a bug here, doesn't see this import
import ganeti.http.client  # pylint: disable=W0611


# Timeout for connecting to nodes (seconds)
_RPC_CONNECT_TIMEOUT = 5

_RPC_CLIENT_HEADERS = [
  "Content-type: %s" % http.HTTP_APP_JSON,
  "Expect:",
  ]

# Various time constants for the timeout table
_TMO_URGENT = 60 # one minute
_TMO_FAST = 5 * 60 # five minutes
_TMO_NORMAL = 15 * 60 # 15 minutes
_TMO_SLOW = 3600 # one hour
_TMO_4HRS = 4 * 3600
_TMO_1DAY = 86400

#: Special value to describe an offline host
_OFFLINE = object()


def Init():
  """Initializes the module-global HTTP client manager.

  Must be called before using any RPC function and while exactly one thread is
  running.

  """
  # curl_global_init(3) and curl_global_cleanup(3) must be called with only
  # one thread running. This check is just a safety measure -- it doesn't
  # cover all cases.
  assert threading.activeCount() == 1, \
         "Found more than one active thread when initializing pycURL"

  logging.info("Using PycURL %s", pycurl.version)

  pycurl.global_init(pycurl.GLOBAL_ALL)


def Shutdown():
  """Stops the module-global HTTP client manager.

  Must be called before quitting the program and while exactly one thread is
  running.

  """
  pycurl.global_cleanup()


def _ConfigRpcCurl(curl):
  noded_cert = str(constants.NODED_CERT_FILE)

  curl.setopt(pycurl.FOLLOWLOCATION, False)
  curl.setopt(pycurl.CAINFO, noded_cert)
  curl.setopt(pycurl.SSL_VERIFYHOST, 0)
  curl.setopt(pycurl.SSL_VERIFYPEER, True)
  curl.setopt(pycurl.SSLCERTTYPE, "PEM")
  curl.setopt(pycurl.SSLCERT, noded_cert)
  curl.setopt(pycurl.SSLKEYTYPE, "PEM")
  curl.setopt(pycurl.SSLKEY, noded_cert)
  curl.setopt(pycurl.CONNECTTIMEOUT, _RPC_CONNECT_TIMEOUT)


def RunWithRPC(fn):
  """RPC-wrapper decorator.

  When applied to a function, it runs it with the RPC system
  initialized, and it shuts down the system afterwards. This means the
  function must be called without RPC being initialized.

  """
  def wrapper(*args, **kwargs):
    Init()
    try:
      return fn(*args, **kwargs)
    finally:
      Shutdown()
  return wrapper
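# Illustrative usage of RunWithRPC (the function below is hypothetical and
# not part of the original module): an entry point decorated this way can
# use RPC clients without calling Init()/Shutdown() itself, since the
# decorator brackets the call with both.
@RunWithRPC
def _ExampleRpcEntryPoint():
  """Hypothetical entry point running with pycURL initialized."""
  # RPC client classes defined further below (e.g. BootstrapRunner) could be
  # instantiated and used here; pycURL is set up on entry and cleaned up on
  # exit, even if this body raises an exception.
  pass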


def _Compress(data):
  """Compresses a string for transport over RPC.

  Small amounts of data are not compressed.

  @type data: str
  @param data: Data
  @rtype: tuple
  @return: Encoded data to send

  """
  # Small amounts of data are not compressed
  if len(data) < 512:
    return (constants.RPC_ENCODING_NONE, data)

  # Compress with zlib and encode in base64
  return (constants.RPC_ENCODING_ZLIB_BASE64,
          base64.b64encode(zlib.compress(data, 3)))
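# Illustrative sketch (the helper below is hypothetical, not part of the
# original module): the receiving side reverses _Compress by dispatching on
# the encoding marker carried in the tuple.
def _ExampleDecompress(value):
  """Decodes an (encoding, data) tuple as produced by L{_Compress}."""
  (encoding, data) = value
  if encoding == constants.RPC_ENCODING_NONE:
    return data
  elif encoding == constants.RPC_ENCODING_ZLIB_BASE64:
    return zlib.decompress(base64.b64decode(data))
  else:
    raise errors.GenericError("Unknown RPC encoding %r" % (encoding, ))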


class RpcResult(object):
  """RPC Result class.

  This class holds an RPC result. It is needed since in multi-node
  calls we can't raise an exception just because one out of many
  failed, and therefore we use this class to encapsulate the result.

  @ivar data: the data payload, for successful results, or None
  @ivar call: the name of the RPC call
  @ivar node: the name of the node to which we made the call
  @ivar offline: whether the operation failed because the node was
      offline, as opposed to actual failure; offline=True will always
      imply failed=True, in order to allow simpler checking if
      the user doesn't care about the exact failure mode
  @ivar fail_msg: the error message if the call failed

  """
  def __init__(self, data=None, failed=False, offline=False,
               call=None, node=None):
    self.offline = offline
    self.call = call
    self.node = node

    if offline:
      self.fail_msg = "Node is marked offline"
      self.data = self.payload = None
    elif failed:
      self.fail_msg = self._EnsureErr(data)
      self.data = self.payload = None
    else:
      self.data = data
      if not isinstance(self.data, (tuple, list)):
        self.fail_msg = ("RPC layer error: invalid result type (%s)" %
                         type(self.data))
        self.payload = None
      elif len(data) != 2:
        self.fail_msg = ("RPC layer error: invalid result length (%d), "
                         "expected 2" % len(self.data))
        self.payload = None
      elif not self.data[0]:
        self.fail_msg = self._EnsureErr(self.data[1])
        self.payload = None
      else:
        # finally success
        self.fail_msg = None
        self.payload = data[1]

    for attr_name in ["call", "data", "fail_msg",
                      "node", "offline", "payload"]:
      assert hasattr(self, attr_name), "Missing attribute %s" % attr_name

  @staticmethod
  def _EnsureErr(val):
    """Helper to ensure we return a 'True' value for error."""
    if val:
      return val
    else:
      return "No error information"

  def Raise(self, msg, prereq=False, ecode=None):
    """If the result has failed, raise an OpExecError.

    This is used so that LU code doesn't have to check for each
    result, but instead can call this function.

    """
    if not self.fail_msg:
      return

    if not msg: # one could pass None for default message
      msg = ("Call '%s' to node '%s' has failed: %s" %
             (self.call, self.node, self.fail_msg))
    else:
      msg = "%s: %s" % (msg, self.fail_msg)
    if prereq:
      ec = errors.OpPrereqError
    else:
      ec = errors.OpExecError
    if ecode is not None:
      args = (msg, ecode)
    else:
      args = (msg, )
    raise ec(*args) # pylint: disable=W0142
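# Illustrative sketch (hypothetical helper, not part of the original module):
# callers of multi-node RPCs typically either inspect fail_msg/payload per
# node as below, or call Raise() to abort on the first failure.
def _ExampleLogResults(results):
  """Logs a {node: RpcResult} dictionary as returned by a multi-node call."""
  for node, result in results.items():
    if result.offline:
      logging.info("Node %s is offline, call %s was skipped", node,
                   result.call)
    elif result.fail_msg:
      logging.warning("Call %s to %s failed: %s", result.call, node,
                      result.fail_msg)
    else:
      logging.debug("Call %s to %s returned %r", result.call, node,
                    result.payload)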


def _SsconfResolver(node_list, _,
                    ssc=ssconf.SimpleStore,
                    nslookup_fn=netutils.Hostname.GetIP):
  """Return addresses for given node names.

  @type node_list: list
  @param node_list: List of node names
  @type ssc: class
  @param ssc: SimpleStore class that is used to obtain node->ip mappings
  @type nslookup_fn: callable
  @param nslookup_fn: function used to do NS lookup
  @rtype: list of tuple; (string, string)
  @return: List of tuples containing node name and IP address

  """
  ss = ssc()
  iplist = ss.GetNodePrimaryIPList()
  family = ss.GetPrimaryIPFamily()
  ipmap = dict(entry.split() for entry in iplist)

  result = []
  for node in node_list:
    ip = ipmap.get(node)
    if ip is None:
      ip = nslookup_fn(node, family=family)
    result.append((node, ip))

  return result
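# For illustration only: the ssconf primary IP list consists of
# "<node name> <primary ip>" strings, so with
#   iplist = ["node1.example.com 192.0.2.1", "node2.example.com 192.0.2.2"]
# the resolver above yields
#   [("node1.example.com", "192.0.2.1"), ("node2.example.com", "192.0.2.2")]
# and only falls back to nslookup_fn for names missing from that map
# (the hostnames here are made up).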


class _StaticResolver:
  def __init__(self, addresses):
    """Initializes this class.

    """
    self._addresses = addresses

  def __call__(self, hosts, _):
    """Returns static addresses for hosts.

    """
    assert len(hosts) == len(self._addresses)
    return zip(hosts, self._addresses)


def _CheckConfigNode(name, node, accept_offline_node):
  """Checks if a node is online.

  @type name: string
  @param name: Node name
  @type node: L{objects.Node} or None
  @param node: Node object
  @type accept_offline_node: bool
  @param accept_offline_node: whether to return the primary IP even for
      offline nodes

  """
  if node is None:
    # Depend on DNS for name resolution
    ip = name
  elif node.offline and not accept_offline_node:
    ip = _OFFLINE
  else:
    ip = node.primary_ip
  return (name, ip)


def _NodeConfigResolver(single_node_fn, all_nodes_fn, hosts, opts):
  """Calculate node addresses using configuration.

  """
  accept_offline_node = (opts is rpc_defs.ACCEPT_OFFLINE_NODE)

  assert accept_offline_node or opts is None, "Unknown option"

  # Special case for single-host lookups
  if len(hosts) == 1:
    (name, ) = hosts
    return [_CheckConfigNode(name, single_node_fn(name), accept_offline_node)]
  else:
    all_nodes = all_nodes_fn()
    return [_CheckConfigNode(name, all_nodes.get(name, None),
                             accept_offline_node)
            for name in hosts]


class _RpcProcessor:
  def __init__(self, resolver, port, lock_monitor_cb=None):
    """Initializes this class.

    @param resolver: callable accepting a list of hostnames, returning a list
      of tuples containing name and IP address (IP address can be the name or
      the special value L{_OFFLINE} to mark offline machines)
    @type port: int
    @param port: TCP port
    @param lock_monitor_cb: Callable for registering with lock monitor

    """
    self._resolver = resolver
    self._port = port
    self._lock_monitor_cb = lock_monitor_cb

  @staticmethod
  def _PrepareRequests(hosts, port, procedure, body, read_timeout):
    """Prepares requests by sorting offline hosts into a separate list.

    @type body: dict
    @param body: a dictionary with per-host body data

    """
    results = {}
    requests = {}

    assert isinstance(body, dict)
    assert len(body) == len(hosts)
    assert compat.all(isinstance(v, str) for v in body.values())
    assert frozenset(map(compat.fst, hosts)) == frozenset(body.keys()), \
        "%s != %s" % (hosts, body.keys())

    for (name, ip) in hosts:
      if ip is _OFFLINE:
        # Node is marked as offline
        results[name] = RpcResult(node=name, offline=True, call=procedure)
      else:
        requests[name] = \
          http.client.HttpClientRequest(str(ip), port,
                                        http.HTTP_PUT, str("/%s" % procedure),
                                        headers=_RPC_CLIENT_HEADERS,
                                        post_data=body[name],
                                        read_timeout=read_timeout,
                                        nicename="%s/%s" % (name, procedure),
                                        curl_config_fn=_ConfigRpcCurl)

    return (results, requests)

  @staticmethod
  def _CombineResults(results, requests, procedure):
    """Combines pre-computed results for offline hosts with actual call results.

    """
    for name, req in requests.items():
      if req.success and req.resp_status_code == http.HTTP_OK:
        host_result = RpcResult(data=serializer.LoadJson(req.resp_body),
                                node=name, call=procedure)
      else:
        # TODO: Better error reporting
        if req.error:
          msg = req.error
        else:
          msg = req.resp_body

        logging.error("RPC error in %s on node %s: %s", procedure, name, msg)
        host_result = RpcResult(data=msg, failed=True, node=name,
                                call=procedure)

      results[name] = host_result

    return results

  def __call__(self, hosts, procedure, body, read_timeout, resolver_opts,
               _req_process_fn=http.client.ProcessRequests):
    """Makes an RPC request to a number of nodes.

    @type hosts: sequence
    @param hosts: Hostnames
    @type procedure: string
    @param procedure: Request path
    @type body: dictionary
    @param body: dictionary with request bodies per host
    @type read_timeout: int or None
    @param read_timeout: Read timeout for request

    """
    assert read_timeout is not None, \
      "Missing RPC read timeout for procedure '%s'" % procedure

    (results, requests) = \
      self._PrepareRequests(self._resolver(hosts, resolver_opts), self._port,
                            procedure, body, read_timeout)

    _req_process_fn(requests.values(), lock_monitor_cb=self._lock_monitor_cb)

    assert not frozenset(results).intersection(requests)

    return self._CombineResults(results, requests, procedure)
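# Illustrative sketch (hypothetical helper, not part of the original module):
# how the processor is wired when used directly, outside _RpcClientBase --
# resolve the names, serialize one body per node, then invoke the processor.
def _ExampleCallProcedure(node_names, procedure, args, read_timeout):
  """Calls an already-encoded procedure on a set of nodes via noded."""
  proc = _RpcProcessor(_SsconfResolver,
                       netutils.GetDaemonPort(constants.NODED))
  body = serializer.DumpJson(args)
  return proc(node_names, procedure,
              dict((name, body) for name in node_names),
              read_timeout, None)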


class _RpcClientBase:
  def __init__(self, resolver, encoder_fn, lock_monitor_cb=None):
    """Initializes this class.

    """
    self._proc = _RpcProcessor(resolver,
                               netutils.GetDaemonPort(constants.NODED),
                               lock_monitor_cb=lock_monitor_cb)
    self._encoder = compat.partial(self._EncodeArg, encoder_fn)

  @staticmethod
  def _EncodeArg(encoder_fn, (argkind, value)):
    """Encode argument.

    """
    if argkind is None:
      return value
    else:
      return encoder_fn(argkind)(value)

  def _Call(self, cdef, node_list, args):
    """Entry point for automatically generated RPC wrappers.

    """
    (procedure, _, resolver_opts, timeout, argdefs,
     prep_fn, postproc_fn, _) = cdef

    if callable(timeout):
      read_timeout = timeout(args)
    else:
      read_timeout = timeout

    if callable(resolver_opts):
      req_resolver_opts = resolver_opts(args)
    else:
      req_resolver_opts = resolver_opts

    if len(args) != len(argdefs):
      raise errors.ProgrammerError("Number of passed arguments doesn't match")

    enc_args = map(self._encoder, zip(map(compat.snd, argdefs), args))
    if prep_fn is None:
      # for a no-op prep_fn, we serialise the body once, and then we
      # reuse it in the dictionary values
      body = serializer.DumpJson(enc_args)
      pnbody = dict((n, body) for n in node_list)
    else:
      # for a custom prep_fn, we pass the encoded arguments and the
      # node name to the prep_fn, and we serialise its return value
      assert callable(prep_fn)
      pnbody = dict((n, serializer.DumpJson(prep_fn(n, enc_args)))
                    for n in node_list)

    result = self._proc(node_list, procedure, pnbody, read_timeout,
                        req_resolver_opts)

    if postproc_fn:
      return dict(map(lambda (key, value): (key, postproc_fn(value)),
                      result.items()))
    else:
      return result


def _ObjectToDict(value):
  """Converts an object to a dictionary.

  @note: See L{objects}.

  """
  return value.ToDict()


def _ObjectListToDict(value):
  """Converts a list of L{objects} to dictionaries.

  """
  return map(_ObjectToDict, value)


def _EncodeNodeToDiskDict(value):
  """Encodes a dictionary with node name as key and disk objects as values.

  """
  return dict((name, _ObjectListToDict(disks))
              for name, disks in value.items())


def _PrepareFileUpload(filename):
  """Loads a file and prepares it for an upload to nodes.

  """
  data = _Compress(utils.ReadFile(filename))
  st = os.stat(filename)
  getents = runtime.GetEnts()
  return [filename, data, st.st_mode, getents.LookupUid(st.st_uid),
          getents.LookupGid(st.st_gid), st.st_atime, st.st_mtime]


def _PrepareFinalizeExportDisks(snap_disks):
  """Encodes disks for finalizing export.

  """
  flat_disks = []

  for disk in snap_disks:
    if isinstance(disk, bool):
      flat_disks.append(disk)
    else:
      flat_disks.append(disk.ToDict())

  return flat_disks


def _EncodeImportExportIO((ieio, ieioargs)):
  """Encodes import/export I/O information.

  """
  if ieio == constants.IEIO_RAW_DISK:
    assert len(ieioargs) == 1
    return (ieio, (ieioargs[0].ToDict(), ))

  if ieio == constants.IEIO_SCRIPT:
    assert len(ieioargs) == 2
    return (ieio, (ieioargs[0].ToDict(), ieioargs[1]))

  return (ieio, ieioargs)


def _EncodeBlockdevRename(value):
  """Encodes information for renaming block devices.

  """
  return [(d.ToDict(), uid) for d, uid in value]


#: Generic encoders
_ENCODERS = {
  rpc_defs.ED_OBJECT_DICT: _ObjectToDict,
  rpc_defs.ED_OBJECT_DICT_LIST: _ObjectListToDict,
  rpc_defs.ED_NODE_TO_DISK_DICT: _EncodeNodeToDiskDict,
  rpc_defs.ED_FILE_DETAILS: _PrepareFileUpload,
  rpc_defs.ED_COMPRESS: _Compress,
  rpc_defs.ED_FINALIZE_EXPORT_DISKS: _PrepareFinalizeExportDisks,
  rpc_defs.ED_IMPEXP_IO: _EncodeImportExportIO,
  rpc_defs.ED_BLOCKDEV_RENAME: _EncodeBlockdevRename,
  }
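# Illustrative only (hypothetical helper, not part of the original module):
# how an argument definition kind is turned into wire data, mirroring
# _RpcClientBase._EncodeArg with the table above as the encoder lookup.
def _ExampleEncodeArg(argkind, value):
  """Encodes a single RPC argument using the generic encoder table."""
  if argkind is None:
    # Plain values (strings, numbers, lists of simple types) pass through
    return value
  return _ENCODERS[argkind](value)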


class RpcRunner(_RpcClientBase,
                _generated_rpc.RpcClientDefault,
                _generated_rpc.RpcClientBootstrap,
                _generated_rpc.RpcClientConfig):
  """RPC runner class.

  """
  def __init__(self, context):
    """Initializes the RPC runner.

    @type context: C{masterd.GanetiContext}
    @param context: Ganeti context

    """
    self._cfg = context.cfg

    encoders = _ENCODERS.copy()

    # Add encoders requiring configuration object
    encoders.update({
      rpc_defs.ED_INST_DICT: self._InstDict,
      rpc_defs.ED_INST_DICT_HVP_BEP: self._InstDictHvpBep,
      rpc_defs.ED_INST_DICT_OSP: self._InstDictOsp,
      })

    # Resolver using configuration
    resolver = compat.partial(_NodeConfigResolver, self._cfg.GetNodeInfo,
                              self._cfg.GetAllNodesInfo)

    # Pylint doesn't recognize multiple inheritance properly, see
    # <http://www.logilab.org/ticket/36586> and
    # <http://www.logilab.org/ticket/35642>
    # pylint: disable=W0233
    _RpcClientBase.__init__(self, resolver, encoders.get,
                            lock_monitor_cb=context.glm.AddToLockMonitor)
    _generated_rpc.RpcClientConfig.__init__(self)
    _generated_rpc.RpcClientBootstrap.__init__(self)
    _generated_rpc.RpcClientDefault.__init__(self)

  def _InstDict(self, instance, hvp=None, bep=None, osp=None):
    """Convert the given instance to a dict.

    This is done via the instance's ToDict() method and additionally
    we fill the hvparams with the cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: an Instance object
    @type hvp: dict or None
    @param hvp: a dictionary with overridden hypervisor parameters
    @type bep: dict or None
    @param bep: a dictionary with overridden backend parameters
    @type osp: dict or None
    @param osp: a dictionary with overridden os parameters
    @rtype: dict
    @return: the instance dict, with the hvparams filled with the
        cluster defaults

    """
    idict = instance.ToDict()
    cluster = self._cfg.GetClusterInfo()
    idict["hvparams"] = cluster.FillHV(instance)
    if hvp is not None:
      idict["hvparams"].update(hvp)
    idict["beparams"] = cluster.FillBE(instance)
    if bep is not None:
      idict["beparams"].update(bep)
    idict["osparams"] = cluster.SimpleFillOS(instance.os, instance.osparams)
    if osp is not None:
      idict["osparams"].update(osp)
    for nic in idict["nics"]:
      nic['nicparams'] = objects.FillDict(
        cluster.nicparams[constants.PP_DEFAULT],
        nic['nicparams'])
    return idict

  def _InstDictHvpBep(self, (instance, hvp, bep)):
    """Wrapper for L{_InstDict}.

    """
    return self._InstDict(instance, hvp=hvp, bep=bep)

  def _InstDictOsp(self, (instance, osparams)):
    """Wrapper for L{_InstDict}.

    """
    return self._InstDict(instance, osp=osparams)


class JobQueueRunner(_RpcClientBase, _generated_rpc.RpcClientJobQueue):
  """RPC wrappers for job queue.

  """
  def __init__(self, context, address_list):
    """Initializes this class.

    """
    if address_list is None:
      resolver = _SsconfResolver
    else:
      # Caller provided an address list
      resolver = _StaticResolver(address_list)

    _RpcClientBase.__init__(self, resolver, _ENCODERS.get,
                            lock_monitor_cb=context.glm.AddToLockMonitor)
    _generated_rpc.RpcClientJobQueue.__init__(self)


class BootstrapRunner(_RpcClientBase, _generated_rpc.RpcClientBootstrap):
  """RPC wrappers for bootstrapping.

  """
  def __init__(self):
    """Initializes this class.

    """
    _RpcClientBase.__init__(self, _SsconfResolver, _ENCODERS.get)
    _generated_rpc.RpcClientBootstrap.__init__(self)


class ConfigRunner(_RpcClientBase, _generated_rpc.RpcClientConfig):
  """RPC wrappers for L{config}.

  """
  def __init__(self, context, address_list):
    """Initializes this class.

    """
    if context:
      lock_monitor_cb = context.glm.AddToLockMonitor
    else:
      lock_monitor_cb = None

    if address_list is None:
      resolver = _SsconfResolver
    else:
      # Caller provided an address list
      resolver = _StaticResolver(address_list)

    _RpcClientBase.__init__(self, resolver, _ENCODERS.get,
                            lock_monitor_cb=lock_monitor_cb)
    _generated_rpc.RpcClientConfig.__init__(self)
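# Illustrative wiring of the client classes above (hypothetical helper, not
# part of the original module): a ConfigRunner resolves node names through
# ssconf when no address list is given, or statically otherwise.
def _ExampleMakeConfigRunner(address_list=None):
  """Returns a ConfigRunner without lock-monitor integration."""
  # With address_list=None the ssconf-based resolver is used; with a list of
  # addresses, the node names passed to each call map one-to-one onto it.
  return ConfigRunner(None, address_list)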