#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Inter-node RPC library.

"""

# pylint: disable=C0103,R0201,R0904
# C0103: Invalid name, since the call_ names are not valid
# R0201: Method could be a function; we keep all RPCs as instance methods
# so as not to change them back and forth between static/instance methods
# if they need to start using instance attributes
# R0904: Too many public methods

import os
import logging
import zlib
import base64
import pycurl
import threading

from ganeti import utils
from ganeti import objects
from ganeti import http
from ganeti import serializer
from ganeti import constants
from ganeti import errors
from ganeti import netutils
from ganeti import ssconf
from ganeti import runtime
from ganeti import compat
from ganeti import rpc_defs

# Special module generated at build time
from ganeti import _generated_rpc

# pylint has a bug here, doesn't see this import
import ganeti.http.client  # pylint: disable=W0611


# Timeout for connecting to nodes (seconds)
_RPC_CONNECT_TIMEOUT = 5

_RPC_CLIENT_HEADERS = [
  "Content-type: %s" % http.HTTP_APP_JSON,
  "Expect:",
  ]

# Various time constants for the timeout table
_TMO_URGENT = 60 # one minute
_TMO_FAST = 5 * 60 # five minutes
_TMO_NORMAL = 15 * 60 # 15 minutes
_TMO_SLOW = 3600 # one hour
_TMO_4HRS = 4 * 3600
_TMO_1DAY = 86400

#: Special value to describe an offline host
_OFFLINE = object()


def Init():
  """Initializes the module-global HTTP client manager.

  Must be called before using any RPC function and while exactly one thread is
  running.

  """
  # curl_global_init(3) and curl_global_cleanup(3) must be called with only
  # one thread running. This check is just a safety measure -- it doesn't
  # cover all cases.
  assert threading.activeCount() == 1, \
         "Found more than one active thread when initializing pycURL"

  logging.info("Using PycURL %s", pycurl.version)

  pycurl.global_init(pycurl.GLOBAL_ALL)


def Shutdown():
  """Stops the module-global HTTP client manager.

  Must be called before quitting the program and while exactly one thread is
  running.

  """
  pycurl.global_cleanup()


def _ConfigRpcCurl(curl):
  noded_cert = str(constants.NODED_CERT_FILE)

  curl.setopt(pycurl.FOLLOWLOCATION, False)
  curl.setopt(pycurl.CAINFO, noded_cert)
  curl.setopt(pycurl.SSL_VERIFYHOST, 0)
  curl.setopt(pycurl.SSL_VERIFYPEER, True)
  curl.setopt(pycurl.SSLCERTTYPE, "PEM")
  curl.setopt(pycurl.SSLCERT, noded_cert)
  curl.setopt(pycurl.SSLKEYTYPE, "PEM")
  curl.setopt(pycurl.SSLKEY, noded_cert)
  curl.setopt(pycurl.CONNECTTIMEOUT, _RPC_CONNECT_TIMEOUT)

def RunWithRPC(fn):
  """RPC-wrapper decorator.

  When applied to a function, it runs it with the RPC system
  initialized, and it shuts down the system afterwards. This means the
  function must be called without RPC being initialized.

  """
  def wrapper(*args, **kwargs):
    Init()
    try:
      return fn(*args, **kwargs)
    finally:
      Shutdown()
  return wrapper
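
# Example use of the RunWithRPC decorator (illustrative sketch only; "main" is
# a hypothetical entry point, not part of this module). The decorator calls
# Init() before the wrapped function and Shutdown() afterwards, so the wrapped
# function itself must not initialize the RPC layer:
#
#   @RunWithRPC
#   def main():
#     ...  # RPC calls can be made here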


def _Compress(data):
  """Compresses a string for transport over RPC.

  Small amounts of data are not compressed.

  @type data: str
  @param data: Data
  @rtype: tuple
  @return: Encoded data to send

  """
  # Small amounts of data are not compressed
  if len(data) < 512:
    return (constants.RPC_ENCODING_NONE, data)

  # Compress with zlib and encode in base64
  return (constants.RPC_ENCODING_ZLIB_BASE64,
          base64.b64encode(zlib.compress(data, 3)))
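
# Illustrative behaviour of _Compress (return values shown are schematic, not
# captured output): payloads shorter than 512 bytes are passed through
# unchanged, larger ones are zlib-compressed and base64-encoded.
#
#   _Compress("x" * 16)    # -> (constants.RPC_ENCODING_NONE, "xxxx...")
#   _Compress("x" * 4096)  # -> (constants.RPC_ENCODING_ZLIB_BASE64, "<base64>")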


class RpcResult(object):
  """RPC Result class.

  This class holds an RPC result. It is needed since in multi-node
  calls we can't raise an exception just because one out of many
  failed, and therefore we use this class to encapsulate the result.

  @ivar data: the data payload, for successful results, or None
  @ivar call: the name of the RPC call
  @ivar node: the name of the node to which we made the call
  @ivar offline: whether the operation failed because the node was
      offline, as opposed to actual failure; offline=True will always
      imply failed=True, in order to allow simpler checking if
      the user doesn't care about the exact failure mode
  @ivar fail_msg: the error message if the call failed

  """
  def __init__(self, data=None, failed=False, offline=False,
               call=None, node=None):
    self.offline = offline
    self.call = call
    self.node = node

    if offline:
      self.fail_msg = "Node is marked offline"
      self.data = self.payload = None
    elif failed:
      self.fail_msg = self._EnsureErr(data)
      self.data = self.payload = None
    else:
      self.data = data
      if not isinstance(self.data, (tuple, list)):
        self.fail_msg = ("RPC layer error: invalid result type (%s)" %
                         type(self.data))
        self.payload = None
      elif len(data) != 2:
        self.fail_msg = ("RPC layer error: invalid result length (%d), "
                         "expected 2" % len(self.data))
        self.payload = None
      elif not self.data[0]:
        self.fail_msg = self._EnsureErr(self.data[1])
        self.payload = None
      else:
        # finally success
        self.fail_msg = None
        self.payload = data[1]

    for attr_name in ["call", "data", "fail_msg",
                      "node", "offline", "payload"]:
      assert hasattr(self, attr_name), "Missing attribute %s" % attr_name

  @staticmethod
  def _EnsureErr(val):
    """Helper to ensure we return a 'True' value for error."""
    if val:
      return val
    else:
      return "No error information"

  def Raise(self, msg, prereq=False, ecode=None):
    """If the result has failed, raise an OpExecError.

    If C{prereq} is true, an L{errors.OpPrereqError} is raised instead of
    L{errors.OpExecError}. This is used so that LU code doesn't have to
    check each result, but can instead call this function.

    """
    if not self.fail_msg:
      return

    if not msg: # one could pass None for default message
      msg = ("Call '%s' to node '%s' has failed: %s" %
             (self.call, self.node, self.fail_msg))
    else:
      msg = "%s: %s" % (msg, self.fail_msg)
    if prereq:
      ec = errors.OpPrereqError
    else:
      ec = errors.OpExecError
    if ecode is not None:
      args = (msg, ecode)
    else:
      args = (msg, )
    raise ec(*args) # pylint: disable=W0142
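
# Typical consumer pattern for an RpcResult (illustrative sketch; "runner",
# "node_name" and the call name are hypothetical): callers either inspect
# fail_msg/payload directly or let Raise() turn a failure into an exception.
#
#   result = runner.call_version([node_name])[node_name]
#   result.Raise("Could not get version from node %s" % node_name)
#   version = result.payload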


def _SsconfResolver(node_list, _,
                    ssc=ssconf.SimpleStore,
                    nslookup_fn=netutils.Hostname.GetIP):
  """Return addresses for given node names.

  @type node_list: list
  @param node_list: List of node names
  @type ssc: class
  @param ssc: SimpleStore class that is used to obtain node->ip mappings
  @type nslookup_fn: callable
  @param nslookup_fn: function used to do NS lookup
  @rtype: list of tuple; (string, string)
  @return: List of tuples containing node name and IP address

  """
  ss = ssc()
  iplist = ss.GetNodePrimaryIPList()
  family = ss.GetPrimaryIPFamily()
  ipmap = dict(entry.split() for entry in iplist)

  result = []
  for node in node_list:
    ip = ipmap.get(node)
    if ip is None:
      ip = nslookup_fn(node, family=family)
    result.append((node, ip))

  return result
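
# The ssconf primary-IP list consumed above consists of whitespace-separated
# "name ip" entries, one per node (values below are illustrative only):
#
#   iplist = ["node1.example.com 192.0.2.10", "node2.example.com 192.0.2.11"]
#   dict(entry.split() for entry in iplist)
#   # -> {"node1.example.com": "192.0.2.10", "node2.example.com": "192.0.2.11"}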


class _StaticResolver:
  def __init__(self, addresses):
    """Initializes this class.

    """
    self._addresses = addresses

  def __call__(self, hosts, _):
    """Returns static addresses for hosts.

    """
    assert len(hosts) == len(self._addresses)
    return zip(hosts, self._addresses)


def _CheckConfigNode(name, node, accept_offline_node):
  """Checks if a node is online.

  @type name: string
  @param name: Node name
  @type node: L{objects.Node} or None
  @param node: Node object
  @type accept_offline_node: bool
  @param accept_offline_node: Whether offline nodes should be accepted instead
      of being marked with L{_OFFLINE}

  """
  if node is None:
    # Depend on DNS for name resolution
    ip = name
  elif node.offline and not accept_offline_node:
    ip = _OFFLINE
  else:
    ip = node.primary_ip
  return (name, ip)


def _NodeConfigResolver(single_node_fn, all_nodes_fn, hosts, opts):
  """Calculate node addresses using configuration.

  """
  accept_offline_node = (opts is rpc_defs.ACCEPT_OFFLINE_NODE)

  assert accept_offline_node or opts is None, "Unknown option"

  # Special case for single-host lookups
  if len(hosts) == 1:
    (name, ) = hosts
    return [_CheckConfigNode(name, single_node_fn(name), accept_offline_node)]
  else:
    all_nodes = all_nodes_fn()
    return [_CheckConfigNode(name, all_nodes.get(name, None),
                             accept_offline_node)
            for name in hosts]
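
# All resolvers above share the same contract (a summary of the code, with
# purely illustrative values): given a list of names and an opaque options
# value, they return (name, ip) pairs, where "ip" may also be the node name
# itself (DNS fallback) or _OFFLINE for nodes that are marked offline and not
# explicitly accepted.
#
#   _StaticResolver(["192.0.2.10"])(["node1.example.com"], None)
#   # -> [("node1.example.com", "192.0.2.10")]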


class _RpcProcessor:
  def __init__(self, resolver, port, lock_monitor_cb=None):
    """Initializes this class.

    @param resolver: callable accepting a list of hostnames, returning a list
      of tuples containing name and IP address (IP address can be the name or
      the special value L{_OFFLINE} to mark offline machines)
    @type port: int
    @param port: TCP port
    @param lock_monitor_cb: Callable for registering with lock monitor

    """
    self._resolver = resolver
    self._port = port
    self._lock_monitor_cb = lock_monitor_cb

  @staticmethod
  def _PrepareRequests(hosts, port, procedure, body, read_timeout):
    """Prepares requests by sorting offline hosts into a separate list.

    @type body: dict
    @param body: a dictionary with per-host body data

    """
    results = {}
    requests = {}

    assert isinstance(body, dict)
    assert len(body) == len(hosts)
    assert compat.all(isinstance(v, str) for v in body.values())
    assert frozenset(map(compat.fst, hosts)) == frozenset(body.keys()), \
        "%s != %s" % (hosts, body.keys())

    for (name, ip) in hosts:
      if ip is _OFFLINE:
        # Node is marked as offline
        results[name] = RpcResult(node=name, offline=True, call=procedure)
      else:
        requests[name] = \
          http.client.HttpClientRequest(str(ip), port,
                                        http.HTTP_PUT, str("/%s" % procedure),
                                        headers=_RPC_CLIENT_HEADERS,
                                        post_data=body[name],
                                        read_timeout=read_timeout,
                                        nicename="%s/%s" % (name, procedure),
                                        curl_config_fn=_ConfigRpcCurl)

    return (results, requests)

  @staticmethod
  def _CombineResults(results, requests, procedure):
    """Combines pre-computed results for offline hosts with actual call results.

    """
    for name, req in requests.items():
      if req.success and req.resp_status_code == http.HTTP_OK:
        host_result = RpcResult(data=serializer.LoadJson(req.resp_body),
                                node=name, call=procedure)
      else:
        # TODO: Better error reporting
        if req.error:
          msg = req.error
        else:
          msg = req.resp_body

        logging.error("RPC error in %s on node %s: %s", procedure, name, msg)
        host_result = RpcResult(data=msg, failed=True, node=name,
                                call=procedure)

      results[name] = host_result

    return results

  def __call__(self, hosts, procedure, body, read_timeout, resolver_opts,
               _req_process_fn=http.client.ProcessRequests):
    """Makes an RPC request to a number of nodes.

    @type hosts: sequence
    @param hosts: Hostnames
    @type procedure: string
    @param procedure: Request path
    @type body: dictionary
    @param body: dictionary with request bodies per host
    @type read_timeout: int or None
    @param read_timeout: Read timeout for request

    """
    assert read_timeout is not None, \
      "Missing RPC read timeout for procedure '%s'" % procedure

    (results, requests) = \
      self._PrepareRequests(self._resolver(hosts, resolver_opts), self._port,
                            procedure, body, read_timeout)

    _req_process_fn(requests.values(), lock_monitor_cb=self._lock_monitor_cb)

    assert not frozenset(results).intersection(requests)

    return self._CombineResults(results, requests, procedure)
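
# Illustrative use of _RpcProcessor (all names, the port and the procedure are
# examples, not captured from a real cluster): the processor resolves the
# hosts, issues one HTTP PUT per online host and returns a dict mapping every
# node name to an RpcResult, including pre-built results for offline nodes.
#
#   proc = _RpcProcessor(_StaticResolver(["192.0.2.10"]), 1811)
#   results = proc(["node1"], "version", {"node1": serializer.DumpJson([])},
#                  _TMO_URGENT, None)
#   # results["node1"].fail_msg is None on success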


class _RpcClientBase:
  def __init__(self, resolver, encoder_fn, lock_monitor_cb=None):
    """Initializes this class.

    """
    self._proc = _RpcProcessor(resolver,
                               netutils.GetDaemonPort(constants.NODED),
                               lock_monitor_cb=lock_monitor_cb)
    self._encoder = compat.partial(self._EncodeArg, encoder_fn)

  @staticmethod
  def _EncodeArg(encoder_fn, (argkind, value)):
    """Encode argument.

    """
    if argkind is None:
      return value
    else:
      return encoder_fn(argkind)(value)

  def _Call(self, cdef, node_list, args):
    """Entry point for automatically generated RPC wrappers.

    """
    (procedure, _, resolver_opts, timeout, argdefs,
     prep_fn, postproc_fn, _) = cdef

    if callable(timeout):
      read_timeout = timeout(args)
    else:
      read_timeout = timeout

    if callable(resolver_opts):
      req_resolver_opts = resolver_opts(args)
    else:
      req_resolver_opts = resolver_opts

    enc_args = map(self._encoder, zip(map(compat.snd, argdefs), args))
    if prep_fn is None:
      # for a no-op prep_fn, we serialise the body once, and then we
      # reuse it in the dictionary values
      body = serializer.DumpJson(enc_args)
      pnbody = dict((n, body) for n in node_list)
    else:
      # for a custom prep_fn, we pass the encoded arguments and the
      # node name to the prep_fn, and we serialise its return value
      assert(callable(prep_fn))
      pnbody = dict((n, serializer.DumpJson(prep_fn(n, enc_args)))
                    for n in node_list)

    result = self._proc(node_list, procedure, pnbody, read_timeout,
                        req_resolver_opts)

    if postproc_fn:
      return dict(map(lambda (key, value): (key, postproc_fn(value)),
                      result.items()))
    else:
      return result
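
# How the generated wrappers use _Call (a sketch; the exact call definitions
# live in rpc_defs/_generated_rpc): each wrapper passes a call definition
# tuple that unpacks as (procedure, _, resolver_opts, timeout, argdefs,
# prep_fn, postproc_fn, _) together with the target node list and the call
# arguments. Arguments are encoded according to argdefs, serialised to JSON
# per node, and the combined result is a dict of node name -> L{RpcResult},
# optionally passed through postproc_fn.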


def _ObjectToDict(value):
  """Converts an object to a dictionary.

  @note: See L{objects}.

  """
  return value.ToDict()


def _ObjectListToDict(value):
  """Converts a list of L{objects} to dictionaries.

  """
  return map(_ObjectToDict, value)


def _EncodeNodeToDiskDict(value):
  """Encodes a dictionary with node name as key and disk objects as values.

  """
  return dict((name, _ObjectListToDict(disks))
              for name, disks in value.items())


def _PrepareFileUpload(filename):
  """Loads a file and prepares it for an upload to nodes.

  """
  data = _Compress(utils.ReadFile(filename))
  st = os.stat(filename)
  getents = runtime.GetEnts()
  return [filename, data, st.st_mode, getents.LookupUid(st.st_uid),
          getents.LookupGid(st.st_gid), st.st_atime, st.st_mtime]


def _PrepareFinalizeExportDisks(snap_disks):
  """Encodes disks for finalizing export.

  """
  flat_disks = []

  for disk in snap_disks:
    if isinstance(disk, bool):
      flat_disks.append(disk)
    else:
      flat_disks.append(disk.ToDict())

  return flat_disks


def _EncodeImportExportIO((ieio, ieioargs)):
  """Encodes import/export I/O information.

  """
  if ieio == constants.IEIO_RAW_DISK:
    assert len(ieioargs) == 1
    return (ieio, (ieioargs[0].ToDict(), ))

  if ieio == constants.IEIO_SCRIPT:
    assert len(ieioargs) == 2
    return (ieio, (ieioargs[0].ToDict(), ieioargs[1]))

  return (ieio, ieioargs)


def _EncodeBlockdevRename(value):
  """Encodes information for renaming block devices.

  """
  return [(d.ToDict(), uid) for d, uid in value]


#: Generic encoders
_ENCODERS = {
  rpc_defs.ED_OBJECT_DICT: _ObjectToDict,
  rpc_defs.ED_OBJECT_DICT_LIST: _ObjectListToDict,
  rpc_defs.ED_NODE_TO_DISK_DICT: _EncodeNodeToDiskDict,
  rpc_defs.ED_FILE_DETAILS: _PrepareFileUpload,
  rpc_defs.ED_COMPRESS: _Compress,
  rpc_defs.ED_FINALIZE_EXPORT_DISKS: _PrepareFinalizeExportDisks,
  rpc_defs.ED_IMPEXP_IO: _EncodeImportExportIO,
  rpc_defs.ED_BLOCKDEV_RENAME: _EncodeBlockdevRename,
  }
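
# Encoder lookup sketch (restating the mapping above with an illustrative
# value): _RpcClientBase._EncodeArg resolves the encoder for a non-None
# argument kind through the encoder function given at construction time,
# typically _ENCODERS.get.
#
#   _ENCODERS.get(rpc_defs.ED_COMPRESS)("some long payload ...")
#   # -> same as _Compress("some long payload ...")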


class RpcRunner(_RpcClientBase,
                _generated_rpc.RpcClientDefault,
                _generated_rpc.RpcClientBootstrap,
                _generated_rpc.RpcClientConfig):
  """RPC runner class.

  """
  def __init__(self, context):
    """Initializes the RPC runner.

    @type context: C{masterd.GanetiContext}
    @param context: Ganeti context

    """
    self._cfg = context.cfg

    encoders = _ENCODERS.copy()

    # Add encoders requiring configuration object
    encoders.update({
      rpc_defs.ED_INST_DICT: self._InstDict,
      rpc_defs.ED_INST_DICT_HVP_BEP: self._InstDictHvpBep,
      rpc_defs.ED_INST_DICT_OSP: self._InstDictOsp,
      })

    # Resolver using configuration
    resolver = compat.partial(_NodeConfigResolver, self._cfg.GetNodeInfo,
                              self._cfg.GetAllNodesInfo)

    # Pylint doesn't recognize multiple inheritance properly, see
    # <http://www.logilab.org/ticket/36586> and
    # <http://www.logilab.org/ticket/35642>
    # pylint: disable=W0233
    _RpcClientBase.__init__(self, resolver, encoders.get,
                            lock_monitor_cb=context.glm.AddToLockMonitor)
    _generated_rpc.RpcClientConfig.__init__(self)
    _generated_rpc.RpcClientBootstrap.__init__(self)
    _generated_rpc.RpcClientDefault.__init__(self)

  def _InstDict(self, instance, hvp=None, bep=None, osp=None):
    """Convert the given instance to a dict.

    This is done via the instance's ToDict() method and additionally
    we fill the hvparams with the cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: an Instance object
    @type hvp: dict or None
    @param hvp: a dictionary with overridden hypervisor parameters
    @type bep: dict or None
    @param bep: a dictionary with overridden backend parameters
    @type osp: dict or None
    @param osp: a dictionary with overridden os parameters
    @rtype: dict
    @return: the instance dict, with the hvparams filled with the
        cluster defaults

    """
    idict = instance.ToDict()
    cluster = self._cfg.GetClusterInfo()
    idict["hvparams"] = cluster.FillHV(instance)
    if hvp is not None:
      idict["hvparams"].update(hvp)
    idict["beparams"] = cluster.FillBE(instance)
    if bep is not None:
      idict["beparams"].update(bep)
    idict["osparams"] = cluster.SimpleFillOS(instance.os, instance.osparams)
    if osp is not None:
      idict["osparams"].update(osp)
    for nic in idict["nics"]:
      nic['nicparams'] = objects.FillDict(
        cluster.nicparams[constants.PP_DEFAULT],
        nic['nicparams'])
    return idict
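
  # Note on the fill order above (a restatement of the code, not new
  # behaviour): cluster defaults are applied first via FillHV/FillBE/
  # SimpleFillOS, then any hvp/bep/osp overrides supplied by the caller are
  # layered on top, and NIC parameters are filled from the cluster's
  # PP_DEFAULT nicparams.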

  def _InstDictHvpBep(self, (instance, hvp, bep)):
    """Wrapper for L{_InstDict}.

    """
    return self._InstDict(instance, hvp=hvp, bep=bep)

  def _InstDictOsp(self, (instance, osparams)):
    """Wrapper for L{_InstDict}.

    """
    return self._InstDict(instance, osp=osparams)


class JobQueueRunner(_RpcClientBase, _generated_rpc.RpcClientJobQueue):
  """RPC wrappers for job queue.

  """
  def __init__(self, context, address_list):
    """Initializes this class.

    """
    if address_list is None:
      resolver = _SsconfResolver
    else:
      # Caller provided an address list
      resolver = _StaticResolver(address_list)

    _RpcClientBase.__init__(self, resolver, _ENCODERS.get,
                            lock_monitor_cb=context.glm.AddToLockMonitor)
    _generated_rpc.RpcClientJobQueue.__init__(self)


class BootstrapRunner(_RpcClientBase, _generated_rpc.RpcClientBootstrap):
  """RPC wrappers for bootstrapping.

  """
  def __init__(self):
    """Initializes this class.

    """
    _RpcClientBase.__init__(self, _SsconfResolver, _ENCODERS.get)
    _generated_rpc.RpcClientBootstrap.__init__(self)


class ConfigRunner(_RpcClientBase, _generated_rpc.RpcClientConfig):
  """RPC wrappers for L{config}.

  """
  def __init__(self, context, address_list):
    """Initializes this class.

    """
    if context:
      lock_monitor_cb = context.glm.AddToLockMonitor
    else:
      lock_monitor_cb = None

    if address_list is None:
      resolver = _SsconfResolver
    else:
      # Caller provided an address list
      resolver = _StaticResolver(address_list)

    _RpcClientBase.__init__(self, resolver, _ENCODERS.get,
                            lock_monitor_cb=lock_monitor_cb)
    _generated_rpc.RpcClientConfig.__init__(self)