# Copyright 2011-2013 GRNET S.A. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
#   1. Redistributions of source code must retain the above
#      copyright notice, this list of conditions and the following
#      disclaimer.
#
#   2. Redistributions in binary form must reproduce the above
#      copyright notice, this list of conditions and the following
#      disclaimer in the documentation and/or other materials
#      provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
# or implied, of GRNET S.A.

from threading import enumerate as activethreads

from os import fstat
from hashlib import new as newhashlib
from time import time
from StringIO import StringIO

from binascii import hexlify

from kamaki.clients import SilentEvent, sendlog
from kamaki.clients.pithos.rest_api import PithosRestClient
from kamaki.clients.storage import ClientError
from kamaki.clients.utils import path4url, filter_in


def _pithos_hash(block, blockhash):
    h = newhashlib(blockhash)
    h.update(block.rstrip('\x00'))
    return h.hexdigest()


def _range_up(start, end, a_range):
    if a_range:
        (rstart, rend) = a_range.split('-')
        (rstart, rend) = (int(rstart), int(rend))
        if rstart > end or rend < start:
            return (0, 0)
        start, end = max(start, rstart), min(end, rend)
    return (start, end)
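
# The two helpers above serve the chunked transfer paths: _pithos_hash
# computes the server-side block hash (trailing zero bytes are stripped
# before hashing), while _range_up intersects a block's [start, end] byte
# span with a user-requested 'from-to' range. Illustrative example: with a
# 4MB blocksize, block 1 spans bytes 4194304-8388607; for a_range
# '0-5000000' the intersection is (4194304, 5000000), while a disjoint
# range yields (0, 0).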


class PithosClient(PithosRestClient):
    """Synnefo Pithos+ API client"""

    def __init__(self, base_url, token, account=None, container=None):
        super(PithosClient, self).__init__(base_url, token, account, container)

    def purge_container(self, container=None):
        """Delete an empty container and destroy associated blocks"""
        cnt_back_up = self.container
        try:
            self.container = container or cnt_back_up
            self.container_delete(until=unicode(time()))
        finally:
            self.container = cnt_back_up

    def upload_object_unchunked(
            self, obj, f,
            withHashFile=False, size=None, etag=None,
            content_encoding=None,
            content_disposition=None,
            content_type=None,
            sharing=None, public=None):
        """
        :param obj: (str) remote object path
        :param f: open file descriptor
        :param withHashFile: (bool)
        :param size: (int) size of data to upload
        :param content_encoding: (str)
        :param content_disposition: (str)
        :param content_type: (str)
        :param sharing: {'read':[user and/or grp names],
            'write':[usr and/or grp names]}
        :param public: (bool)
        :returns: (dict) created object metadata
        """
        self._assert_container()

        if withHashFile:
            data = f.read()
            try:
                import json
                data = json.dumps(json.loads(data))
            except ValueError:
                raise ClientError('"%s" is not json-formatted' % f.name, 1)
            except SyntaxError:
                msg = '"%s" is not a valid hashmap file' % f.name
                raise ClientError(msg, 1)
            f = StringIO(data)
        else:
            data = f.read(size) if size else f.read()
        r = self.object_put(
            obj,
            data=data,
            etag=etag,
            content_encoding=content_encoding,
            content_disposition=content_disposition,
            content_type=content_type,
            permissions=sharing,
            public=public,
            success=201)
        return r.headers
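
    # A minimal usage sketch for the unchunked upload (the client variable
    # and file names below are hypothetical, assuming an authenticated
    # PithosClient with a container set):
    #
    #     with open('local.bin', 'rb') as f:
    #         meta = client.upload_object_unchunked('remote.bin', f)
    #
    # For large files prefer upload_object below, which splits the data into
    # container-sized blocks and sends only the blocks the server is missing.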

    def create_object_by_manifestation(
            self, obj,
            etag=None,
            content_encoding=None,
            content_disposition=None,
            content_type=None,
            sharing=None, public=None):
        """
        :param obj: (str) remote object path
        :param content_encoding: (str)
        :param content_disposition: (str)
        :param content_type: (str)
        :param sharing: {'read':[user and/or grp names],
            'write':[usr and/or grp names]}
        :param public: (bool)
        :returns: (dict) created object metadata
        """
        self._assert_container()
        r = self.object_put(
            obj,
            content_length=0,
            etag=etag,
            content_encoding=content_encoding,
            content_disposition=content_disposition,
            content_type=content_type,
            permissions=sharing,
            public=public,
            manifest='%s/%s' % (self.container, obj))
        return r.headers

    # upload_* auxiliary methods

    def _put_block_async(self, data, hash, upload_gen=None):
        event = SilentEvent(method=self._put_block, data=data, hash=hash)
        event.start()
        return event

    def _put_block(self, data, hash):
        r = self.container_post(
            update=True,
            content_type='application/octet-stream',
            content_length=len(data),
            data=data,
            format='json')
        assert r.json[0] == hash, 'Local hash does not match server'

    def _get_file_block_info(self, fileobj, size=None, cache=None):
        """
        :param fileobj: (file descriptor) source
        :param size: (int) size of data to upload from source
        :param cache: (dict) if provided, cache container info response to
            avoid redundant calls
        """
        if isinstance(cache, dict):
            try:
                meta = cache[self.container]
            except KeyError:
                meta = self.get_container_info()
                cache[self.container] = meta
        else:
            meta = self.get_container_info()
        blocksize = int(meta['x-container-block-size'])
        blockhash = meta['x-container-block-hash']
        size = size if size is not None else fstat(fileobj.fileno()).st_size
        nblocks = 1 + (size - 1) // blocksize
        return (blocksize, blockhash, size, nblocks)
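
    # Worked example of the block arithmetic above (illustrative numbers):
    # with the common 4MB block size (4194304 bytes), a 10000000-byte file
    # gives nblocks = 1 + (10000000 - 1) // 4194304 = 3, the last block only
    # partially filled.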

    def _create_or_get_missing_hashes(
            self, obj, json,
            size=None,
            format='json', hashmap=True,
            content_type=None,
            if_etag_match=None,
            if_etag_not_match=None,
            content_encoding=None,
            content_disposition=None,
            permissions=None, public=None,
            success=(201, 409)):
        r = self.object_put(
            obj,
            format=format, hashmap=hashmap,
            content_type=content_type,
            json=json,
            if_etag_match=if_etag_match,
            if_etag_not_match=if_etag_not_match,
            content_encoding=content_encoding,
            content_disposition=content_disposition,
            permissions=permissions, public=public,
            success=success)
        return (None if r.status_code == 201 else r.json), r.headers

    def _calculate_blocks_for_upload(
            self, blocksize, blockhash, size, nblocks, hashes, hmap, fileobj,
            hash_cb=None):
        offset = 0
        if hash_cb:
            hash_gen = hash_cb(nblocks)
            hash_gen.next()

        for i in range(nblocks):
            block = fileobj.read(min(blocksize, size - offset))
            bytes = len(block)
            hash = _pithos_hash(block, blockhash)
            hashes.append(hash)
            hmap[hash] = (offset, bytes)
            offset += bytes
            if hash_cb:
                hash_gen.next()
        msg = ('Failed to calculate uploaded blocks: '
               'Offset and object size do not match')
        assert offset == size, msg
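
    # The helper above walks the source file once, filling two structures:
    # hashes (ordered list, sent to the server as the object hashmap) and
    # hmap (hash -> (offset, length), used later to re-read only the blocks
    # the server reports as missing).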

    def _upload_missing_blocks(self, missing, hmap, fileobj, upload_gen=None):
        """upload missing blocks asynchronously"""
        self._init_thread_limit()
        flying, failures = [], []
        for hash in missing:
            offset, bytes = hmap[hash]
            fileobj.seek(offset)
            data = fileobj.read(bytes)
            r = self._put_block_async(data, hash, upload_gen)
            flying.append(r)
            unfinished = self._watch_thread_limit(flying)
            for thread in set(flying).difference(unfinished):
                if thread.exception:
                    failures.append(thread)
                    if isinstance(
                            thread.exception,
                            ClientError) and thread.exception.status == 502:
                        self.POOLSIZE = self._thread_limit
                elif thread.isAlive():
                    flying.append(thread)
                elif upload_gen:
                    upload_gen.next()
            flying = unfinished

        for thread in flying:
            thread.join()
            if thread.exception:
                failures.append(thread)
            elif upload_gen:
                upload_gen.next()

        return [failure.kwargs['hash'] for failure in failures]
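
    # Note on retries: the list returned here (hashes of blocks whose upload
    # thread raised) is fed back into another _upload_missing_blocks round by
    # upload_object below; if a full round makes no progress, the upload
    # aborts with a ClientError.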

    def upload_object(
            self, obj, f,
            size=None, hash_cb=None, upload_cb=None,
            etag=None, if_etag_match=None, if_not_exist=None,
            content_encoding=None,
            content_disposition=None,
            content_type=None,
            sharing=None, public=None,
            container_info_cache=None):
        """Upload an object using multiple connections (threads)

        :param obj: (str) remote object path
        :param f: open file descriptor (rb)
        :param hash_cb: optional progress.bar object for calculating hashes
        :param upload_cb: optional progress.bar object for uploading
        :param if_etag_match: (str) push that value to the if-match header at
            file creation
        :param if_not_exist: (bool) If true, the file will be uploaded ONLY if
            it does not exist remotely, otherwise the operation will fail.
            This covers the case where an object with the same path is created
            while the object is being uploaded.
        :param content_encoding: (str)
        :param content_disposition: (str)
        :param content_type: (str)
        :param sharing: {'read':[user and/or grp names],
            'write':[usr and/or grp names]}
        :param public: (bool)
        :param container_info_cache: (dict) if given, avoid redundant calls to
            server for container info (block size and hash information)
        """
        self._assert_container()

        block_info = (
            blocksize, blockhash, size, nblocks) = self._get_file_block_info(
                f, size, container_info_cache)
        (hashes, hmap, offset) = ([], {}, 0)
        if not content_type:
            content_type = 'application/octet-stream'

        self._calculate_blocks_for_upload(
            *block_info,
            hashes=hashes, hmap=hmap, fileobj=f, hash_cb=hash_cb)

        hashmap = dict(bytes=size, hashes=hashes)
        missing, obj_headers = self._create_or_get_missing_hashes(
            obj, hashmap,
            content_type=content_type,
            size=size,
            if_etag_match=if_etag_match,
            if_etag_not_match='*' if if_not_exist else None,
            content_encoding=content_encoding,
            content_disposition=content_disposition,
            permissions=sharing,
            public=public)

        if missing is None:
            return obj_headers

        if upload_cb:
            upload_gen = upload_cb(len(missing))
            for i in range(len(missing), len(hashmap['hashes']) + 1):
                try:
                    upload_gen.next()
                except StopIteration:
                    break
        else:
            upload_gen = None

        try:
            sendlog.info('%s blocks missing' % len(missing))
            num_of_blocks = len(missing)
            missing = self._upload_missing_blocks(missing, hmap, f, upload_gen)
            while missing:
                if num_of_blocks == len(missing):
                    raise ClientError(
                        '%s blocks failed to upload' % len(missing))
                num_of_blocks = len(missing)
                missing = self._upload_missing_blocks(
                    missing, hmap, f, upload_gen)
        except KeyboardInterrupt:
            sendlog.info('- - - wait for threads to finish')
            for thread in activethreads():
                thread.join()
            raise

        r = self.object_put(
            obj,
            format='json',
            hashmap=True,
            content_type=content_type,
            if_etag_match=if_etag_match,
            if_etag_not_match='*' if if_not_exist else None,
            etag=etag,
            json=hashmap,
            permissions=sharing,
            public=public,
            success=201)
        return r.headers
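
    # Hedged usage sketch for the chunked uploader (client variable, paths
    # and cache dict are hypothetical):
    #
    #     cache = {}
    #     with open('big.iso', 'rb') as f:
    #         client.upload_object(
    #             'images/big.iso', f,
    #             content_type='application/octet-stream',
    #             container_info_cache=cache)
    #
    # Re-uploading the same data is cheap: blocks the server already knows
    # are skipped, since only the hashes reported as missing are sent.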

    # download_* auxiliary methods
    def _get_remote_blocks_info(self, obj, **restargs):
        # retrieve object hashmap
        myrange = restargs.pop('data_range', None)
        hashmap = self.get_object_hashmap(obj, **restargs)
        restargs['data_range'] = myrange
        blocksize = int(hashmap['block_size'])
        blockhash = hashmap['block_hash']
        total_size = hashmap['bytes']
        #assert total_size/blocksize + 1 == len(hashmap['hashes'])
        map_dict = {}
        for i, h in enumerate(hashmap['hashes']):
            # a hash may appear more than once, so map each hash to the list
            # of block indices it occupies
            if h in map_dict:
                map_dict[h].append(i)
            else:
                map_dict[h] = [i]
        return (blocksize, blockhash, total_size, hashmap['hashes'], map_dict)

    def _dump_blocks_sync(
            self, obj, remote_hashes, blocksize, total_size, dst, range,
            **args):
        for blockid, blockhash in enumerate(remote_hashes):
            start = blocksize * blockid
            is_last = start + blocksize > total_size
            end = (total_size - 1) if is_last else (start + blocksize - 1)
            (start, end) = _range_up(start, end, range)
            args['data_range'] = 'bytes=%s-%s' % (start, end)
            r = self.object_get(obj, success=(200, 206), **args)
            self._cb_next()
            dst.write(r.content)
            dst.flush()

    def _get_block_async(self, obj, **args):
        event = SilentEvent(self.object_get, obj, success=(200, 206), **args)
        event.start()
        return event

    def _hash_from_file(self, fp, start, size, blockhash):
        fp.seek(start)
        block = fp.read(size)
        h = newhashlib(blockhash)
        h.update(block.strip('\x00'))
        return hexlify(h.digest())

    def _thread2file(self, flying, blockids, local_file, offset=0, **restargs):
        """write the results of a greenleted rest call to a file

        :param offset: the offset of the file up to blocksize
            - e.g. if the range is 10-100, all blocks will be written to
            normal_position - 10
        """
        for key, g in flying.items():
            if g.isAlive():
                continue
            if g.exception:
                raise g.exception
            block = g.value.content
            for block_start in blockids[key]:
                local_file.seek(block_start + offset)
                local_file.write(block)
                self._cb_next()
            flying.pop(key)
            blockids.pop(key)
        local_file.flush()

    def _dump_blocks_async(
            self, obj, remote_hashes, blocksize, total_size, local_file,
            blockhash=None, resume=False, filerange=None, **restargs):
        file_size = fstat(local_file.fileno()).st_size if resume else 0
        flying = dict()
        blockid_dict = dict()
        offset = 0
        if filerange is not None:
            rstart = int(filerange.split('-')[0])
            offset = rstart if blocksize > rstart else rstart % blocksize

        self._init_thread_limit()
        for block_hash, blockids in remote_hashes.items():
            blockids = [blk * blocksize for blk in blockids]
            unsaved = [blk for blk in blockids if not (
                blk < file_size and block_hash == self._hash_from_file(
                    local_file, blk, blocksize, blockhash))]
            self._cb_next(len(blockids) - len(unsaved))
            if unsaved:
                key = unsaved[0]
                self._watch_thread_limit(flying.values())
                self._thread2file(
                    flying, blockid_dict, local_file, offset,
                    **restargs)
                end = total_size - 1 if (
                    key + blocksize > total_size) else key + blocksize - 1
                start, end = _range_up(key, end, filerange)
                restargs['async_headers'] = {
                    'Range': 'bytes=%s-%s' % (start, end)}
                flying[key] = self._get_block_async(obj, **restargs)
                blockid_dict[key] = unsaved

        for thread in flying.values():
            thread.join()
        self._thread2file(flying, blockid_dict, local_file, offset, **restargs)
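
    # Resume logic in the async dump above: when resume is set, a block is
    # re-fetched only if the bytes already present at its offset in the local
    # file do not hash to the expected block hash. Everything else is fetched
    # concurrently with one ranged GET per block and written at
    # (block offset - range offset).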

    def download_object(
            self, obj, dst,
            download_cb=None,
            version=None, resume=False, range_str=None,
            if_match=None, if_none_match=None,
            if_modified_since=None,
            if_unmodified_since=None):
        """Download an object (multiple connections, random blocks)

        :param obj: (str) remote object path
        :param dst: open file descriptor (wb+)
        :param download_cb: optional progress.bar object for downloading
        :param version: (str) file version
        :param resume: (bool) if set, preserve already downloaded file parts
        :param range_str: (str) 'from-to', where from and to are file
            positions (int) in bytes
        :param if_match: (str)
        :param if_none_match: (str)
        :param if_modified_since: (str) formatted date
        :param if_unmodified_since: (str) formatted date
        """
        restargs = dict(
            version=version,
            data_range=None if range_str is None else 'bytes=%s' % range_str,
            if_match=if_match,
            if_none_match=if_none_match,
            if_modified_since=if_modified_since,
            if_unmodified_since=if_unmodified_since)

        (
            blocksize, blockhash, total_size, hash_list,
            remote_hashes) = self._get_remote_blocks_info(obj, **restargs)
        assert total_size >= 0

        if download_cb:
            self.progress_bar_gen = download_cb(len(hash_list))
            self._cb_next()

        if dst.isatty():
            self._dump_blocks_sync(
                obj, hash_list, blocksize, total_size, dst, range_str,
                **restargs)
        else:
            self._dump_blocks_async(
                obj, remote_hashes, blocksize, total_size, dst,
                blockhash, resume, range_str, **restargs)
            if not range_str:
                dst.truncate(total_size)

        self._complete_cb()
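
    # Hedged usage sketch (client variable and paths hypothetical):
    #
    #     with open('big.iso', 'wb+') as dst:
    #         client.download_object('images/big.iso', dst)
    #
    # To resume an interrupted download, reopen the partial file so existing
    # data is preserved (e.g. 'rb+') and pass resume=True; blocks that are
    # already present and hash-match are then skipped.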

    def download_to_string(
            self, obj,
            download_cb=None,
            version=None, range_str=None,
            if_match=None, if_none_match=None,
            if_modified_since=None,
            if_unmodified_since=None):
        """Download an object to a string (multiple connections). This method
        uses threads for http requests, but stores all content in memory.

        :param obj: (str) remote object path
        :param download_cb: optional progress.bar object for downloading
        :param version: (str) file version
        :param range_str: (str) 'from-to', where from and to are file
            positions (int) in bytes
        :param if_match: (str)
        :param if_none_match: (str)
        :param if_modified_since: (str) formatted date
        :param if_unmodified_since: (str) formatted date
        :returns: (str) the whole object contents
        """
        restargs = dict(
            version=version,
            data_range=None if range_str is None else 'bytes=%s' % range_str,
            if_match=if_match,
            if_none_match=if_none_match,
            if_modified_since=if_modified_since,
            if_unmodified_since=if_unmodified_since)

        (
            blocksize, blockhash, total_size, hash_list,
            remote_hashes) = self._get_remote_blocks_info(obj, **restargs)
        assert total_size >= 0

        if download_cb:
            self.progress_bar_gen = download_cb(len(hash_list))
            self._cb_next()

        num_of_blocks = len(remote_hashes)
        ret = [''] * num_of_blocks
        self._init_thread_limit()
        flying = dict()
        for blockid, blockhash in enumerate(remote_hashes):
            start = blocksize * blockid
            is_last = start + blocksize > total_size
            end = (total_size - 1) if is_last else (start + blocksize - 1)
            (start, end) = _range_up(start, end, range_str)
            restargs['data_range'] = 'bytes=%s-%s' % (start, end)
            self._watch_thread_limit(flying.values())
            flying[blockid] = self._get_block_async(obj, **restargs)
            for runid, thread in flying.items():
                if (blockid + 1) == num_of_blocks:
                    thread.join()
                elif thread.isAlive():
                    continue
                if thread.exception:
                    raise thread.exception
                ret[runid] = thread.value.content
                self._cb_next()
                flying.pop(runid)

        self._complete_cb()
        return ''.join(ret)

    # Command Progress Bar method
    def _cb_next(self, step=1):
        if hasattr(self, 'progress_bar_gen'):
            try:
                for i in xrange(step):
                    self.progress_bar_gen.next()
            except:
                pass

    def _complete_cb(self):
        while True:
            try:
                self.progress_bar_gen.next()
            except:
                break

    def get_object_hashmap(
            self, obj,
            version=None,
            if_match=None, if_none_match=None,
            if_modified_since=None,
            if_unmodified_since=None,
            data_range=None):
        """
        :param obj: (str) remote object path
        :param if_match: (str)
        :param if_none_match: (str)
        :param if_modified_since: (str) formatted date
        :param if_unmodified_since: (str) formatted date
        :param data_range: (str) from-to where from and to are integers
            denoting file positions in bytes
        :returns: (dict) the object hashmap
        """
        try:
            r = self.object_get(
                obj,
                hashmap=True,
                version=version,
                if_etag_match=if_match,
                if_etag_not_match=if_none_match,
                if_modified_since=if_modified_since,
                if_unmodified_since=if_unmodified_since,
                data_range=data_range)
        except ClientError as err:
            if err.status == 304 or err.status == 412:
                return {}
            raise
        return r.json

    def set_account_group(self, group, usernames):
        """
        :param group: (str)
        :param usernames: (list)
        """
        self.account_post(update=True, groups={group: usernames})

    def del_account_group(self, group):
        """
        :param group: (str)
        """
        self.account_post(update=True, groups={group: []})

    def get_account_info(self, until=None):
        """
        :param until: (str) formatted date
        :returns: (dict)
        """
        r = self.account_head(until=until)
        if r.status_code == 401:
            raise ClientError("No authorization", status=401)
        return r.headers

    def get_account_quota(self):
        """:returns: (dict)"""
        return filter_in(
            self.get_account_info(),
            'X-Account-Policy-Quota',
            exactMatch=True)

    def get_account_versioning(self):
        """:returns: (dict)"""
        return filter_in(
            self.get_account_info(),
            'X-Account-Policy-Versioning',
            exactMatch=True)

    def get_account_meta(self, until=None):
        """
        :param until: (str) formatted date
        :returns: (dict)
        """
        return filter_in(self.get_account_info(until=until), 'X-Account-Meta-')

    def get_account_group(self):
        """:returns: (dict)"""
        return filter_in(self.get_account_info(), 'X-Account-Group-')

    def set_account_meta(self, metapairs):
        """:param metapairs: (dict) {key1:val1, key2:val2, ...}"""
        assert(type(metapairs) is dict)
        self.account_post(update=True, metadata=metapairs)

    def del_account_meta(self, metakey):
        """:param metakey: (str) metadatum key"""
        self.account_post(update=True, metadata={metakey: ''})

    def set_account_quota(self, quota):
        """:param quota: (int)"""
        self.account_post(update=True, quota=quota)

    def set_account_versioning(self, versioning):
        """:param versioning: (str)"""
        self.account_post(update=True, versioning=versioning)

    def list_containers(self):
        """:returns: (list) the account's containers"""
        r = self.account_get()
        return r.json

    def del_container(self, until=None, delimiter=None):
        """
        :param until: (str) formatted date
        :param delimiter: (str) with '/' the container contents are also
            deleted (the container is emptied first)
        :raises ClientError: 404 Container does not exist
        :raises ClientError: 409 Container is not empty
        """
        self._assert_container()
        r = self.container_delete(
            until=until,
            delimiter=delimiter,
            success=(204, 404, 409))
        if r.status_code == 404:
            raise ClientError(
                'Container "%s" does not exist' % self.container,
                r.status_code)
        elif r.status_code == 409:
            raise ClientError(
                'Container "%s" is not empty' % self.container,
                r.status_code)

    def get_container_versioning(self, container=None):
        """
        :param container: (str)
        :returns: (dict)
        """
        cnt_back_up = self.container
        try:
            self.container = container or cnt_back_up
            return filter_in(
                self.get_container_info(),
                'X-Container-Policy-Versioning')
        finally:
            self.container = cnt_back_up

    def get_container_limit(self, container=None):
        """
        :param container: (str)
        :returns: (dict)
        """
        cnt_back_up = self.container
        try:
            self.container = container or cnt_back_up
            return filter_in(
                self.get_container_info(),
                'X-Container-Policy-Quota')
        finally:
            self.container = cnt_back_up

    def get_container_info(self, until=None):
        """
        :param until: (str) formatted date
        :returns: (dict)
        :raises ClientError: 404 Container not found
        """
        try:
            r = self.container_head(until=until)
        except ClientError as err:
            err.details.append('for container %s' % self.container)
            raise err
        return r.headers

    def get_container_meta(self, until=None):
        """
        :param until: (str) formatted date
        :returns: (dict)
        """
        return filter_in(
            self.get_container_info(until=until), 'X-Container-Meta')

    def get_container_object_meta(self, until=None):
        """
        :param until: (str) formatted date
        :returns: (dict)
        """
        return filter_in(
            self.get_container_info(until=until), 'X-Container-Object-Meta')

    def set_container_meta(self, metapairs):
        """:param metapairs: (dict) {key1:val1, key2:val2, ...}"""
        assert(type(metapairs) is dict)
        self.container_post(update=True, metadata=metapairs)

    def del_container_meta(self, metakey):
        """:param metakey: (str) metadatum key"""
        self.container_post(update=True, metadata={metakey: ''})

    def set_container_limit(self, limit):
        """:param limit: (int)"""
        self.container_post(update=True, quota=limit)

    def set_container_versioning(self, versioning):
        """:param versioning: (str)"""
        self.container_post(update=True, versioning=versioning)

    def del_object(self, obj, until=None, delimiter=None):
        """
        :param obj: (str) remote object path
        :param until: (str) formatted date
        :param delimiter: (str)
        """
        self._assert_container()
        self.object_delete(obj, until=until, delimiter=delimiter)

    def set_object_meta(self, obj, metapairs):
        """
        :param obj: (str) remote object path
        :param metapairs: (dict) {key1:val1, key2:val2, ...}
        """
        assert(type(metapairs) is dict)
        self.object_post(obj, update=True, metadata=metapairs)

    def del_object_meta(self, obj, metakey):
        """
        :param obj: (str) remote object path
        :param metakey: (str) metadatum key
        """
        self.object_post(obj, update=True, metadata={metakey: ''})

    def publish_object(self, obj):
        """
        :param obj: (str) remote object path
        :returns: (str) access url
        """
        self.object_post(obj, update=True, public=True)
        info = self.get_object_info(obj)
        pref, sep, rest = self.base_url.partition('//')
        base = rest.split('/')[0]
        return '%s%s%s/%s' % (pref, sep, base, info['x-object-public'])
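
    # Example of the URL composition above (values illustrative): with
    # base_url 'https://example.com/pithos' and an x-object-public value of
    # 'pithos/some-token', the returned link is
    # 'https://example.com/pithos/some-token'.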

    def unpublish_object(self, obj):
        """
        :param obj: (str) remote object path
        """
        self.object_post(obj, update=True, public=False)

    def get_object_info(self, obj, version=None):
        """
        :param obj: (str) remote object path
        :param version: (str)
        :returns: (dict)
        """
        try:
            r = self.object_head(obj, version=version)
            return r.headers
        except ClientError as ce:
            if ce.status == 404:
                raise ClientError('Object %s not found' % obj, status=404)
            raise

    def get_object_meta(self, obj, version=None):
        """
        :param obj: (str) remote object path
        :param version: (str)
        :returns: (dict)
        """
        return filter_in(
            self.get_object_info(obj, version=version), 'X-Object-Meta')

    def get_object_sharing(self, obj):
        """
        :param obj: (str) remote object path
        :returns: (dict)
        """
        r = filter_in(
            self.get_object_info(obj), 'X-Object-Sharing', exactMatch=True)
        reply = {}
        if len(r) > 0:
            perms = r['x-object-sharing'].split(';')
            for perm in perms:
                if '=' not in perm:
                    raise ClientError('Incorrect reply format')
                (key, val) = perm.strip().split('=')
                reply[key] = val
        return reply

    def set_object_sharing(
            self, obj,
            read_permition=False, write_permition=False):
        """Give read/write permissions to an object.

        :param obj: (str) remote object path
        :param read_permition: (list - bool) users and user groups that get
            read permission for this object - False means all previous read
            permissions will be removed
        :param write_permition: (list - bool) users and user groups that get
            write permission for this object - False means all previous write
            permissions will be removed
        """
        perms = dict(read=read_permition or '', write=write_permition or '')
        self.object_post(obj, update=True, permissions=perms)

    def del_object_sharing(self, obj):
        """
        :param obj: (str) remote object path
        """
        self.set_object_sharing(obj)
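
    # Hedged usage sketch (user and group names hypothetical):
    #
    #     client.set_object_sharing(
    #         'docs/report.pdf',
    #         read_permition=['user1@example.com', 'mygroup'],
    #         write_permition=['user2@example.com'])
    #
    # del_object_sharing (equivalent to passing False for both permissions)
    # clears the object's sharing entries.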

    def append_object(self, obj, source_file, upload_cb=None):
        """
        :param obj: (str) remote object path
        :param source_file: open file descriptor
        :param upload_cb: progress.bar for uploading
        """
        self._assert_container()
        meta = self.get_container_info()
        blocksize = int(meta['x-container-block-size'])
        filesize = fstat(source_file.fileno()).st_size
        nblocks = 1 + (filesize - 1) // blocksize
        offset = 0
        if upload_cb:
            upload_gen = upload_cb(nblocks)
            upload_gen.next()
        for i in range(nblocks):
            block = source_file.read(min(blocksize, filesize - offset))
            offset += len(block)
            self.object_post(
                obj,
                update=True,
                content_range='bytes */*',
                content_type='application/octet-stream',
                content_length=len(block),
                data=block)
            if upload_cb:
                upload_gen.next()

    def truncate_object(self, obj, upto_bytes):
        """
        :param obj: (str) remote object path
        :param upto_bytes: max number of bytes to leave on file
        """
        self.object_post(
            obj,
            update=True,
            content_range='bytes 0-%s/*' % upto_bytes,
            content_type='application/octet-stream',
            object_bytes=upto_bytes,
            source_object=path4url(self.container, obj))

    def overwrite_object(self, obj, start, end, source_file, upload_cb=None):
        """Overwrite a part of an object from local source file

        :param obj: (str) remote object path
        :param start: (int) position in bytes to start overwriting from
        :param end: (int) position in bytes to stop overwriting at
        :param source_file: open file descriptor
        :param upload_cb: progress.bar for uploading
        """
        r = self.get_object_info(obj)
        rf_size = int(r['content-length'])
        if rf_size < int(start):
            raise ClientError(
                'Range start exceeds file size',
                status=416)
        elif rf_size < int(end):
            raise ClientError(
                'Range end exceeds file size',
                status=416)
        self._assert_container()
        meta = self.get_container_info()
        blocksize = int(meta['x-container-block-size'])
        filesize = fstat(source_file.fileno()).st_size
        datasize = int(end) - int(start) + 1
        nblocks = 1 + (datasize - 1) // blocksize
        offset = 0
        if upload_cb:
            upload_gen = upload_cb(nblocks)
            upload_gen.next()
        for i in range(nblocks):
            read_size = min(blocksize, filesize - offset, datasize - offset)
            block = source_file.read(read_size)
            self.object_post(
                obj,
                update=True,
                content_type='application/octet-stream',
                content_length=len(block),
                content_range='bytes %s-%s/*' % (
                    start + offset,
                    start + offset + len(block) - 1),
                data=block)
            offset += len(block)
            if upload_cb:
                upload_gen.next()

    def copy_object(
            self, src_container, src_object, dst_container,
            dst_object=None,
            source_version=None,
            source_account=None,
            public=False,
            content_type=None,
            delimiter=None):
        """
        :param src_container: (str) source container
        :param src_object: (str) source object path
        :param dst_container: (str) destination container
        :param dst_object: (str) destination object path
        :param source_version: (str) source object version
        :param source_account: (str) account to copy from
        :param public: (bool)
        :param content_type: (str)
        :param delimiter: (str)
        """
        self._assert_account()
        self.container = dst_container
        src_path = path4url(src_container, src_object)
        self.object_put(
            dst_object or src_object,
            success=201,
            copy_from=src_path,
            content_length=0,
            source_version=source_version,
            source_account=source_account,
            public=public,
            content_type=content_type,
            delimiter=delimiter)

    def move_object(
            self, src_container, src_object, dst_container,
            dst_object=None,
            source_account=None,
            source_version=None,
            public=False,
            content_type=None,
            delimiter=None):
        """
        :param src_container: (str) source container
        :param src_object: (str) source object path
        :param dst_container: (str) destination container
        :param dst_object: (str) destination object path
        :param source_account: (str) account to move from
        :param source_version: (str) source object version
        :param public: (bool)
        :param content_type: (str)
        :param delimiter: (str)
        """
        self._assert_account()
        self.container = dst_container
        dst_object = dst_object or src_object
        src_path = path4url(src_container, src_object)
        self.object_put(
            dst_object,
            success=201,
            move_from=src_path,
            content_length=0,
            source_account=source_account,
            source_version=source_version,
            public=public,
            content_type=content_type,
            delimiter=delimiter)
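
    # Hedged usage sketch (container and object names hypothetical):
    #
    #     client.copy_object('images', 'a.iso', 'backups', 'a-copy.iso')
    #     client.move_object('images', 'b.iso', 'archive')
    #
    # Note that both calls switch self.container to the destination container
    # as a side effect.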

    def get_sharing_accounts(self, limit=None, marker=None, *args, **kwargs):
        """Get accounts that share with self.account

        :param limit: (str)
        :param marker: (str)
        :returns: (list)
        """
        self._assert_account()

        self.set_param('format', 'json')
        self.set_param('limit', limit, iff=limit is not None)
        self.set_param('marker', marker, iff=marker is not None)

        path = ''
        success = kwargs.pop('success', (200, 204))
        r = self.get(path, *args, success=success, **kwargs)
        return r.json

    def get_object_versionlist(self, obj):
        """
        :param obj: (str) remote object path
        :returns: (list)
        """
        self._assert_container()
        r = self.object_get(obj, format='json', version='list')
        return r.json['versions']