Merge commit 'v0.9.0' into packaging
authorKostas Papadimitriou <kpap@grnet.gr>
Wed, 22 Feb 2012 10:36:13 +0000 (12:36 +0200)
committerKostas Papadimitriou <kpap@grnet.gr>
Wed, 22 Feb 2012 10:36:13 +0000 (12:36 +0200)
Conflicts:
pithos/__init__.py
pithos/api/management/commands/__init__.py
pithos/backends/lib/rabbitmq/__init__.py
pithos/backends/lib/rabbitmq/queue.py
pithos/lib/user.py
pithos/middleware/user.py
pithos/settings.py
pithos/ui/__init__.py
pithos/ui/urls.py
pithos/ui/views.py
snf-pithos-app/pithos/api/settings.py
snf-pithos-app/pithos/middleware/log.py
snf-pithos-app/pithos/middleware/user.py
snf-pithos-app/pithos/ui/settings.py
snf-pithos-app/pithos/ui/templates/feedback_mail.txt
snf-pithos-app/pithos/ui/views.py
snf-pithos-lib/pithos/lib/__init__.py
snf-pithos-tools/pithos/__init__.py
snf-pithos-tools/pithos/tools/dispatcher.py

48 files changed:
README.upgrade
docs/source/adminguide.rst
docs/source/devguide.rst
htdocs/index.html
htdocs/okeanos.html [new file with mode: 0644]
htdocs/site_media/css/site.css
htdocs/site_media/images/okeanos-logo-alpha.png [new file with mode: 0644]
snf-pithos-app/pithos/api/functions.py
snf-pithos-app/pithos/api/management/__init__.py [new file with mode: 0644]
snf-pithos-app/pithos/api/management/commands/__init__.py [new file with mode: 0644]
snf-pithos-app/pithos/api/management/commands/storagequota.py [new file with mode: 0644]
snf-pithos-app/pithos/api/public.py
snf-pithos-app/pithos/api/settings.py
snf-pithos-app/pithos/api/synnefo_settings.py [new file with mode: 0644]
snf-pithos-app/pithos/api/urls.py
snf-pithos-app/pithos/api/util.py
snf-pithos-app/pithos/middleware/__init__.py
snf-pithos-app/pithos/middleware/log.py
snf-pithos-app/pithos/ui/templates/feedback_mail.txt [deleted file]
snf-pithos-app/pithos/urls.py
snf-pithos-backend/pithos/backends/base.py
snf-pithos-backend/pithos/backends/lib/hashfiler/blocker.py
snf-pithos-backend/pithos/backends/lib/hashfiler/mapper.py
snf-pithos-backend/pithos/backends/lib/hashfiler/store.py
snf-pithos-backend/pithos/backends/lib/rabbitmq/__init__.py [moved from snf-pithos-app/pithos/ui/views.py with 60% similarity]
snf-pithos-backend/pithos/backends/lib/rabbitmq/queue.py [new file with mode: 0644]
snf-pithos-backend/pithos/backends/lib/sqlalchemy/__init__.py
snf-pithos-backend/pithos/backends/lib/sqlalchemy/groups.py
snf-pithos-backend/pithos/backends/lib/sqlalchemy/node.py
snf-pithos-backend/pithos/backends/lib/sqlalchemy/permissions.py
snf-pithos-backend/pithos/backends/lib/sqlalchemy/public.py
snf-pithos-backend/pithos/backends/lib/sqlalchemy/xfeatures.py
snf-pithos-backend/pithos/backends/lib/sqlite/__init__.py
snf-pithos-backend/pithos/backends/lib/sqlite/node.py
snf-pithos-backend/pithos/backends/lib/sqlite/permissions.py
snf-pithos-backend/pithos/backends/lib/sqlite/public.py
snf-pithos-backend/pithos/backends/lib/sqlite/xfeatures.py
snf-pithos-backend/pithos/backends/modular.py
snf-pithos-lib/pithos/lib/client.py
snf-pithos-lib/pithos/lib/dictconfig.py [new file with mode: 0644]
snf-pithos-lib/pithos/lib/queue.py [new file with mode: 0755]
snf-pithos-lib/pithos/lib/user.py [moved from snf-pithos-app/pithos/middleware/user.py with 65% similarity]
snf-pithos-lib/pithos/lib/util.py
snf-pithos-tools/pithos/tools/dispatcher.py
snf-pithos-tools/pithos/tools/fs.py
snf-pithos-tools/pithos/tools/sh.py
snf-pithos-tools/pithos/tools/sync.py
snf-pithos-tools/pithos/tools/test.py

index 3560795..896e637 100644 (file)
@@ -63,3 +63,7 @@ UPGRADE
     mysql> alter table versions add column `uuid` varchar(64) DEFAULT '' NOT NULL after `muser`;
     mysql> update versions v, tmp_uuids u set v.`uuid` = u.`uuid` where v.`node` = u.`node`;
     mysql> create index idx_versions_node_uuid on versions(uuid);
+
+0.8.2 -> 0.9.0
+--------------
+* No upgrade path provided. Please reinstall and reconfigure.
index 98434c9..4e8ae5e 100644 (file)
@@ -64,6 +64,16 @@ Edit ``/etc/apache2/sites-available/pithos`` (change the ``ServerName`` directiv
     CustomLog ${APACHE_LOG_DIR}/pithos.access.log combined
   </VirtualHost>
 
+To disable non-SSL connections, ``/etc/apache2/sites-available/pithos`` should be::
+
+  <VirtualHost *:80>
+    ServerAdmin webmaster@pithos.dev.grnet.gr
+    ServerName pithos.dev.grnet.gr
+
+    RewriteEngine On
+    RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI}
+  </VirtualHost>
+
 Edit ``/etc/apache2/sites-available/pithos-ssl`` (assuming files in ``/etc/ssl/private/pithos.dev.grnet.gr.key`` and ``/etc/ssl/certs/pithos.dev.grnet.gr.crt`` - change the ``ServerName`` directive)::
 
   <IfModule mod_ssl.c>
index 3df27dc..d034a65 100644 (file)
@@ -27,6 +27,8 @@ Document Revisions
 =========================  ================================
 Revision                   Description
 =========================  ================================
+0.9 (Feb 17, 2012)         Change permissions model.
+\                          Do not include user-defined metadata in account/container/object listings.
 0.8 (Jan 24, 2012)         Update allowed versioning values.
 \                          Change policy/meta formatting in JSON/XML replies.
 \                          Document that all non-ASCII characters in headers should be URL-encoded.
@@ -276,7 +278,7 @@ The reply is a list of container names. Account headers (as in a ``HEAD`` reques
 Cross-user requests are not allowed to use ``until`` and only include the account/container modification dates in the reply.
 
 If a ``format=xml`` or ``format=json`` argument is given, extended information on the containers will be returned, serialized in the chosen format.
-For each container, the information will include all container metadata (names will be in lower case and with hyphens replaced with underscores):
+For each container, the information will include all container metadata, except user-defined (names will be in lower case and with hyphens replaced with underscores):
 
 ===========================  ============================
 Name                         Description
@@ -286,8 +288,7 @@ count                        The number of objects inside the container
 bytes                        The total size of the objects inside the container
 last_modified                The last container modification date (regardless of ``until``)
 x_container_until_timestamp  The last container modification date until the timestamp provided
-x_container_policy_*         Container behavior and limits
-x_container_meta_*           Optional user defined metadata
+x_container_policy           Container behavior and limits
 ===========================  ============================
 
 Example ``format=json`` reply:
@@ -298,8 +299,7 @@ Example ``format=json`` reply:
     "bytes": 62452,
     "count": 8374,
     "last_modified": "2011-12-02T08:10:41.565891+00:00",
-    "x_container_policy": {"quota": "53687091200", "versioning": "auto"},
-    "x_container_meta": {"a": "b", "1": "2"}}, ...]
+    "x_container_policy": {"quota": "53687091200", "versioning": "auto"}}, ...]
 
 Example ``format=xml`` reply:
 
@@ -316,15 +316,11 @@ Example ``format=xml`` reply:
         <key>quota</key><value>53687091200</value>
         <key>versioning</key><value>auto</value>
       </x_container_policy>
-      <x_container_meta>
-        <key>a</key><value>b</value>
-        <key>1</key><value>2</value>
-      </x_container_meta>
     </container>
     <container>...</container>
   </account>
 
-For more examples of container details returned in JSON/XML formats refer to the OOS API documentation. In addition to the OOS API, Pithos returns all fields. Policy and metadata values are grouped and returned as key-value pairs.
+For more examples of container details returned in JSON/XML formats refer to the OOS API documentation. In addition to the OOS API, Pithos returns policy fields, grouped as key-value pairs.
 
 ===========================  =====================
 Return Code                  Description
@@ -470,7 +466,7 @@ Last-Modified                The last container modification date
 ===========================  ===============================
 
 If a ``format=xml`` or ``format=json`` argument is given, extended information on the objects will be returned, serialized in the chosen format.
-For each object, the information will include all object metadata (names will be in lower case and with hyphens replaced with underscores):
+For each object, the information will include all object metadata, except user-defined (names will be in lower case and with hyphens replaced with underscores). User-defined metadata includes ``X-Object-Meta-*``, ``X-Object-Manifest``, ``Content-Disposition`` and ``Content-Encoding`` keys. Also, sharing directives will only be included with the actual shared objects (inherited permissions are not calculated):
 
 ==========================  ======================================
 Name                        Description
@@ -479,23 +475,18 @@ name                        The name of the object
 hash                        The ETag of the object
 bytes                       The size of the object
 content_type                The MIME content type of the object
-content_encoding            The encoding of the object (optional)
-content-disposition         The presentation style of the object (optional)
 last_modified               The last object modification date (regardless of version)
 x_object_hash               The Merkle hash
 x_object_uuid               The object's UUID
 x_object_version            The object's version identifier
 x_object_version_timestamp  The object's version timestamp
 x_object_modified_by        The user that committed the object's version
-x_object_manifest           Object parts prefix in ``<container>/<object>`` form (optional)
 x_object_sharing            Object permissions (optional)
-x_object_shared_by          Object inheriting permissions (optional)
 x_object_allowed_to         Allowed actions on object (optional)
 x_object_public             Object's publicly accessible URI (optional)
-x_object_meta_*             Optional user defined metadata
 ==========================  ======================================
 
-Sharing metadata will only be returned if there is no ``until`` parameter defined.
+Sharing metadata and last modification timestamp will only be returned if there is no ``until`` parameter defined.
 
 Extended replies may also include virtual directory markers in separate sections of the ``json`` or ``xml`` results.
 Virtual directory markers are only included when ``delimiter`` is explicitly set. They correspond to the substrings up to and including the first occurrence of the delimiter.
@@ -511,7 +502,6 @@ Example ``format=json`` reply:
     "hash": "d41d8cd98f00b204e9800998ecf8427e",
     "content_type": "application/octet-stream",
     "last_modified": "2011-12-02T08:10:41.565891+00:00",
-    "x_object_meta": {"asdf": "qwerty"},
     "x_object_hash": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
     "x_object_uuid": "8ed9af1b-c948-4bb6-82b0-48344f5c822c",
     "x_object_version": 98,
@@ -530,9 +520,6 @@ Example ``format=xml`` reply:
       <hash>d41d8cd98f00b204e9800998ecf8427e</hash>
       <content_type>application/octet-stream</content_type>
       <last_modified>2011-12-02T08:10:41.565891+00:00</last_modified>
-      <x_object_meta>
-        <key>asdf</key><value>qwerty</value>
-      </x_object_meta>
       <x_object_hash>e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855</x_object_hash>
       <x_object_uuid>8ed9af1b-c948-4bb6-82b0-48344f5c822c</x_object_uuid>
       <x_object_version>98</x_object_version>
@@ -542,7 +529,7 @@ Example ``format=xml`` reply:
     <object>...</object>
   </container>
 
-For more examples of container details returned in JSON/XML formats refer to the OOS API documentation. In addition to the OOS API, Pithos returns all fields. Metadata values are grouped and returned as key-value pairs.
+For more examples of container details returned in JSON/XML formats refer to the OOS API documentation. In addition to the OOS API, Pithos returns more fields that should help with synchronization.
 
 ===========================  ===============================
 Return Code                  Description
@@ -863,7 +850,7 @@ The ``X-Object-Sharing`` header may include either a ``read=...`` comma-separate
 Return Code                     Description
 ==============================  ==============================
 201 (Created)                   The object has been created
-409 (Conflict)                  The object can not be created from the provided hashmap, or there are conflicting permissions (a list of missing hashes, or a list of conflicting sharing paths will be included in the reply)
+409 (Conflict)                  The object can not be created from the provided hashmap (a list of missing hashes will be included in the reply)
 411 (Length Required)           Missing ``Content-Length`` or ``Content-Type`` in the request
 413 (Request Entity Too Large)  Insufficient quota to complete the request
 422 (Unprocessable Entity)      The MD5 checksum of the data written to the storage system does not match the (optionally) supplied ETag value
@@ -913,7 +900,6 @@ X-Object-Version            The object's new version
 Return Code                     Description
 ==============================  ==============================
 201 (Created)                   The object has been created
-409 (Conflict)                  There are conflicting permissions (a list of conflicting sharing paths will be included in the reply)
 413 (Request Entity Too Large)  Insufficient quota to complete the request
 ==============================  ==============================
 
@@ -991,7 +977,6 @@ Return Code                     Description
 ==============================  ==============================
 202 (Accepted)                  The request has been accepted (not a data update)
 204 (No Content)                The request succeeded (data updated)
-409 (Conflict)                  There are conflicting permissions (a list of conflicting sharing paths will be included in the reply)
 411 (Length Required)           Missing ``Content-Length`` in the request
 413 (Request Entity Too Large)  Insufficient quota to complete the request
 416 (Range Not Satisfiable)     The supplied range is invalid
@@ -1004,7 +989,7 @@ The ``POST`` method can also be used for creating an object via a standard HTML
     <input type="submit">
   </form>
 
-This will create/override the object with the given name, as if using ``PUT``. The ``Content-Type`` of the object will be set to the value of the corresponding header sent in the part of the request containing the data (usually, automatically handled by the browser). Metadata, sharing and other object attributes can not be set this way.
+This will create/override the object with the given name, as if using ``PUT``. The ``Content-Type`` of the object will be set to the value of the corresponding header sent in the part of the request containing the data (usually, automatically handled by the browser). Metadata, sharing and other object attributes can not be set this way. The response will contain the object's ETag.
 
 ==========================  ===============================
 Reply Header Name           Value
@@ -1045,7 +1030,7 @@ Return Code                  Description
 Sharing and Public Objects
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-Read and write control in Pithos is managed by setting appropriate permissions with the ``X-Object-Sharing`` header. The permissions are applied using prefix-based inheritance. Thus, each set of authorization directives is applied to all objects sharing the same prefix with the object where the corresponding ``X-Object-Sharing`` header is defined. For simplicity, nested/overlapping permissions are not allowed. Setting ``X-Object-Sharing`` will fail, if the object is already "covered", or another object with a longer common-prefix name already has permissions. When retrieving an object, the ``X-Object-Shared-By`` header reports where it gets its permissions from. If not present, the object is the actual source of authorization directives.
+Read and write control in Pithos is managed by setting appropriate permissions with the ``X-Object-Sharing`` header. The permissions are applied using directory-based inheritance. A directory is an object with the corresponding content type. The default delimiter is ``/``. Thus, each set of authorization directives is applied to all objects in the directory object where the corresponding ``X-Object-Sharing`` header is defined. If there are nested/overlapping permissions, the closest to the object is applied. When retrieving an object, the ``X-Object-Shared-By`` header reports where it gets its permissions from. If not present, the object is the actual source of authorization directives.
 
 A user may ``GET`` another account or container. The result will include a limited reply, containing only the allowed containers or objects respectively. A top-level request with an authentication token, will return a list of allowed accounts, so the user can easily find out which other users share objects. The ``X-Object-Allowed-To`` header lists the actions allowed on an object, if it does not belong to the requesting user.
 
@@ -1077,7 +1062,7 @@ List of differences from the OOS API:
 * Headers ``X-Container-Block-*`` at the container level, exposing the underlying storage characteristics.
 * All metadata replies, at all levels, include latest modification information.
 * At all levels, a ``HEAD`` or ``GET`` request may use ``If-Modified-Since`` and ``If-Unmodified-Since`` headers.
-* Container/object lists include all associated metadata if the reply is of type JSON/XML. Some names are kept to their OOS API equivalents for compatibility.
+* Container/object lists include more fields if the reply is of type JSON/XML. Some names are kept to their OOS API equivalents for compatibility.
 * Option to include only shared containers/objects in listings.
 * Object metadata allowed, in addition to ``X-Object-Meta-*``: ``Content-Encoding``, ``Content-Disposition``, ``X-Object-Manifest``. These are all replaced with every update operation, except if using the ``update`` parameter (in which case individual keys can also be deleted). Deleting meta by providing empty values also works when copying/moving an object.
 * Multi-range object ``GET`` support as outlined in RFC2616.
@@ -1093,7 +1078,7 @@ List of differences from the OOS API:
 * Time-variant account/container listings via the ``until`` parameter.
 * Object versions - parameter ``version`` in ``HEAD``/``GET`` (list versions with ``GET``), ``X-Object-Version-*`` meta in replies, ``X-Source-Version`` in ``PUT``/``COPY``.
 * Sharing/publishing with ``X-Object-Sharing``, ``X-Object-Public`` at the object level. Cross-user operations are allowed - controlled by sharing directives. Available actions in cross-user requests are reported with ``X-Object-Allowed-To``. Permissions may include groups defined with ``X-Account-Group-*`` at the account level. These apply to the object - not its versions.
-* Support for prefix-based inheritance when enforcing permissions. Parent object carrying the authorization directives is reported in ``X-Object-Shared-By``.
+* Support for directory-based inheritance when enforcing permissions. Parent object carrying the authorization directives is reported in ``X-Object-Shared-By``.
 * Copy and move between accounts with ``X-Source-Account`` and ``Destination-Account`` headers.
 * Large object support with ``X-Object-Manifest``.
 * Trace the user that created/modified an object with ``X-Object-Modified-By``.
index b1e3865..b1460f3 100644 (file)
   
   <link rel="stylesheet" href="site_media/css/site.css">
   <script src="site_media/js/libs/modernizr-1.7.min.js"></script>
+  <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.4.2/jquery.min.js"></script>
+  <script>
+    var CLOUDBAR_ACTIVE_SERVICE = 'cloud';
+    var CLOUDBAR_LOCATION = "/im/static/im/cloudbar/";
+
+    $(document).ready(function(){
+      $.getScript(CLOUDBAR_LOCATION + 'cloudbar.js');
+    })
+  </script>
 </head>
 <body id="index">
     <div class="container">
@@ -48,7 +57,7 @@
     </div>
     <div class="inner-bottom">
         <a class="gotoapp" 
-            href="/login?next=/ui" 
+            href="/ui" 
             title="Enter Pithos+">Είσοδος</a>
     </div>
     <div class="downloads clearfix">
diff --git a/htdocs/okeanos.html b/htdocs/okeanos.html
new file mode 100644 (file)
index 0000000..a6cd4bd
--- /dev/null
@@ -0,0 +1,84 @@
+<!doctype html>
+<!--[if lt IE 7 ]> <html lang="en" class="no-js ie6"> <![endif]-->
+<!--[if IE 7 ]>    <html lang="en" class="no-js ie7"> <![endif]-->
+<!--[if IE 8 ]>    <html lang="en" class="no-js ie8"> <![endif]-->
+<!--[if (gte IE 9)|!(IE)]><!--> <html lang="en" class="no-js"> <!--<![endif]-->
+<head>
+    <meta charset="utf-8">
+
+  <!-- Always force latest IE rendering engine (even in intranet) & Chrome Frame
+       Remove this if you use the .htaccess -->
+  <meta http-equiv="X-UA-Compatible" content="chrome=1">
+
+  <title>~okeanos</title>
+  <meta name="description" content="Pithos+ GRNet Service">
+  <meta name="author" content="Kostas Papadimitriou <kpap@grnet.gr>">
+
+  <!--  Mobile viewport optimized: j.mp/bplateviewport -->
+  <meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1">
+
+    <!-- Place favicon.ico & apple-touch-icon.png
+        in the root of your domain and delete these references -->
+  <link rel="shortcut icon" href="/favicon.ico">
+  <link rel="apple-touch-icon" href="/apple-touch-icon.png">
+  
+  <link rel="stylesheet" href="site_media/css/site.css">
+  <script src="site_media/js/libs/modernizr-1.7.min.js"></script>
+  <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.4.2/jquery.min.js"></script>
+  <script>
+    var CLOUDBAR_ACTIVE_SERVICE = 'okeanos';
+    var CLOUDBAR_LOCATION = "/im/static/im/cloudbar/";
+
+    $(document).ready(function(){
+      $.getScript(CLOUDBAR_LOCATION + 'cloudbar.js');
+    })
+  </script>
+</head>
+<body id="index" style="background-position: 0px -200px">
+    <div class="container">
+        <div class="logo">
+            <a href="#" title="Pithos+"><img 
+                src="site_media/images/okeanos-logo-alpha.png" 
+                alt="~okeanos" 
+                title="~okeanos" /></a>
+        </div>
+
+        <div class="inner box-round-mid box-shadow-mid clearfix">
+            <div class="cols clearfix">
+                <div class="main-content">
+
+    <div class="intro-text page text">
+        <p>
+        Η έκδοση alpha 2 της υπηρεσίας ~okeanos θα είναι σύντομα διαθέσιμη.<br /><br />
+        Αν έχετε λογαριασμό θα λάβετε ενημερωτικό μήνυμα στο email σας.<br />
+        Αν δεν έχετε, φτιάξτε έναν <a href="/im">τώρα</a>!
+        </p>
+    </div>
+</div>
+
+            </div>
+        </div>
+    </div>
+
+</div>
+</div>
+
+<div class="footer">
+    
+<div class="bottom-text">
+    <p>&copy; 2011 <a target="_blank" href="http://www.grnet.gr/">Εθνικό Δίκτυο Έρευνας και Τεχνολογίας</a></p>
+</div>
+</div>
+  
+              
+      <script src="site_media/js/libs/jquery-1.5.1.min.js"></script>
+      <script src="site_media/js/libs/jquery.cookie.js"></script>
+      <script src="site_media/js/intro-hover.js"></script>
+      <script src="site_media/js/main.js"></script>
+
+                        <!--[if lt IE 7 ]>
+      <script src="site_media/js/libs/dd_belatedpng.js"></script>
+      <script>DD_belatedPNG.fix('img, .png_bg');</script>
+      <![endif]-->
+</body>
+</html>
index 6d6e525..24d69e4 100644 (file)
@@ -136,7 +136,7 @@ body.alt {
 .container {
   width: 870px;
   margin: 0 auto;
-  margin-top: 30px;
+  margin-top: 20px;
 }
 .inner {
   margin-top: 2.5em;
@@ -2805,7 +2805,6 @@ body, html {
 }
 .container {
   width: 670px;
-  margin-top: 0;
 }
 .container .logo img {
   margin: 0;
diff --git a/htdocs/site_media/images/okeanos-logo-alpha.png b/htdocs/site_media/images/okeanos-logo-alpha.png
new file mode 100644 (file)
index 0000000..43d28d4
Binary files /dev/null and b/htdocs/site_media/images/okeanos-logo-alpha.png differ
index 30581b2..d90d5c9 100644 (file)
@@ -31,8 +31,7 @@
 # interpreted as representing official policies, either expressed
 # or implied, of GRNET S.A.
 
-import logging
-import hashlib
+from xml.dom import minidom
 
 from django.conf import settings
 from django.http import HttpResponse
@@ -40,8 +39,9 @@ from django.template.loader import render_to_string
 from django.utils import simplejson as json
 from django.utils.http import parse_etags
 from django.utils.encoding import smart_str
-from xml.dom import minidom
+from django.views.decorators.csrf import csrf_exempt
 
+from pithos.lib.user import get_user
 from pithos.lib.filter import parse_filters
 
 from pithos.api.faults import (Fault, NotModified, BadRequest, Unauthorized, Forbidden, ItemNotFound, Conflict,
@@ -53,12 +53,18 @@ from pithos.api.util import (json_encode_decimal, rename_meta_key, format_header
     copy_or_move_object, get_int_parameter, get_content_length, get_content_range, socket_read_iterator,
     SaveToBackendHandler, object_data_response, put_object_block, hashmap_md5, simple_list_response, api_method)
 from pithos.backends.base import NotAllowedError, QuotaError
+from pithos.api.settings import AUTHENTICATION_URL, AUTHENTICATION_USERS
+
+import logging
+import hashlib
 
 
 logger = logging.getLogger(__name__)
 
 
+@csrf_exempt
 def top_demux(request):
+    get_user(request, AUTHENTICATION_URL, AUTHENTICATION_USERS)
     if request.method == 'GET':
         if getattr(request, 'user', None) is not None:
             return account_list(request)
@@ -66,7 +72,9 @@ def top_demux(request):
     else:
         return method_not_allowed(request)
 
+@csrf_exempt
 def account_demux(request, v_account):
+    get_user(request, AUTHENTICATION_URL, AUTHENTICATION_USERS)
     if request.method == 'HEAD':
         return account_meta(request, v_account)
     elif request.method == 'POST':
@@ -76,7 +84,9 @@ def account_demux(request, v_account):
     else:
         return method_not_allowed(request)
 
+@csrf_exempt
 def container_demux(request, v_account, v_container):
+    get_user(request, AUTHENTICATION_URL, AUTHENTICATION_USERS)
     if request.method == 'HEAD':
         return container_meta(request, v_account, v_container)
     elif request.method == 'PUT':
@@ -90,7 +100,9 @@ def container_demux(request, v_account, v_container):
     else:
         return method_not_allowed(request)
 
+@csrf_exempt
 def object_demux(request, v_account, v_container, v_object):
+    get_user(request, AUTHENTICATION_URL, AUTHENTICATION_USERS)
     if request.method == 'HEAD':
         return object_meta(request, v_account, v_container, v_object)
     elif request.method == 'GET':
@@ -160,18 +172,13 @@ def account_list(request):
         if x == request.user_uniq:
             continue
         try:
-            meta = request.backend.get_account_meta(request.user_uniq, x, 'pithos')
+            meta = request.backend.get_account_meta(request.user_uniq, x, 'pithos', include_user_defined=False)
             groups = request.backend.get_account_groups(request.user_uniq, x)
         except NotAllowedError:
             raise Forbidden('Not allowed')
         else:
             rename_meta_key(meta, 'modified', 'last_modified')
             rename_meta_key(meta, 'until_timestamp', 'x_account_until_timestamp')
-            m = dict([(k[15:], v) for k, v in meta.iteritems() if k.startswith('X-Account-Meta-')])
-            for k in m:
-                del(meta['X-Account-Meta-' + k])
-            if m:
-                meta['X-Account-Meta'] = printable_header_dict(m)
             if groups:
                 meta['X-Account-Group'] = printable_header_dict(dict([(k, ','.join(v)) for k, v in groups.iteritems()]))
             account_meta.append(printable_header_dict(meta))
@@ -282,7 +289,7 @@ def container_list(request, v_account):
     for x in containers:
         try:
             meta = request.backend.get_container_meta(request.user_uniq, v_account,
-                                                        x, 'pithos', until)
+                                                        x, 'pithos', until, include_user_defined=False)
             policy = request.backend.get_container_policy(request.user_uniq,
                                                             v_account, x)
         except NotAllowedError:
@@ -292,11 +299,6 @@ def container_list(request, v_account):
         else:
             rename_meta_key(meta, 'modified', 'last_modified')
             rename_meta_key(meta, 'until_timestamp', 'x_container_until_timestamp')
-            m = dict([(k[17:], v) for k, v in meta.iteritems() if k.startswith('X-Container-Meta-')])
-            for k in m:
-                del(meta['X-Container-Meta-' + k])
-            if m:
-                meta['X-Container-Meta'] = printable_header_dict(m)
             if policy:
                 meta['X-Container-Policy'] = printable_header_dict(dict([(k, v) for k, v in policy.iteritems()]))
             container_meta.append(printable_header_dict(meta))
@@ -320,7 +322,7 @@ def container_meta(request, v_account, v_container):
     try:
         meta = request.backend.get_container_meta(request.user_uniq, v_account,
                                                     v_container, 'pithos', until)
-        meta['object_meta'] = request.backend.list_object_meta(request.user_uniq,
+        meta['object_meta'] = request.backend.list_container_meta(request.user_uniq,
                                                 v_account, v_container, 'pithos', until)
         policy = request.backend.get_container_policy(request.user_uniq, v_account,
                                                         v_container)
@@ -457,7 +459,7 @@ def object_list(request, v_account, v_container):
     try:
         meta = request.backend.get_container_meta(request.user_uniq, v_account,
                                                     v_container, 'pithos', until)
-        meta['object_meta'] = request.backend.list_object_meta(request.user_uniq,
+        meta['object_meta'] = request.backend.list_container_meta(request.user_uniq,
                                                 v_account, v_container, 'pithos', until)
         policy = request.backend.get_container_policy(request.user_uniq, v_account,
                                                         v_container)
@@ -509,16 +511,16 @@ def object_list(request, v_account, v_container):
     if 'shared' in request.GET:
         shared = True
     
-    try:
-        objects = request.backend.list_objects(request.user_uniq, v_account,
-                                    v_container, prefix, delimiter, marker,
-                                    limit, virtual, 'pithos', keys, shared, until)
-    except NotAllowedError:
-        raise Forbidden('Not allowed')
-    except NameError:
-        raise ItemNotFound('Container does not exist')
-    
     if request.serialization == 'text':
+        try:
+            objects = request.backend.list_objects(request.user_uniq, v_account,
+                                        v_container, prefix, delimiter, marker,
+                                        limit, virtual, 'pithos', keys, shared, until)
+        except NotAllowedError:
+            raise Forbidden('Not allowed')
+        except NameError:
+            raise ItemNotFound('Container does not exist')
+        
         if len(objects) == 0:
             # The cloudfiles python bindings expect 200 if json/xml.
             response.status_code = 204
@@ -527,43 +529,51 @@ def object_list(request, v_account, v_container):
         response.content = '\n'.join([x[0] for x in objects]) + '\n'
         return response
     
+    try:
+        objects = request.backend.list_object_meta(request.user_uniq, v_account,
+                                    v_container, prefix, delimiter, marker,
+                                    limit, virtual, 'pithos', keys, shared, until)
+        object_permissions = {}
+        object_public = {}
+        if until is None:
+            name_idx = len('/'.join((v_account, v_container, '')))
+            for x in request.backend.list_object_permissions(request.user_uniq,
+                                    v_account, v_container, prefix):
+                object = x[name_idx:]
+                object_permissions[object] = request.backend.get_object_permissions(
+                                    request.user_uniq, v_account, v_container, object)
+            for k, v in request.backend.list_object_public(request.user_uniq,
+                                    v_account, v_container, prefix).iteritems():
+                object_public[k[name_idx:]] = v
+    except NotAllowedError:
+        raise Forbidden('Not allowed')
+    except NameError:
+        raise ItemNotFound('Container does not exist')
+    
     object_meta = []
-    for x in objects:
-        if x[1] is None:
+    for meta in objects:
+        if len(meta) == 1:
             # Virtual objects/directories.
-            object_meta.append({'subdir': x[0]})
+            object_meta.append(meta)
         else:
-            try:
-                meta = request.backend.get_object_meta(request.user_uniq, v_account,
-                                                        v_container, x[0], 'pithos', x[1])
-                if until is None:
-                    permissions = request.backend.get_object_permissions(
-                                    request.user_uniq, v_account, v_container, x[0])
-                    public = request.backend.get_object_public(request.user_uniq,
-                                                v_account, v_container, x[0])
-                else:
-                    permissions = None
-                    public = None
-            except NotAllowedError:
-                raise Forbidden('Not allowed')
-            except NameError:
-                pass
+            rename_meta_key(meta, 'hash', 'x_object_hash') # Will be replaced by checksum.
+            rename_meta_key(meta, 'checksum', 'hash')
+            rename_meta_key(meta, 'type', 'content_type')
+            rename_meta_key(meta, 'uuid', 'x_object_uuid')
+            if until is not None and 'modified' in meta:
+                del(meta['modified'])
             else:
-                rename_meta_key(meta, 'hash', 'x_object_hash') # Will be replaced by ETag.
-                rename_meta_key(meta, 'ETag', 'hash')
-                rename_meta_key(meta, 'uuid', 'x_object_uuid')
                 rename_meta_key(meta, 'modified', 'last_modified')
-                rename_meta_key(meta, 'modified_by', 'x_object_modified_by')
-                rename_meta_key(meta, 'version', 'x_object_version')
-                rename_meta_key(meta, 'version_timestamp', 'x_object_version_timestamp')
-                m = dict([(k[14:], v) for k, v in meta.iteritems() if k.startswith('X-Object-Meta-')])
-                for k in m:
-                    del(meta['X-Object-Meta-' + k])
-                if m:
-                    meta['X-Object-Meta'] = printable_header_dict(m)
-                update_sharing_meta(request, permissions, v_account, v_container, x[0], meta)
+            rename_meta_key(meta, 'modified_by', 'x_object_modified_by')
+            rename_meta_key(meta, 'version', 'x_object_version')
+            rename_meta_key(meta, 'version_timestamp', 'x_object_version_timestamp')
+            permissions = object_permissions.get(meta['name'], None)
+            if permissions:
+                update_sharing_meta(request, permissions, v_account, v_container, meta['name'], meta)
+            public = object_public.get(meta['name'], None)
+            if public:
                 update_public_meta(public, meta)
-                object_meta.append(printable_header_dict(meta))
+            object_meta.append(printable_header_dict(meta))
     if request.serialization == 'xml':
         data = render_to_string('objects.xml', {'container': v_container, 'objects': object_meta})
     elif request.serialization  == 'json':
@@ -609,7 +619,7 @@ def object_meta(request, v_account, v_container, v_object):
         validate_matching_preconditions(request, meta)
     except NotModified:
         response = HttpResponse(status=304)
-        response['ETag'] = meta['ETag']
+        response['ETag'] = meta['checksum']
         return response
     
     response = HttpResponse(status=200)
@@ -678,12 +688,16 @@ def object_read(request, v_account, v_container, v_object):
         validate_matching_preconditions(request, meta)
     except NotModified:
         response = HttpResponse(status=304)
-        response['ETag'] = meta['ETag']
+        response['ETag'] = meta['checksum']
         return response
     
+    hashmap_reply = False
+    if 'hashmap' in request.GET and request.serialization != 'text':
+        hashmap_reply = True
+    
     sizes = []
     hashmaps = []
-    if 'X-Object-Manifest' in meta:
+    if 'X-Object-Manifest' in meta and not hashmap_reply:
         try:
             src_container, src_name = split_container_object_string('/' + meta['X-Object-Manifest'])
             objects = request.backend.list_objects(request.user_uniq, v_account,
@@ -721,7 +735,7 @@ def object_read(request, v_account, v_container, v_object):
             raise ItemNotFound('Version does not exist')
     
     # Reply with the hashmap.
-    if 'hashmap' in request.GET and request.serialization != 'text':
+    if hashmap_reply:
         size = sum(sizes)
         hashmap = sum(hashmaps, [])
         d = {
@@ -791,12 +805,12 @@ def object_write(request, v_account, v_container, v_object):
         response['X-Object-Version'] = version_id
         return response
     
-    meta, permissions, public = get_object_headers(request)
+    content_type, meta, permissions, public = get_object_headers(request)
     content_length = -1
     if request.META.get('HTTP_TRANSFER_ENCODING') != 'chunked':
         content_length = get_content_length(request)
     # Should be BadRequest, but API says otherwise.
-    if 'Content-Type' not in meta:
+    if not content_type:
         raise LengthRequired('Missing Content-Type header')
     
     if 'hashmap' in request.GET:
@@ -829,6 +843,8 @@ def object_write(request, v_account, v_container, v_object):
                     hashmap.append(hash.firstChild.data)
             except:
                 raise BadRequest('Invalid data formatting')
+        
+        checksum = '' # Do not set to None (will copy previous value).
     else:
         md5 = hashlib.md5()
         size = 0
@@ -841,15 +857,15 @@ def object_write(request, v_account, v_container, v_object):
             hashmap.append(request.backend.put_block(data))
             md5.update(data)
         
-        meta['ETag'] = md5.hexdigest().lower()
+        checksum = md5.hexdigest().lower()
         etag = request.META.get('HTTP_ETAG')
-        if etag and parse_etags(etag)[0].lower() != meta['ETag']:
+        if etag and parse_etags(etag)[0].lower() != checksum:
             raise UnprocessableEntity('Object ETag does not match')
     
     try:
         version_id = request.backend.update_object_hashmap(request.user_uniq,
-                        v_account, v_container, v_object, size, hashmap,
-                        'pithos', meta, True, permissions)
+                        v_account, v_container, v_object, size, content_type,
+                        hashmap, checksum, 'pithos', meta, True, permissions)
     except NotAllowedError:
         raise Forbidden('Not allowed')
     except IndexError, e:
@@ -858,18 +874,14 @@ def object_write(request, v_account, v_container, v_object):
         raise ItemNotFound('Container does not exist')
     except ValueError:
         raise BadRequest('Invalid sharing header')
-    except AttributeError, e:
-        raise Conflict(simple_list_response(request, e.data))
     except QuotaError:
         raise RequestEntityTooLarge('Quota exceeded')
-    if 'ETag' not in meta:
+    if not checksum:
         # Update the MD5 after the hashmap, as there may be missing hashes.
-        # TODO: This will create a new version, even if done synchronously...
-        etag = hashmap_md5(request, hashmap, size)
-        meta.update({'ETag': etag}) # Update ETag.
+        checksum = hashmap_md5(request, hashmap, size)
         try:
-            version_id = request.backend.update_object_meta(request.user_uniq,
-                            v_account, v_container, v_object, 'pithos', {'ETag': etag}, False)
+            version_id = request.backend.update_object_checksum(request.user_uniq,
+                            v_account, v_container, v_object, version_id, checksum)
         except NotAllowedError:
             raise Forbidden('Not allowed')
     if public is not None:
@@ -882,7 +894,8 @@ def object_write(request, v_account, v_container, v_object):
             raise ItemNotFound('Object does not exist')
     
     response = HttpResponse(status=201)
-    response['ETag'] = meta['ETag']
+    if checksum:
+        response['ETag'] = checksum
     response['X-Object-Version'] = version_id
     return response
 
@@ -899,14 +912,11 @@ def object_write_form(request, v_account, v_container, v_object):
         raise BadRequest('Missing X-Object-Data field')
     file = request.FILES['X-Object-Data']
     
-    meta = {}
-    meta['Content-Type'] = file.content_type
-    meta['ETag'] = file.etag
-    
+    checksum = file.etag
     try:
         version_id = request.backend.update_object_hashmap(request.user_uniq,
-                        v_account, v_container, v_object, file.size, file.hashmap,
-                        'pithos', meta, True)
+                        v_account, v_container, v_object, file.size, file.content_type,
+                        file.hashmap, checksum, 'pithos', {}, True)
     except NotAllowedError:
         raise Forbidden('Not allowed')
     except NameError:
@@ -915,8 +925,9 @@ def object_write_form(request, v_account, v_container, v_object):
         raise RequestEntityTooLarge('Quota exceeded')
     
     response = HttpResponse(status=201)
-    response['ETag'] = meta['ETag']
+    response['ETag'] = checksum
     response['X-Object-Version'] = version_id
+    response.content = checksum
     return response
 
 @api_method('COPY', format_allowed=True)
@@ -1001,10 +1012,7 @@ def object_update(request, v_account, v_container, v_object):
     #                       forbidden (403),
     #                       badRequest (400)
     
-    meta, permissions, public = get_object_headers(request)
-    content_type = meta.get('Content-Type')
-    if content_type:
-        del(meta['Content-Type']) # Do not allow changing the Content-Type.
+    content_type, meta, permissions, public = get_object_headers(request)
     
     try:
         prev_meta = request.backend.get_object_meta(request.user_uniq, v_account,
@@ -1018,14 +1026,9 @@ def object_update(request, v_account, v_container, v_object):
     if request.META.get('HTTP_IF_MATCH') or request.META.get('HTTP_IF_NONE_MATCH'):
         validate_matching_preconditions(request, prev_meta)
     
-    # If replacing, keep previous values of 'Content-Type' and 'ETag'.
     replace = True
     if 'update' in request.GET:
         replace = False
-    if replace:
-        for k in ('Content-Type', 'ETag'):
-            if k in prev_meta:
-                meta[k] = prev_meta[k]
     
     # A Content-Type or X-Source-Object header indicates data updates.
     src_object = request.META.get('HTTP_X_SOURCE_OBJECT')
@@ -1043,8 +1046,6 @@ def object_update(request, v_account, v_container, v_object):
                 raise ItemNotFound('Object does not exist')
             except ValueError:
                 raise BadRequest('Invalid sharing header')
-            except AttributeError, e:
-                raise Conflict(simple_list_response(request, e.data))
         if public is not None:
             try:
                 request.backend.update_object_public(request.user_uniq, v_account,
@@ -1178,19 +1179,17 @@ def object_update(request, v_account, v_container, v_object):
     if dest_bytes is not None and dest_bytes < size:
         size = dest_bytes
         hashmap = hashmap[:(int((size - 1) / request.backend.block_size) + 1)]
-    meta.update({'ETag': hashmap_md5(request, hashmap, size)}) # Update ETag.
+    checksum = hashmap_md5(request, hashmap, size)
     try:
         version_id = request.backend.update_object_hashmap(request.user_uniq,
-                        v_account, v_container, v_object, size, hashmap,
-                        'pithos', meta, replace, permissions)
+                        v_account, v_container, v_object, size, prev_meta['type'],
+                        hashmap, checksum, 'pithos', meta, replace, permissions)
     except NotAllowedError:
         raise Forbidden('Not allowed')
     except NameError:
         raise ItemNotFound('Container does not exist')
     except ValueError:
         raise BadRequest('Invalid sharing header')
-    except AttributeError, e:
-        raise Conflict(simple_list_response(request, e.data))
     except QuotaError:
         raise RequestEntityTooLarge('Quota exceeded')
     if public is not None:
@@ -1203,7 +1202,7 @@ def object_update(request, v_account, v_container, v_object):
             raise ItemNotFound('Object does not exist')
     
     response = HttpResponse(status=204)
-    response['ETag'] = meta['ETag']
+    response['ETag'] = checksum
     response['X-Object-Version'] = version_id
     return response
 
diff --git a/snf-pithos-app/pithos/api/management/__init__.py b/snf-pithos-app/pithos/api/management/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/snf-pithos-app/pithos/api/management/commands/__init__.py b/snf-pithos-app/pithos/api/management/commands/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/snf-pithos-app/pithos/api/management/commands/storagequota.py b/snf-pithos-app/pithos/api/management/commands/storagequota.py
new file mode 100644 (file)
index 0000000..e792549
--- /dev/null
@@ -0,0 +1,80 @@
+# Copyright 2012 GRNET S.A. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or
+# without modification, are permitted provided that the following
+# conditions are met:
+#
+#   1. Redistributions of source code must retain the above
+#      copyright notice, this list of conditions and the following
+#      disclaimer.
+#
+#   2. Redistributions in binary form must reproduce the above
+#      copyright notice, this list of conditions and the following
+#      disclaimer in the documentation and/or other materials
+#      provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and
+# documentation are those of the authors and should not be
+# interpreted as representing official policies, either expressed
+# or implied, of GRNET S.A.
+
+from optparse import make_option
+
+from django.core.management.base import BaseCommand, CommandError
+
+from pithos.api.settings import (BACKEND_DB_MODULE, BACKEND_DB_CONNECTION,
+                                    BACKEND_BLOCK_MODULE, BACKEND_BLOCK_PATH,
+                                    BACKEND_QUEUE_MODULE, BACKEND_QUEUE_CONNECTION,
+                                    BACKEND_QUOTA, BACKEND_VERSIONING)
+from pithos.backends import connect_backend
+
+
+class Command(BaseCommand):
+    args = "<user>"
+    help = "Get/set a user's quota"
+    
+    option_list = BaseCommand.option_list + (
+        make_option('--set-quota',
+            dest='quota',
+            metavar='BYTES',
+            help="Set user's quota"),
+        )
+    
+    def handle(self, *args, **options):
+        if len(args) != 1:
+            raise CommandError("Please provide a user")
+        
+        user = args[0]
+        quota = options.get('quota')
+        if quota is not None:
+            try:
+                quota = int(quota)
+            except ValueError:
+                raise CommandError("Invalid quota")
+        
+        backend = connect_backend(db_module=BACKEND_DB_MODULE,
+                                  db_connection=BACKEND_DB_CONNECTION,
+                                  block_module=BACKEND_BLOCK_MODULE,
+                                  block_path=BACKEND_BLOCK_PATH,
+                                  queue_module=BACKEND_QUEUE_MODULE,
+                                  queue_connection=BACKEND_QUEUE_CONNECTION)
+        backend.default_policy['quota'] = BACKEND_QUOTA
+        backend.default_policy['versioning'] = BACKEND_VERSIONING
+        if quota is not None:
+            backend.update_account_policy(user, user, {'quota': quota})
+        else:
+            self.stdout.write("Quota for %s: %s\n" % (user, backend.get_account_policy(user, user)['quota']))
+        backend.close()
index b11a693..49c32fb 100644 (file)
 import logging
 
 from django.http import HttpResponse
+from django.views.decorators.csrf import csrf_exempt
+
+from pithos.lib.user import get_user
 
 from pithos.api.faults import (Fault, BadRequest, ItemNotFound)
 from pithos.api.util import (put_object_headers, update_manifest_meta,
     validate_modification_preconditions, validate_matching_preconditions,
     object_data_response, api_method)
 from pithos.api.short_url import decode_url
+from pithos.api.settings import AUTHENTICATION_URL, AUTHENTICATION_USERS
 
 
 logger = logging.getLogger(__name__)
 
 
+@csrf_exempt
 def public_demux(request, v_public):
+    get_user(request, AUTHENTICATION_URL, AUTHENTICATION_USERS)
     if request.method == 'HEAD':
         return public_meta(request, v_public)
     elif request.method == 'GET':
index 57f5a78..16e5575 100644 (file)
@@ -1,10 +1,11 @@
 #coding=utf8
+from django.conf import settings
+from os.path import abspath, dirname, join
 
-USE_X_FORWARDED_HOST = False
+PROJECT_PATH = getattr(settings, 'PROJECT_PATH', dirname(dirname(abspath(__file__))))
 
-# Either set local users here, or a remote host.
-# To disable local users set to None.
-AUTHENTICATION_USERS = {
+# Set local users, or a remote host. To disable local users set them to None.
+sample_users = {
     '0000': 'test',
     '0001': 'verigak',
     '0002': 'chazapis',
@@ -17,7 +18,22 @@ AUTHENTICATION_USERS = {
     '0009': 'διογένης'
 }
 
-# Where astakos is hosted.
-AUTHENTICATION_HOST = '127.0.0.1:10000'
+AUTHENTICATION_URL = getattr(settings, 'PITHOS_AUTHENTICATION_URL', 'http://127.0.0.1:8000/im/authenticate')
+AUTHENTICATION_USERS = getattr(settings, 'PITHOS_AUTHENTICATION_USERS', sample_users)
+
+# SQLAlchemy (choose SQLite/MySQL/PostgreSQL).
+BACKEND_DB_MODULE = getattr(settings, 'PITHOS_BACKEND_DB_MODULE', 'pithos.backends.lib.sqlalchemy')
+BACKEND_DB_CONNECTION = getattr(settings, 'PITHOS_BACKEND_DB_CONNECTION', 'sqlite:///' + join(PROJECT_PATH, 'backend.db'))
+
+# Block storage.
+BACKEND_BLOCK_MODULE = getattr(settings, 'PITHOS_BACKEND_BLOCK_MODULE', 'pithos.backends.lib.hashfiler')
+BACKEND_BLOCK_PATH = getattr(settings, 'PITHOS_BACKEND_BLOCK_PATH', join(PROJECT_PATH, 'data/'))
+
+# Queue for billing.
+BACKEND_QUEUE_MODULE = getattr(settings, 'PITHOS_BACKEND_QUEUE_MODULE', None) # Example: 'pithos.backends.lib.rabbitmq'
+BACKEND_QUEUE_CONNECTION = getattr(settings, 'PITHOS_BACKEND_QUEUE_CONNECTION', None) # Example: 'rabbitmq://guest:guest@localhost:5672/pithos'
+
+# Default setting for new accounts.
+BACKEND_QUOTA = getattr(settings, 'PITHOS_BACKEND_QUOTA', 50 * 1024 * 1024 * 1024)
+BACKEND_VERSIONING = getattr(settings, 'PITHOS_BACKEND_VERSIONING', 'auto')
 
-TEST = False
diff --git a/snf-pithos-app/pithos/api/synnefo_settings.py b/snf-pithos-app/pithos/api/synnefo_settings.py
new file mode 100644 (file)
index 0000000..328eb7a
--- /dev/null
@@ -0,0 +1,133 @@
+#coding=utf8
+# Django settings for pithos project.
+
+from os.path import abspath, dirname, exists, join
+
+PROJECT_PATH = dirname(abspath(__file__))
+
+DEBUG = True
+TEMPLATE_DEBUG = DEBUG
+
+ADMINS = (
+    # ('Your Name', 'your_email@domain.com'),
+)
+
+MANAGERS = ADMINS
+
+DATABASES = {
+    'default': {
+        'ENGINE': 'django.db.backends.sqlite3',
+        'NAME': join(PROJECT_PATH, 'pithos.db')
+    }
+}
+
+# Local time zone for this installation. Choices can be found here:
+# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
+# although not all choices may be available on all operating systems.
+# If running in a Windows environment this must be set to the same as your
+# system time zone.
+TIME_ZONE = 'UTC'
+
+# Language code for this installation. All choices can be found here:
+# http://www.i18nguy.com/unicode/language-identifiers.html
+LANGUAGE_CODE = 'en-us'
+
+SITE_ID = 1
+
+# If you set this to False, Django will make some optimizations so as not
+# to load the internationalization machinery.
+USE_I18N = True
+
+# If you set this to False, Django will not format dates, numbers and
+# calendars according to the current locale
+USE_L10N = True
+
+# Absolute path to the directory that holds media.
+# Example: "/home/media/media.lawrence.com/"
+MEDIA_ROOT = ''
+
+# URL that handles the media served from MEDIA_ROOT. Make sure to use a
+# trailing slash if there is a path component (optional in other cases).
+# Examples: "http://media.lawrence.com", "http://example.com/media/"
+MEDIA_URL = ''
+
+# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
+# trailing slash.
+# Examples: "http://foo.com/media/", "/media/".
+ADMIN_MEDIA_PREFIX = '/media/'
+
+# Make this unique, and don't share it with anybody.
+SECRET_KEY = '$j0cdrfm*0sc2j+e@@2f-&3-_@2=^!z#+b-8o4_i10@2%ev7si'
+
+# List of callables that know how to import templates from various sources.
+TEMPLATE_LOADERS = (
+    'django.template.loaders.filesystem.Loader',
+    'django.template.loaders.app_directories.Loader',
+)
+
+MIDDLEWARE_CLASSES = (
+    'django.middleware.common.CommonMiddleware',
+    #'django.middleware.csrf.CsrfViewMiddleware',
+    #'django.contrib.sessions.middleware.SessionMiddleware',
+    #'django.contrib.auth.middleware.AuthenticationMiddleware',
+    #'django.contrib.messages.middleware.MessageMiddleware',
+    'pithos.middleware.LoggingConfigMiddleware',
+    'pithos.middleware.SecureMiddleware'
+)
+
+ROOT_URLCONF = 'pithos.urls'
+
+TEMPLATE_DIRS = (
+    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
+    # Always use forward slashes, even on Windows.
+    # Don't forget to use absolute paths, not relative paths.
+)
+
+# Setup logging (use this name for the setting to avoid conflicts with django > 1.2.x).
+LOGGING_SETUP = {
+    'version': 1,
+    'disable_existing_loggers': True,
+    'formatters': {
+        'simple': {
+            'format': '%(message)s'
+        },
+        'verbose': {
+            'format': '%(asctime)s [%(levelname)s] %(name)s %(message)s'
+        },
+    },
+    'handlers': {
+        'null': {
+            'class': 'logging.NullHandler',
+        },
+        'console': {
+            'class': 'logging.StreamHandler',
+            'formatter': 'verbose'
+        },
+        'file': {
+            'class': 'logging.FileHandler',
+            'formatter': 'verbose'
+        },
+    },
+    'loggers': {
+        'pithos': {
+            'handlers': ['console'],
+            'level': 'DEBUG' if DEBUG else 'INFO'
+        },
+    }
+}
+
+# The server is behind a proxy (apache and gunicorn setup).
+USE_X_FORWARDED_HOST = False
+
+# Set umask (needed for gunicorn setup).
+#umask(0077)
+
+conf = join(PROJECT_PATH, 'settings.local')
+if exists(conf):
+    execfile(conf)
+elif exists('/etc/pithos/settings.local'):
+    execfile('/etc/pithos/settings.local')
+
+INSTALLED_APPS = (
+    'pithos.api',
+)
index aa9af55..367a47d 100644 (file)
 # interpreted as representing official policies, either expressed
 # or implied, of GRNET S.A.
 
-from django.conf.urls.defaults import *
+from django.conf.urls.defaults import include, patterns
 
 # TODO: This only works when in this order.
-urlpatterns = patterns('pithos.api.functions',
+api_urlpatterns = patterns('pithos.api.functions',
     (r'^$', 'top_demux'),
     (r'^(?P<v_account>.+?)/(?P<v_container>.+?)/(?P<v_object>.+?)$', 'object_demux'),
     (r'^(?P<v_account>.+?)/(?P<v_container>.+?)/?$', 'container_demux'),
     (r'^(?P<v_account>.+?)/?$', 'account_demux')
 )
+
+urlpatterns = patterns('',
+    (r'^v1(?:$|/)', include(api_urlpatterns)),
+    (r'^v1\.0(?:$|/)', include(api_urlpatterns)),
+    (r'^public/(?P<v_public>.+?)/?$', 'pithos.api.public.public_demux'),
+    (r'^login/?$', 'pithos.api.login.redirect_to_login_service')
+)
index 5e80dec..ec731d1 100644 (file)
@@ -54,6 +54,10 @@ from pithos.api.faults import (Fault, NotModified, BadRequest, Unauthorized, For
                                 Conflict, LengthRequired, PreconditionFailed, RequestEntityTooLarge,
                                 RangeNotSatisfiable, InternalServerError, NotImplemented)
 from pithos.api.short_url import encode_url
+from pithos.api.settings import (BACKEND_DB_MODULE, BACKEND_DB_CONNECTION,
+                                    BACKEND_BLOCK_MODULE, BACKEND_BLOCK_PATH,
+                                    BACKEND_QUEUE_MODULE, BACKEND_QUEUE_CONNECTION,
+                                    BACKEND_QUOTA, BACKEND_VERSIONING)
 from pithos.backends import connect_backend
 from pithos.backends.base import NotAllowedError, QuotaError
 
@@ -100,7 +104,7 @@ def printable_header_dict(d):
     Format 'last_modified' timestamp.
     """
     
-    if 'last_modified' in d:
+    if 'last_modified' in d and d['last_modified']:
         d['last_modified'] = isoformat(datetime.fromtimestamp(d['last_modified']))
     return dict([(k.lower().replace('-', '_'), v) for k, v in d.iteritems()])
 
@@ -115,8 +119,18 @@ def get_header_prefix(request, prefix):
     # TODO: Document or remove '~' replacing.
     return dict([(format_header_key(k[5:]), v.replace('~', '')) for k, v in request.META.iteritems() if k.startswith(prefix) and len(k) > len(prefix)])
 
+def check_meta_headers(meta):
+    if len(meta) > 90:
+        raise BadRequest('Too many headers.')
+    for k, v in meta.iteritems():
+        if len(k) > 128:
+            raise BadRequest('Header name too large.')
+        if len(v) > 256:
+            raise BadRequest('Header value too large.')
+
 def get_account_headers(request):
     meta = get_header_prefix(request, 'X-Account-Meta-')
+    check_meta_headers(meta)
     groups = {}
     for k, v in get_header_prefix(request, 'X-Account-Group-').iteritems():
         n = k[16:].lower()
@@ -147,6 +161,7 @@ def put_account_headers(response, meta, groups, policy):
 
 def get_container_headers(request):
     meta = get_header_prefix(request, 'X-Container-Meta-')
+    check_meta_headers(meta)
     policy = dict([(k[19:].lower(), v.replace(' ', '')) for k, v in get_header_prefix(request, 'X-Container-Policy-').iteritems()])
     return meta, policy
 
@@ -168,22 +183,21 @@ def put_container_headers(request, response, meta, policy):
         response[smart_str(format_header_key('X-Container-Policy-' + k), strings_only=True)] = smart_str(v, strings_only=True)
 
 def get_object_headers(request):
+    content_type = request.META.get('CONTENT_TYPE', None)
     meta = get_header_prefix(request, 'X-Object-Meta-')
-    if request.META.get('CONTENT_TYPE'):
-        meta['Content-Type'] = request.META['CONTENT_TYPE']
+    check_meta_headers(meta)
     if request.META.get('HTTP_CONTENT_ENCODING'):
         meta['Content-Encoding'] = request.META['HTTP_CONTENT_ENCODING']
     if request.META.get('HTTP_CONTENT_DISPOSITION'):
         meta['Content-Disposition'] = request.META['HTTP_CONTENT_DISPOSITION']
     if request.META.get('HTTP_X_OBJECT_MANIFEST'):
         meta['X-Object-Manifest'] = request.META['HTTP_X_OBJECT_MANIFEST']
-    return meta, get_sharing(request), get_public(request)
+    return content_type, meta, get_sharing(request), get_public(request)
 
 def put_object_headers(response, meta, restricted=False):
-    if 'ETag' in meta:
-        response['ETag'] = meta['ETag']
+    response['ETag'] = meta['checksum']
     response['Content-Length'] = meta['bytes']
-    response['Content-Type'] = meta.get('Content-Type', 'application/octet-stream')
+    response['Content-Type'] = meta.get('type', 'application/octet-stream')
     response['Last-Modified'] = http_date(int(meta['modified']))
     if not restricted:
         response['X-Object-Hash'] = meta['hash']
@@ -216,8 +230,7 @@ def update_manifest_meta(request, v_account, meta):
             for x in objects:
                 src_meta = request.backend.get_object_meta(request.user_uniq,
                                         v_account, src_container, x[0], 'pithos', x[1])
-                if 'ETag' in src_meta:
-                    etag += src_meta['ETag']
+                etag += src_meta['checksum']
                 bytes += src_meta['bytes']
         except:
             # Ignore errors.
@@ -225,7 +238,7 @@ def update_manifest_meta(request, v_account, meta):
         meta['bytes'] = bytes
         md5 = hashlib.md5()
         md5.update(etag)
-        meta['ETag'] = md5.hexdigest().lower()
+        meta['checksum'] = md5.hexdigest().lower()
 
 def update_sharing_meta(request, permissions, v_account, v_container, v_object, meta):
     if permissions is None:
@@ -272,7 +285,9 @@ def validate_modification_preconditions(request, meta):
 def validate_matching_preconditions(request, meta):
     """Check that the ETag conforms with the preconditions set."""
     
-    etag = meta.get('ETag', None)
+    etag = meta['checksum']
+    if not etag:
+        etag = None
     
     if_match = request.META.get('HTTP_IF_MATCH')
     if if_match is not None:
@@ -305,25 +320,23 @@ def copy_or_move_object(request, src_account, src_container, src_name, dest_acco
     
     if 'ignore_content_type' in request.GET and 'CONTENT_TYPE' in request.META:
         del(request.META['CONTENT_TYPE'])
-    meta, permissions, public = get_object_headers(request)
+    content_type, meta, permissions, public = get_object_headers(request)
     src_version = request.META.get('HTTP_X_SOURCE_VERSION')
     try:
         if move:
             version_id = request.backend.move_object(request.user_uniq, src_account, src_container, src_name,
                                                         dest_account, dest_container, dest_name,
-                                                        'pithos', meta, False, permissions)
+                                                        content_type, 'pithos', meta, False, permissions)
         else:
             version_id = request.backend.copy_object(request.user_uniq, src_account, src_container, src_name,
                                                         dest_account, dest_container, dest_name,
-                                                        'pithos', meta, False, permissions, src_version)
+                                                        content_type, 'pithos', meta, False, permissions, src_version)
     except NotAllowedError:
         raise Forbidden('Not allowed')
     except (NameError, IndexError):
         raise ItemNotFound('Container or object does not exist')
     except ValueError:
         raise BadRequest('Invalid sharing header')
-    except AttributeError, e:
-        raise Conflict(simple_list_response(request, e.data))
     except QuotaError:
         raise RequestEntityTooLarge('Quota exceeded')
     if public is not None:
@@ -647,7 +660,10 @@ class ObjectWrapper(object):
             
             # Get the data from the block.
             bo = self.offset % self.backend.block_size
-            bl = min(self.length, len(self.block) - bo)
+            bs = self.backend.block_size
+            if self.block_index == len(self.hashmaps[self.file_index]) - 1:
+                bs = self.sizes[self.file_index] % self.backend.block_size
+            bl = min(self.length, bs - bo)
             data = self.block[bo:bo + bl]
             self.offset += bl
             self.length -= bl
@@ -712,7 +728,7 @@ def object_data_response(request, sizes, hashmaps, meta, public=False):
                     ranges = [(0, size)]
                     ret = 200
             except ValueError:
-                if if_range != meta['ETag']:
+                if if_range != meta['checksum']:
                     ranges = [(0, size)]
                     ret = 200
     
@@ -752,11 +768,10 @@ def hashmap_md5(request, hashmap, size):
     md5 = hashlib.md5()
     bs = request.backend.block_size
     for bi, hash in enumerate(hashmap):
-        data = request.backend.get_block(hash)
+        data = request.backend.get_block(hash) # Blocks come in padded.
         if bi == len(hashmap) - 1:
-            bs = size % bs
-        pad = bs - min(len(data), bs)
-        md5.update(data + ('\x00' * pad))
+            data = data[:size % bs]
+        md5.update(data)
     return md5.hexdigest().lower()
 
 def simple_list_response(request, l):
@@ -768,25 +783,20 @@ def simple_list_response(request, l):
         return json.dumps(l)
 
 def get_backend():
-    backend = connect_backend(db_module=settings.BACKEND_DB_MODULE,
-                              db_connection=settings.BACKEND_DB_CONNECTION,
-                              block_module=settings.BACKEND_BLOCK_MODULE,
-                              block_path=settings.BACKEND_BLOCK_PATH)
-    backend.default_policy['quota'] = settings.BACKEND_QUOTA
-    backend.default_policy['versioning'] = settings.BACKEND_VERSIONING
+    backend = connect_backend(db_module=BACKEND_DB_MODULE,
+                              db_connection=BACKEND_DB_CONNECTION,
+                              block_module=BACKEND_BLOCK_MODULE,
+                              block_path=BACKEND_BLOCK_PATH,
+                              queue_module=BACKEND_QUEUE_MODULE,
+                              queue_connection=BACKEND_QUEUE_CONNECTION)
+    backend.default_policy['quota'] = BACKEND_QUOTA
+    backend.default_policy['versioning'] = BACKEND_VERSIONING
     return backend
 
 def update_request_headers(request):
     # Handle URL-encoded keys and values.
-    # Handle URL-encoded keys and values.
     meta = dict([(k, v) for k, v in request.META.iteritems() if k.startswith('HTTP_')])
-    if len(meta) > 90:
-        raise BadRequest('Too many headers.')
     for k, v in meta.iteritems():
-        if len(k) > 128:
-            raise BadRequest('Header name too large.')
-        if len(v) > 256:
-            raise BadRequest('Header value too large.')
         try:
             k.decode('ascii')
             v.decode('ascii')
@@ -816,12 +826,9 @@ def update_response_headers(request, response):
             k.startswith('X-Object-') or k.startswith('Content-')):
             del(response[k])
             response[quote(k)] = quote(v, safe='/=,:@; ')
-    
-    if settings.TEST:
-        response['Date'] = format_date_time(time())
 
 def render_fault(request, fault):
-    if isinstance(fault, InternalServerError) and (settings.DEBUG or settings.TEST):
+    if isinstance(fault, InternalServerError) and settings.DEBUG:
         fault.details = format_exc(fault)
     
     request.serialization = 'text'
index cd04a1d..91edaab 100644 (file)
@@ -1,3 +1,2 @@
 from log import LoggingConfigMiddleware
 from secure import SecureMiddleware
-from user import UserMiddleware
index 1db8682..d343567 100644 (file)
@@ -3,16 +3,16 @@
 # Redistribution and use in source and binary forms, with or
 # without modification, are permitted provided that the following
 # conditions are met:
-#
+# 
 #   1. Redistributions of source code must retain the above
 #      copyright notice, this list of conditions and the following
 #      disclaimer.
-#
+# 
 #   2. Redistributions in binary form must reproduce the above
 #      copyright notice, this list of conditions and the following
 #      disclaimer in the documentation and/or other materials
 #      provided with the distribution.
-#
+# 
 # THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
 # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
@@ -25,7 +25,7 @@
 # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-#
+# 
 # The views and conclusions contained in the software and
 # documentation are those of the authors and should not be
 # interpreted as representing official policies, either expressed
 from django.conf import settings
 from django.core.exceptions import MiddlewareNotUsed
 
+from pithos.lib.dictconfig import dictConfig
+
 import logging
 
 
+class NullHandler(logging.Handler):
+    def emit(self, record):
+        pass
+
+
 class LoggingConfigMiddleware:
     def __init__(self):
         '''Initialise the logging setup from settings, called on first request.'''
-        args = {}
-        args['level'] = logging.DEBUG if getattr(settings, 'DEBUG', False) else logging.INFO
-        if getattr(settings, 'LOGFILE', None):
-            args['filename'] = settings.LOGFILE
-        args['format'] = '%(asctime)s [%(levelname)s] %(name)s %(message)s'
-        args['datefmt'] = '%Y-%m-%d %H:%M:%S'
-        logging.basicConfig(**args)
+        logging_setting = getattr(settings, 'LOGGING_SETUP', None)
+        if logging_setting:
+            # Disable handlers that are not used by any logger.
+            active_handlers = set()
+            loggers = logging_setting.get('loggers', {})
+            for logger in loggers.values():
+                active_handlers.update(logger.get('handlers', []))
+            handlers = logging_setting.get('handlers', {})
+            for handler in handlers:
+                if handler not in active_handlers:
+                    handlers[handler] = {'class': 'logging.NullHandler'}
+            
+            logging.NullHandler = NullHandler
+            dictConfig(logging_setting)
         raise MiddlewareNotUsed('Logging setup only.')
diff --git a/snf-pithos-app/pithos/ui/templates/feedback_mail.txt b/snf-pithos-app/pithos/ui/templates/feedback_mail.txt
deleted file mode 100644 (file)
index 77a55f1..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-Feedback message:
-{{ message }}
-
-User info:
-ID: {{ request.user.id }}
-Email: {{ request.user.uniq }}
-
-User application data:
-{{ data|safe }}
-
index b6ef990..3442897 100644 (file)
@@ -35,9 +35,5 @@ from django.conf.urls.defaults import include, patterns
 
 
 urlpatterns = patterns('',
-    (r'^v1(?:$|/)', include('pithos.api.urls')),
-    (r'^v1\.0(?:$|/)', include('pithos.api.urls')),
-    (r'^public/(?P<v_public>.+?)/?$', 'pithos.api.public.public_demux'),
-    (r'^login/?$', 'pithos.api.login.redirect_to_login_service'),
-    (r'^ui', include('pithos.ui.urls'))
+    (r'', include('pithos.api.urls')),
 )
index daf789d..d0ea038 100644 (file)
@@ -74,7 +74,7 @@ class BaseBackend(object):
         """
         return []
     
-    def get_account_meta(self, user, account, domain, until=None):
+    def get_account_meta(self, user, account, domain, until=None, include_user_defined=True):
         """Return a dictionary with the account metadata for the domain.
         
         The keys returned are all user-defined, except:
@@ -185,7 +185,17 @@ class BaseBackend(object):
         """
         return []
     
-    def get_container_meta(self, user, account, container, domain, until=None):
+    def list_container_meta(self, user, account, container, domain, until=None):
+        """Return a list with all the container's object meta keys for the domain.
+        
+        Raises:
+            NotAllowedError: Operation not permitted
+            
+            NameError: Container does not exist
+        """
+        return []
+    
+    def get_container_meta(self, user, account, container, domain, until=None, include_user_defined=True):
         """Return a dictionary with the container metadata for the domain.
         
         The keys returned are all user-defined, except:
@@ -310,8 +320,11 @@ class BaseBackend(object):
         """
         return []
     
-    def list_object_meta(self, user, account, container, domain, until=None):
-        """Return a list with all the container's object meta keys for the domain.
+    def list_object_meta(self, user, account, container, prefix='', delimiter=None, marker=None, limit=10000, virtual=True, domain=None, keys=[], shared=False, until=None, size_range=None):
+        """Return a list of object metadata dicts existing under a container.
+        
+        Same parameters as list_objects. Returned dicts have no user-defined
+        metadata and, if until is not None, a None 'modified' timestamp.
         
         Raises:
             NotAllowedError: Operation not permitted
@@ -320,7 +333,19 @@ class BaseBackend(object):
         """
         return []
     
-    def get_object_meta(self, user, account, container, name, domain, version=None):
+    def list_object_permissions(self, user, account, container, prefix=''):
+        """Return a list of paths that enforce permissions under a container.
+        
+        Raises:
+            NotAllowedError: Operation not permitted
+        """
+        return []
+    
+    def list_object_public(self, user, account, container, prefix=''):
+        """Return a dict mapping paths to public ids for objects that are public under a container."""
+        return {}
+    
+    def get_object_meta(self, user, account, container, name, domain, version=None, include_user_defined=True):
         """Return a dictionary with the object metadata for the domain.
         
         The keys returned are all user-defined, except:
@@ -328,6 +353,8 @@ class BaseBackend(object):
             
             'bytes': The total data size
             
+            'type': The content type
+            
             'hash': The hashmap hash
             
             'modified': Last modification timestamp (overall)
@@ -339,6 +366,8 @@ class BaseBackend(object):
             'version_timestamp': The version's modification timestamp
             
             'uuid': A unique identifier that persists data or metadata updates and renames
+            
+            'checksum': The MD5 sum of the object (may be empty)
         
         Raises:
             NotAllowedError: Operation not permitted
@@ -395,11 +424,6 @@ class BaseBackend(object):
             NameError: Container/object does not exist
             
             ValueError: Invalid users/groups in permissions
-            
-            AttributeError: Can not set permissions, as this object
-                is already shared/private by another object higher
-                in the hierarchy, or setting permissions here will
-                invalidate other permissions deeper in the hierarchy
         """
         return
     
@@ -438,7 +462,7 @@ class BaseBackend(object):
         """
         return 0, []
     
-    def update_object_hashmap(self, user, account, container, name, size, hashmap, domain, meta={}, replace_meta=False, permissions=None):
+    def update_object_hashmap(self, user, account, container, name, size, type, hashmap, checksum, domain, meta={}, replace_meta=False, permissions=None):
         """Create/update an object with the specified size and partial hashes and return the new version.
         
         Parameters:
@@ -457,13 +481,15 @@ class BaseBackend(object):
             
             ValueError: Invalid users/groups in permissions
             
-            AttributeError: Can not set permissions
-            
             QuotaError: Account or container quota exceeded
         """
         return ''
     
-    def copy_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, domain, meta={}, replace_meta=False, permissions=None, src_version=None):
+    def update_object_checksum(self, user, account, container, name, version, checksum):
+        """Update an object's checksum."""
+        return
+    
+    def copy_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta={}, replace_meta=False, permissions=None, src_version=None):
         """Copy an object's data and metadata and return the new version.
         
         Parameters:
@@ -486,13 +512,11 @@ class BaseBackend(object):
             
             ValueError: Invalid users/groups in permissions
             
-            AttributeError: Can not set permissions
-            
             QuotaError: Account or container quota exceeded
         """
         return ''
     
-    def move_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, domain, meta={}, replace_meta=False, permissions=None):
+    def move_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta={}, replace_meta=False, permissions=None):
         """Move an object's data and metadata and return the new version.
         
         Parameters:
@@ -511,8 +535,6 @@ class BaseBackend(object):
             
             ValueError: Invalid users/groups in permissions
             
-            AttributeError: Can not set permissions
-            
             QuotaError: Account or container quota exceeded
         """
         return ''
index 2c5fc5e..af64bfa 100644 (file)
@@ -41,7 +41,7 @@ from context_file import ContextFile, file_sync_read_chunks
 
 class Blocker(object):
     """Blocker.
-       Required contstructor parameters: blocksize, blockpath, hashtype.
+       Required constructor parameters: blocksize, blockpath, hashtype.
     """
 
     blocksize = None
@@ -74,6 +74,9 @@ class Blocker(object):
         self.hashlen = len(emptyhash)
         self.emptyhash = emptyhash
 
+    def _pad(self, block):
+        return block + ('\x00' * (self.blocksize - len(block)))
+
     def _get_rear_block(self, blkhash, create=0):
         filename = hexlify(blkhash)
         dir = join(self.blockpath, filename[0:2], filename[2:4], filename[4:6])
@@ -116,7 +119,7 @@ class Blocker(object):
 
         for h in hashes:
             if h == self.emptyhash:
-                append('')
+                append(self._pad(''))
                 continue
             with self._get_rear_block(h, 0) as rbl:
                 if not rbl:
@@ -125,7 +128,7 @@ class Blocker(object):
                     break # there should be just one block there
             if not block:
                 break
-            append(block)
+            append(self._pad(block))
 
         return blocks
 
@@ -145,36 +148,26 @@ class Blocker(object):
 
         return hashlist, missing
 
-    def block_delta(self, blkhash, offdata=()):
+    def block_delta(self, blkhash, offset, data):
         """Construct and store a new block from a given block
-           and a list of (offset, data) 'patches'. Return:
+           and a data 'patch' applied at offset. Return:
            (the hash of the new block, if the block already existed)
         """
-        if not offdata:
-            return None, None
 
         blocksize = self.blocksize
+        if offset >= blocksize or not data:
+            return None, None
+
         block = self.block_retr((blkhash,))
         if not block:
             return None, None
-
+        
         block = block[0]
-        newblock = ''
-        idx = 0
-        size = 0
-        trunc = 0
-        for off, data in offdata:
-            if not data:
-                trunc = 1
-                break
-            newblock += block[idx:off] + data
-            size += off - idx + len(data)
-            if size >= blocksize:
-                break
-            off = size
-
-        if not trunc:
-            newblock += block[size:len(block)]
+        newblock = block[:offset] + data
+        if len(newblock) > blocksize:
+            newblock = newblock[:blocksize]
+        elif len(newblock) < blocksize:
+            newblock += block[len(newblock):]
 
         h, a = self.block_stor((newblock,))
         return h[0], 1 if a else 0
index a366156..f9ecefb 100644 (file)
@@ -40,7 +40,7 @@ from context_file import ContextFile
 
 class Mapper(object):
     """Mapper.
-       Required contstructor parameters: mappath, namelen.
+       Required constructor parameters: mappath, namelen.
     """
     
     mappath = None
index fdd1204..54cd6f3 100644 (file)
@@ -38,7 +38,7 @@ from mapper import Mapper
 
 class Store(object):
     """Store.
-       Required contstructor parameters: path, block_size, hash_algorithm.
+       Required constructor parameters: path, block_size, hash_algorithm.
     """
     
     def __init__(self, **params):
@@ -76,7 +76,7 @@ class Store(object):
         return hashes[0]
     
     def block_update(self, hash, offset, data):
-        h, e = self.blocker.block_delta(hash, ((offset, data),))
+        h, e = self.blocker.block_delta(hash, offset, data)
         return h
     
     def block_search(self, map):
@@ -1,18 +1,18 @@
-# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+# Copyright 2012 GRNET S.A. All rights reserved.
+# 
 # Redistribution and use in source and binary forms, with or
 # without modification, are permitted provided that the following
 # conditions are met:
-#
+# 
 #   1. Redistributions of source code must retain the above
 #      copyright notice, this list of conditions and the following
 #      disclaimer.
-#
+# 
 #   2. Redistributions in binary form must reproduce the above
 #      copyright notice, this list of conditions and the following
 #      disclaimer in the documentation and/or other materials
 #      provided with the distribution.
-#
+# 
 # THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
 # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-#
+# 
 # The views and conclusions contained in the software and
 # documentation are those of the authors and should not be
 # interpreted as representing official policies, either expressed
 # or implied, of GRNET S.A.
 
-import json
+from queue import Queue
 
-from django.conf import settings
-from django.core.mail import send_mail
-from django.http import Http404, HttpResponse
-from django.template.loader import render_to_string
-from django.utils.translation import ugettext as _
+__all__ = ["Queue"]
 
-
-def send_feedback(request):
-    if request.method != 'POST':
-        raise Http404
-    if not request.user:
-        return HttpResponse('Unauthorized', status=401)
-
-    subject = _("Feedback from Pithos")
-    from_email = settings.FEEDBACK_FROM_EMAIL
-    recipient_list = [settings.FEEDBACK_CONTACT_EMAIL]
-    content = render_to_string('feedback_mail.txt', {
-                'message': request.POST.get('feedback-msg'),
-                'data': request.POST.get('feedback-data'),
-                'request': request})
-
-    send_mail(subject, content, from_email, recipient_list)
-
-    resp = json.dumps({'status': 'send'})
-    return HttpResponse(resp)
diff --git a/snf-pithos-backend/pithos/backends/lib/rabbitmq/queue.py b/snf-pithos-backend/pithos/backends/lib/rabbitmq/queue.py
new file mode 100644 (file)
index 0000000..f53ad97
--- /dev/null
@@ -0,0 +1,53 @@
+# Copyright 2012 GRNET S.A. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or
+# without modification, are permitted provided that the following
+# conditions are met:
+# 
+#   1. Redistributions of source code must retain the above
+#      copyright notice, this list of conditions and the following
+#      disclaimer.
+# 
+#   2. Redistributions in binary form must reproduce the above
+#      copyright notice, this list of conditions and the following
+#      disclaimer in the documentation and/or other materials
+#      provided with the distribution.
+# 
+# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+# 
+# The views and conclusions contained in the software and
+# documentation are those of the authors and should not be
+# interpreted as representing official policies, either expressed
+# or implied, of GRNET S.A.
+
+from pithos.lib.queue import exchange_connect, exchange_send, exchange_close, Receipt
+
+
+class Queue(object):
+    """Queue.
+       Required constructor parameters: exchange, message_key, client_id.
+    """
+    
+    def __init__(self, **params):
+        exchange = params['exchange']
+        self.conn = exchange_connect(exchange)
+        self.message_key = params['message_key']
+        self.client_id = params['client_id']
+    
+    def send(self, user, resource, value, details):
+        body = Receipt(self.client_id, user, resource, value, details).format()
+        exchange_send(self.conn, self.message_key, body)
+    
+    def close(self):
+        exchange_close(self.conn)
index 49ada08..d96382f 100644 (file)
 # or implied, of GRNET S.A.
 
 from dbwrapper import DBWrapper
-from node import Node, ROOTNODE, SERIAL, HASH, SIZE, MTIME, MUSER, UUID, CLUSTER
+from node import Node, ROOTNODE, SERIAL, HASH, SIZE, TYPE, MTIME, MUSER, UUID, CHECKSUM, CLUSTER, MATCH_PREFIX, MATCH_EXACT
 from permissions import Permissions, READ, WRITE
 
 __all__ = ["DBWrapper",
-           "Node", "ROOTNODE", "SERIAL", "HASH", "SIZE", "MTIME", "MUSER", "UUID", "CLUSTER",
+           "Node", "ROOTNODE", "SERIAL", "HASH", "SIZE", "TYPE", "MTIME", "MUSER", "UUID", "CHECKSUM", "CLUSTER", "MATCH_PREFIX", "MATCH_EXACT",
            "Permissions", "READ", "WRITE"]
 
index 7caa30e..d931c31 100644 (file)
@@ -44,9 +44,9 @@ class Groups(DBWorker):
         DBWorker.__init__(self, **params)
         metadata = MetaData()
         columns=[]
-        columns.append(Column('owner', String(255), primary_key=True))
-        columns.append(Column('name', String(255), primary_key=True))
-        columns.append(Column('member', String(255), primary_key=True))
+        columns.append(Column('owner', String(256), primary_key=True))
+        columns.append(Column('name', String(256), primary_key=True))
+        columns.append(Column('member', String(256), primary_key=True))
         self.groups = Table('groups', metadata, *columns, mysql_engine='InnoDB')
         
         # place an index on member
index e977057..c01cc7f 100644 (file)
@@ -46,7 +46,9 @@ from pithos.lib.filter import parse_filters
 
 ROOTNODE  = 0
 
-( SERIAL, NODE, HASH, SIZE, SOURCE, MTIME, MUSER, UUID, CLUSTER ) = range(9)
+( SERIAL, NODE, HASH, SIZE, TYPE, SOURCE, MTIME, MUSER, UUID, CHECKSUM, CLUSTER ) = range(11)
+
+( MATCH_PREFIX, MATCH_EXACT ) = range(2)
 
 inf = float('inf')
 
@@ -90,11 +92,13 @@ _propnames = {
     'node'      : 1,
     'hash'      : 2,
     'size'      : 3,
-    'source'    : 4,
-    'mtime'     : 5,
-    'muser'     : 6,
-    'uuid'      : 7,
-    'cluster'   : 8
+    'type'      : 4,
+    'source'    : 5,
+    'mtime'     : 6,
+    'muser'     : 7,
+    'uuid'      : 8,
+    'checksum'  : 9,
+    'cluster'   : 10
 }
 
 
@@ -118,8 +122,7 @@ class Node(DBWorker):
                                          ondelete='CASCADE',
                                          onupdate='CASCADE'),
                               autoincrement=False))
-        path_length = 2048
-        columns.append(Column('path', String(path_length), default='', nullable=False))
+        columns.append(Column('path', String(2048), default='', nullable=False))
         self.nodes = Table('nodes', metadata, *columns, mysql_engine='InnoDB')
         Index('idx_nodes_path', self.nodes.c.path, unique=True)
         
@@ -130,8 +133,8 @@ class Node(DBWorker):
                                          ondelete='CASCADE',
                                          onupdate='CASCADE'),
                               primary_key=True))
-        columns.append(Column('key', String(255), primary_key=True))
-        columns.append(Column('value', String(255)))
+        columns.append(Column('key', String(128), primary_key=True))
+        columns.append(Column('value', String(256)))
         self.policies = Table('policy', metadata, *columns, mysql_engine='InnoDB')
         
         #create statistics table
@@ -155,12 +158,14 @@ class Node(DBWorker):
                               ForeignKey('nodes.node',
                                          ondelete='CASCADE',
                                          onupdate='CASCADE')))
-        columns.append(Column('hash', String(255)))
+        columns.append(Column('hash', String(256)))
         columns.append(Column('size', BigInteger, nullable=False, default=0))
+        columns.append(Column('type', String(256), nullable=False, default=''))
         columns.append(Column('source', Integer))
         columns.append(Column('mtime', DECIMAL(precision=16, scale=6)))
-        columns.append(Column('muser', String(255), nullable=False, default=''))
+        columns.append(Column('muser', String(256), nullable=False, default=''))
         columns.append(Column('uuid', String(64), nullable=False, default=''))
+        columns.append(Column('checksum', String(256), nullable=False, default=''))
         columns.append(Column('cluster', Integer, nullable=False, default=0))
         self.versions = Table('versions', metadata, *columns, mysql_engine='InnoDB')
         Index('idx_versions_node_mtime', self.versions.c.node, self.versions.c.mtime)
@@ -173,9 +178,9 @@ class Node(DBWorker):
                                          ondelete='CASCADE',
                                          onupdate='CASCADE'),
                               primary_key=True))
-        columns.append(Column('domain', String(255), primary_key=True))
-        columns.append(Column('key', String(255), primary_key=True))
-        columns.append(Column('value', String(255)))
+        columns.append(Column('domain', String(256), primary_key=True))
+        columns.append(Column('key', String(128), primary_key=True))
+        columns.append(Column('value', String(256)))
         self.attributes = Table('attributes', metadata, *columns, mysql_engine='InnoDB')
         
         metadata.create_all(self.engine)
@@ -229,17 +234,19 @@ class Node(DBWorker):
     def node_get_versions(self, node, keys=(), propnames=_propnames):
         """Return the properties of all versions at node.
            If keys is empty, return all properties in the order
-           (serial, node, hash, size, source, mtime, muser, uuid, cluster).
+           (serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster).
         """
         
         s = select([self.versions.c.serial,
                     self.versions.c.node,
                     self.versions.c.hash,
                     self.versions.c.size,
+                    self.versions.c.type,
                     self.versions.c.source,
                     self.versions.c.mtime,
                     self.versions.c.muser,
                     self.versions.c.uuid,
+                    self.versions.c.checksum,
                     self.versions.c.cluster], self.versions.c.node == node)
         s = s.order_by(self.versions.c.serial)
         r = self.conn.execute(s)
@@ -267,7 +274,7 @@ class Node(DBWorker):
     def node_purge_children(self, parent, before=inf, cluster=0):
         """Delete all versions with the specified
            parent and cluster, and return
-           the hashes of versions deleted.
+           the hashes and size of versions deleted.
            Clears out nodes with no remaining versions.
         """
         #update statistics
@@ -284,7 +291,7 @@ class Node(DBWorker):
         row = r.fetchone()
         r.close()
         if not row:
-            return ()
+            return (), 0
         nr, size = row[0], -row[1] if row[1] else 0
         mtime = time()
         self.statistics_update(parent, -nr, size, mtime, cluster)
@@ -312,12 +319,12 @@ class Node(DBWorker):
         s = self.nodes.delete().where(self.nodes.c.node.in_(nodes))
         self.conn.execute(s).close()
         
-        return hashes
+        return hashes, size
     
     def node_purge(self, node, before=inf, cluster=0):
         """Delete all versions with the specified
            node and cluster, and return
-           the hashes of versions deleted.
+           the hashes and size of versions deleted.
            Clears out the node if it has no remaining versions.
         """
         
@@ -334,7 +341,7 @@ class Node(DBWorker):
         nr, size = row[0], row[1]
         r.close()
         if not nr:
-            return ()
+            return (), 0
         mtime = time()
         self.statistics_update_ancestors(node, -nr, -size, mtime, cluster)
         
@@ -360,7 +367,7 @@ class Node(DBWorker):
         s = self.nodes.delete().where(self.nodes.c.node.in_(nodes))
         self.conn.execute(s).close()
         
-        return hashes
+        return hashes, size
     
     def node_remove(self, node):
         """Remove the node specified.
@@ -488,10 +495,12 @@ class Node(DBWorker):
                     self.versions.c.node,
                     self.versions.c.hash,
                     self.versions.c.size,
+                    self.versions.c.type,
                     self.versions.c.source,
                     self.versions.c.mtime,
                     self.versions.c.muser,
                     self.versions.c.uuid,
+                    self.versions.c.checksum,
                     self.versions.c.cluster])
         filtered = select([func.max(self.versions.c.serial)],
                             self.versions.c.node == node)
@@ -529,7 +538,7 @@ class Node(DBWorker):
             return (0, 0, mtime)
         
         # All children (get size and mtime).
-        # XXX: This is why the full path is stored.
+        # This is why the full path is stored.
         s = select([func.count(v.c.serial),
                     func.sum(v.c.size),
                     func.max(v.c.mtime)])
@@ -550,14 +559,14 @@ class Node(DBWorker):
         mtime = max(mtime, r[2])
         return (count, size, mtime)
     
-    def version_create(self, node, hash, size, source, muser, uuid, cluster=0):
+    def version_create(self, node, hash, size, type, source, muser, uuid, checksum, cluster=0):
         """Create a new version from the given properties.
            Return the (serial, mtime) of the new version.
         """
         
         mtime = time()
-        s = self.versions.insert().values(node=node, hash=hash, size=size, source=source,
-                                          mtime=mtime, muser=muser, uuid=uuid, cluster=cluster)
+        s = self.versions.insert().values(node=node, hash=hash, size=size, type=type, source=source,
+                                          mtime=mtime, muser=muser, uuid=uuid, checksum=checksum, cluster=cluster)
         serial = self.conn.execute(s).inserted_primary_key[0]
         self.statistics_update_ancestors(node, 1, size, mtime, cluster)
         return serial, mtime
@@ -565,14 +574,15 @@ class Node(DBWorker):
     def version_lookup(self, node, before=inf, cluster=0):
         """Lookup the current version of the given node.
            Return a list with its properties:
-           (serial, node, hash, size, source, mtime, muser, uuid, cluster)
+           (serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster)
            or None if the current version is not found in the given cluster.
         """
         
         v = self.versions.alias('v')
         s = select([v.c.serial, v.c.node, v.c.hash,
-                    v.c.size, v.c.source, v.c.mtime,
-                    v.c.muser, v.c.uuid, v.c.cluster])
+                    v.c.size, v.c.type, v.c.source,
+                    v.c.mtime, v.c.muser, v.c.uuid,
+                    v.c.checksum, v.c.cluster])
         c = select([func.max(self.versions.c.serial)],
             self.versions.c.node == node)
         if before != inf:
@@ -590,13 +600,14 @@ class Node(DBWorker):
         """Return a sequence of values for the properties of
            the version specified by serial and the keys, in the order given.
            If keys is empty, return all properties in the order
-           (serial, node, hash, size, source, mtime, muser, uuid, cluster).
+           (serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster).
         """
         
         v = self.versions.alias()
         s = select([v.c.serial, v.c.node, v.c.hash,
-                    v.c.size, v.c.source, v.c.mtime,
-                    v.c.muser, v.c.uuid, v.c.cluster], v.c.serial == serial)
+                    v.c.size, v.c.type, v.c.source,
+                    v.c.mtime, v.c.muser, v.c.uuid,
+                    v.c.checksum, v.c.cluster], v.c.serial == serial)
         rp = self.conn.execute(s)
         r = rp.fetchone()
         rp.close()
@@ -607,6 +618,16 @@ class Node(DBWorker):
             return r
         return [r[propnames[k]] for k in keys if k in propnames]
     
+    def version_put_property(self, serial, key, value):
+        """Set value for the property of version specified by key."""
+        
+        if key not in _propnames:
+            return
+        s = self.versions.update()
+        s = s.where(self.versions.c.serial == serial)
+        s = s.values(**{key: value})
+        self.conn.execute(s).close()
+    
     def version_recluster(self, serial, cluster):
         """Move the version into another cluster."""
         
@@ -644,7 +665,7 @@ class Node(DBWorker):
         
         s = self.versions.delete().where(self.versions.c.serial == serial)
         self.conn.execute(s).close()
-        return hash
+        return hash, size
     
     def attribute_get(self, serial, domain, keys=()):
         """Return a list of (key, value) pairs of the version specified by serial.
@@ -748,8 +769,11 @@ class Node(DBWorker):
         s = s.where(a.c.domain == domain)
         s = s.where(n.c.node == v.c.node)
         conj = []
-        for x in pathq:
-            conj.append(n.c.path.like(self.escape_like(x) + '%', escape='\\'))
+        for path, match in pathq:
+            if match == MATCH_PREFIX:
+                conj.append(n.c.path.like(self.escape_like(path) + '%', escape='\\'))
+            elif match == MATCH_EXACT:
+                conj.append(n.c.path == path)
         if conj:
             s = s.where(or_(*conj))
         rp = self.conn.execute(s)
@@ -759,7 +783,8 @@ class Node(DBWorker):
     
     def latest_version_list(self, parent, prefix='', delimiter=None,
                             start='', limit=10000, before=inf,
-                            except_cluster=0, pathq=[], domain=None, filterq=[], sizeq=None):
+                            except_cluster=0, pathq=[], domain=None,
+                            filterq=[], sizeq=None, all_props=False):
         """Return a (list of (path, serial) tuples, list of common prefixes)
            for the current versions of the paths with the given parent,
            matching the following criteria.
@@ -806,6 +831,8 @@ class Node(DBWorker):
            will always match.
            
            Limit applies to the first list of tuples returned.
+           
+           If all_props is True, return all properties after path, not just serial.
         """
         
         if not start or start < prefix:
@@ -814,7 +841,14 @@ class Node(DBWorker):
         
         v = self.versions.alias('v')
         n = self.nodes.alias('n')
-        s = select([n.c.path, v.c.serial]).distinct()
+        if not all_props:
+            s = select([n.c.path, v.c.serial]).distinct()
+        else:
+            s = select([n.c.path,
+                        v.c.serial, v.c.node, v.c.hash,
+                        v.c.size, v.c.type, v.c.source,
+                        v.c.mtime, v.c.muser, v.c.uuid,
+                        v.c.checksum, v.c.cluster]).distinct()
         filtered = select([func.max(self.versions.c.serial)])
         if before != inf:
             filtered = filtered.where(self.versions.c.mtime < before)
@@ -826,8 +860,11 @@ class Node(DBWorker):
         s = s.where(n.c.node == v.c.node)
         s = s.where(and_(n.c.path > bindparam('start'), n.c.path < nextling))
         conj = []
-        for x in pathq:
-            conj.append(n.c.path.like(self.escape_like(x) + '%', escape='\\'))
+        for path, match in pathq:
+            if match == MATCH_PREFIX:
+                conj.append(n.c.path.like(self.escape_like(path) + '%', escape='\\'))
+            elif match == MATCH_EXACT:
+                conj.append(n.c.path == path)
         if conj:
             s = s.where(or_(*conj))
         
@@ -882,7 +919,8 @@ class Node(DBWorker):
             props = rp.fetchone()
             if props is None:
                 break
-            path, serial = props
+            path = props[0]
+            serial = props[1]
             idx = path.find(delimiter, pfz)
             
             if idx < 0:
index 2f54aaa..2e03c95 100644 (file)
@@ -58,17 +58,39 @@ class Permissions(XFeatures, Groups, Public):
         if not members:
             return
         feature = self.xfeature_create(path)
-        if feature is None:
-            return
         self.feature_setmany(feature, access, members)
     
     def access_set(self, path, permissions):
         """Set permissions for path. The permissions dict
            maps 'read', 'write' keys to member lists."""
         
-        self.xfeature_destroy(path)
-        self.access_grant(path, READ, permissions.get('read', []))
-        self.access_grant(path, WRITE, permissions.get('write', []))
+        r = permissions.get('read', [])
+        w = permissions.get('write', [])
+        if not r and not w:
+            self.xfeature_destroy(path)
+            return
+        feature = self.xfeature_create(path)
+        if r:
+            self.feature_clear(feature, READ)
+            self.feature_setmany(feature, READ, r)
+        if w:
+            self.feature_clear(feature, WRITE)
+            self.feature_setmany(feature, WRITE, w)
+    
+    def access_get(self, path):
+        """Get permissions for path."""
+        
+        feature = self.xfeature_get(path)
+        if not feature:
+            return {}
+        permissions = self.feature_dict(feature)
+        if READ in permissions:
+            permissions['read'] = permissions[READ]
+            del(permissions[READ])
+        if WRITE in permissions:
+            permissions['write'] = permissions[WRITE]
+            del(permissions[WRITE])
+        return permissions
     
     def access_clear(self, path):
         """Revoke access to path (both permissions and public)."""
@@ -79,13 +101,9 @@ class Permissions(XFeatures, Groups, Public):
     def access_check(self, path, access, member):
         """Return true if the member has this access to the path."""
         
-        if access == READ and self.public_get(path) is not None:
-            return True
-        
-        r = self.xfeature_inherit(path)
-        if not r:
+        feature = self.xfeature_get(path)
+        if not feature:
             return False
-        fpath, feature = r
         members = self.feature_get(feature, access)
         if member in members or '*' in members:
             return True
@@ -95,25 +113,23 @@ class Permissions(XFeatures, Groups, Public):
         return False
     
     def access_inherit(self, path):
-        """Return the inherited or assigned (path, permissions) pair for path."""
+        """Return the paths influencing the access for path."""
         
-        r = self.xfeature_inherit(path)
-        if not r:
-            return (path, {})
-        fpath, feature = r
-        permissions = self.feature_dict(feature)
-        if READ in permissions:
-            permissions['read'] = permissions[READ]
-            del(permissions[READ])
-        if WRITE in permissions:
-            permissions['write'] = permissions[WRITE]
-            del(permissions[WRITE])
-        return (fpath, permissions)
-    
-    def access_list(self, path):
-        """List all permission paths inherited by or inheriting from path."""
+#         r = self.xfeature_inherit(path)
+#         if not r:
+#             return []
+#         # Compute valid.
+#         return [x[0] for x in r if x[0] in valid]
         
-        return [x[0] for x in self.xfeature_list(path) if x[0] != path]
+        # Only keep path components.
+        parts = path.rstrip('/').split('/')
+        valid = []
+        for i in range(1, len(parts)):
+            subp = '/'.join(parts[:i + 1])
+            valid.append(subp)
+            if subp != path:
+                valid.append(subp + '/')
+        return [x for x in valid if self.xfeature_get(x)]
     
     def access_list_paths(self, member, prefix=None):
         """Return the list of paths granted to member."""
index 3618961..25bf0f3 100644 (file)
@@ -85,6 +85,15 @@ class Public(DBWorker):
             return row[0]
         return None
     
+    def public_list(self, prefix):
+        s = select([self.public.c.path, self.public.c.public_id])
+        s = s.where(self.public.c.path.like(self.escape_like(prefix) + '%', escape='\\'))
+        s = s.where(self.public.c.active == True)
+        r = self.conn.execute(s)
+        rows = r.fetchall()
+        r.close()
+        return rows
+    
     def public_path(self, public):
         s = select([self.public.c.path])
         s = s.where(and_(self.public.c.public_id == public,
index 8fca110..d9ebd66 100644 (file)
@@ -62,58 +62,44 @@ class XFeatures(DBWorker):
                               primary_key=True))
         columns.append(Column('key', Integer, primary_key=True,
                               autoincrement=False))
-        columns.append(Column('value', String(255), primary_key=True))
+        columns.append(Column('value', String(256), primary_key=True))
         self.xfeaturevals = Table('xfeaturevals', metadata, *columns, mysql_engine='InnoDB')
         
         metadata.create_all(self.engine)
     
-    def xfeature_inherit(self, path):
-        """Return the (path, feature) inherited by the path, or None."""
-        
-        s = select([self.xfeatures.c.path, self.xfeatures.c.feature_id])
-        s = s.where(self.xfeatures.c.path <= path)
-        s = s.order_by(desc(self.xfeatures.c.path)).limit(1)
-        r = self.conn.execute(s)
-        row = r.fetchone()
-        r.close()
-        if row and path.startswith(row[0]):
-            return row
-        else:
-            return None
+#     def xfeature_inherit(self, path):
+#         """Return the (path, feature) inherited by the path, or None."""
+#         
+#         s = select([self.xfeatures.c.path, self.xfeatures.c.feature_id])
+#         s = s.where(self.xfeatures.c.path <= path)
+#         #s = s.where(self.xfeatures.c.path.like(self.escape_like(path) + '%', escape='\\')) # XXX: Implement reverse and escape like...
+#         s = s.order_by(desc(self.xfeatures.c.path))
+#         r = self.conn.execute(s)
+#         l = r.fetchall()
+#         r.close()
+#         return l
     
-    def xfeature_list(self, path):
-        """Return the list of the (prefix, feature) pairs matching path.
-           A prefix matches path if either the prefix includes the path,
-           or the path includes the prefix.
-        """
-        
-        inherited = self.xfeature_inherit(path)
-        if inherited:
-            return [inherited]
+    def xfeature_get(self, path):
+        """Return feature for path."""
         
-        s = select([self.xfeatures.c.path, self.xfeatures.c.feature_id])
-        s = s.where(and_(self.xfeatures.c.path.like(self.escape_like(path) + '%', escape='\\'),
-                     self.xfeatures.c.path != path))
+        s = select([self.xfeatures.c.feature_id])
+        s = s.where(self.xfeatures.c.path == path)
         s = s.order_by(self.xfeatures.c.path)
         r = self.conn.execute(s)
-        l = r.fetchall()
+        row = r.fetchone()
         r.close()
-        return l
+        if row:
+            return row[0]
+        return None
     
     def xfeature_create(self, path):
         """Create and return a feature for path.
-           If the path already inherits a feature or
-           bestows to paths already inheriting a feature,
-           create no feature and return None.
            If the path has a feature, return it.
         """
         
-        prefixes = self.xfeature_list(path)
-        pl = len(prefixes)
-        if (pl > 1) or (pl == 1 and prefixes[0][0] != path):
-            return None
-        if pl == 1 and prefixes[0][0] == path:
-            return prefixes[0][1]
+        feature = self.xfeature_get(path)
+        if feature is not None:
+            return feature
         s = self.xfeatures.insert()
         r = self.conn.execute(s, path=path)
         inserted_primary_key = r.inserted_primary_key[0]
index 49ada08..d96382f 100644 (file)
 # or implied, of GRNET S.A.
 
 from dbwrapper import DBWrapper
-from node import Node, ROOTNODE, SERIAL, HASH, SIZE, MTIME, MUSER, UUID, CLUSTER
+from node import Node, ROOTNODE, SERIAL, HASH, SIZE, TYPE, MTIME, MUSER, UUID, CHECKSUM, CLUSTER, MATCH_PREFIX, MATCH_EXACT
 from permissions import Permissions, READ, WRITE
 
 __all__ = ["DBWrapper",
-           "Node", "ROOTNODE", "SERIAL", "HASH", "SIZE", "MTIME", "MUSER", "UUID", "CLUSTER",
+           "Node", "ROOTNODE", "SERIAL", "HASH", "SIZE", "TYPE", "MTIME", "MUSER", "UUID", "CHECKSUM", "CLUSTER", "MATCH_PREFIX", "MATCH_EXACT",
            "Permissions", "READ", "WRITE"]
 
index 752c3c0..e45a967 100644 (file)
@@ -40,7 +40,9 @@ from pithos.lib.filter import parse_filters
 
 ROOTNODE  = 0
 
-( SERIAL, NODE, HASH, SIZE, SOURCE, MTIME, MUSER, UUID, CLUSTER ) = range(9)
+( SERIAL, NODE, HASH, SIZE, TYPE, SOURCE, MTIME, MUSER, UUID, CHECKSUM, CLUSTER ) = range(11)
+
+( MATCH_PREFIX, MATCH_EXACT ) = range(2)
 
 inf = float('inf')
 
@@ -85,11 +87,13 @@ _propnames = {
     'node'      : 1,
     'hash'      : 2,
     'size'      : 3,
-    'source'    : 4,
-    'mtime'     : 5,
-    'muser'     : 6,
-    'uuid'      : 7,
-    'cluster'   : 8
+    'type'      : 4,
+    'source'    : 5,
+    'mtime'     : 6,
+    'muser'     : 7,
+    'uuid'      : 8,
+    'checksum'  : 9,
+    'cluster'   : 10
 }
 
 
@@ -145,10 +149,12 @@ class Node(DBWorker):
                             node       integer,
                             hash       text,
                             size       integer not null default 0,
+                            type       text    not null default '',
                             source     integer,
                             mtime      integer,
                             muser      text    not null default '',
                             uuid       text    not null default '',
+                            checksum   text    not null default '',
                             cluster    integer not null default 0,
                             foreign key (node)
                             references nodes(node)
@@ -207,10 +213,10 @@ class Node(DBWorker):
     def node_get_versions(self, node, keys=(), propnames=_propnames):
         """Return the properties of all versions at node.
            If keys is empty, return all properties in the order
-           (serial, node, hash, size, source, mtime, muser, uuid, cluster).
+           (serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster).
         """
         
-        q = ("select serial, node, hash, size, source, mtime, muser, uuid, cluster "
+        q = ("select serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster "
              "from versions "
              "where node = ?")
         self.execute(q, (node,))
@@ -235,7 +241,7 @@ class Node(DBWorker):
     def node_purge_children(self, parent, before=inf, cluster=0):
         """Delete all versions with the specified
            parent and cluster, and return
-           the hashes of versions deleted.
+           the hashes and size of versions deleted.
            Clears out nodes with no remaining versions.
         """
         
@@ -250,7 +256,7 @@ class Node(DBWorker):
         execute(q, args)
         nr, size = self.fetchone()
         if not nr:
-            return ()
+            return (), 0
         mtime = time()
         self.statistics_update(parent, -nr, -size, mtime, cluster)
         self.statistics_update_ancestors(parent, -nr, -size, mtime, cluster)
@@ -277,12 +283,12 @@ class Node(DBWorker):
                                    "where node = n.node) = 0 "
                             "and parent = ?)")
         execute(q, (parent,))
-        return hashes
+        return hashes, size
     
     def node_purge(self, node, before=inf, cluster=0):
         """Delete all versions with the specified
            node and cluster, and return
-           the hashes of versions deleted.
+           the hashes and size of versions deleted.
            Clears out the node if it has no remaining versions.
         """
         
@@ -295,7 +301,7 @@ class Node(DBWorker):
         execute(q, args)
         nr, size = self.fetchone()
         if not nr:
-            return ()
+            return (), 0
         mtime = time()
         self.statistics_update_ancestors(node, -nr, -size, mtime, cluster)
         
@@ -317,7 +323,7 @@ class Node(DBWorker):
                                    "where node = n.node) = 0 "
                             "and node = ?)")
         execute(q, (node,))
-        return hashes
+        return hashes, size
     
     def node_remove(self, node):
         """Remove the node specified.
@@ -413,7 +419,7 @@ class Node(DBWorker):
         parent, path = props
         
         # The latest version.
-        q = ("select serial, node, hash, size, source, mtime, muser, uuid, cluster "
+        q = ("select serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster "
              "from versions "
              "where serial = (select max(serial) "
                              "from versions "
@@ -445,7 +451,7 @@ class Node(DBWorker):
             return (0, 0, mtime)
         
         # All children (get size and mtime).
-        # XXX: This is why the full path is stored.
+        # This is why the full path is stored.
         q = ("select count(serial), sum(size), max(mtime) "
              "from versions v "
              "where serial = (select max(serial) "
@@ -463,15 +469,15 @@ class Node(DBWorker):
         mtime = max(mtime, r[2])
         return (count, size, mtime)
     
-    def version_create(self, node, hash, size, source, muser, uuid, cluster=0):
+    def version_create(self, node, hash, size, type, source, muser, uuid, checksum, cluster=0):
         """Create a new version from the given properties.
            Return the (serial, mtime) of the new version.
         """
         
-        q = ("insert into versions (node, hash, size, source, mtime, muser, uuid, cluster) "
-             "values (?, ?, ?, ?, ?, ?, ?, ?)")
+        q = ("insert into versions (node, hash, size, type, source, mtime, muser, uuid, checksum, cluster) "
+             "values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
         mtime = time()
-        props = (node, hash, size, source, mtime, muser, uuid, cluster)
+        props = (node, hash, size, type, source, mtime, muser, uuid, checksum, cluster)
         serial = self.execute(q, props).lastrowid
         self.statistics_update_ancestors(node, 1, size, mtime, cluster)
         return serial, mtime
@@ -479,11 +485,11 @@ class Node(DBWorker):
     def version_lookup(self, node, before=inf, cluster=0):
         """Lookup the current version of the given node.
            Return a list with its properties:
-           (serial, node, hash, size, source, mtime, muser, uuid, cluster)
+           (serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster)
            or None if the current version is not found in the given cluster.
         """
         
-        q = ("select serial, node, hash, size, source, mtime, muser, uuid, cluster "
+        q = ("select serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster "
              "from versions "
              "where serial = (select max(serial) "
                              "from versions "
@@ -499,10 +505,10 @@ class Node(DBWorker):
         """Return a sequence of values for the properties of
            the version specified by serial and the keys, in the order given.
            If keys is empty, return all properties in the order
-           (serial, node, hash, size, source, mtime, muser, uuid, cluster).
+           (serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster).
         """
         
-        q = ("select serial, node, hash, size, source, mtime, muser, uuid, cluster "
+        q = ("select serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster "
              "from versions "
              "where serial = ?")
         self.execute(q, (serial,))
@@ -514,6 +520,14 @@ class Node(DBWorker):
             return r
         return [r[propnames[k]] for k in keys if k in propnames]
     
+    def version_put_property(self, serial, key, value):
+        """Set value for the property of version specified by key."""
+        
+        if key not in _propnames:
+            return
+        q = "update versions set %s = ? where serial = ?" % key
+        self.execute(q, (value, serial))
+    
     def version_recluster(self, serial, cluster):
         """Move the version into another cluster."""
         
@@ -549,7 +563,7 @@ class Node(DBWorker):
         
         q = "delete from versions where serial = ?"
         self.execute(q, (serial,))
-        return hash
+        return hash, size
     
     def attribute_get(self, serial, domain, keys=()):
         """Return a list of (key, value) pairs of the version specified by serial.
@@ -640,10 +654,18 @@
         if not pathq:
            return None, None
        
-        subq = " and ("
-        subq += ' or '.join(("n.path like ? escape '\\'" for x in pathq))
-        subq += ")"
-        args = tuple([self.escape_like(x) + '%' for x in pathq])
+        subqlist = []
+        args = []
+        for path, match in pathq:
+            if match == MATCH_PREFIX:
+                subqlist.append("n.path like ? escape '\\'")
+                args.append(self.escape_like(path) + '%')
+            elif match == MATCH_EXACT:
+                subqlist.append("n.path = ?")
+                args.append(path)
+        
+        subq = ' and (' + ' or '.join(subqlist) + ')'
+        args = tuple(args)
         
         return subq, args
     
@@ -691,7 +714,8 @@ class Node(DBWorker):
     
     def latest_version_list(self, parent, prefix='', delimiter=None,
                             start='', limit=10000, before=inf,
-                            except_cluster=0, pathq=[], domain=None, filterq=[], sizeq=None):
+                            except_cluster=0, pathq=[], domain=None,
+                            filterq=[], sizeq=None, all_props=False):
         """Return a (list of (path, serial) tuples, list of common prefixes)
            for the current versions of the paths with the given parent,
            matching the following criteria.
@@ -738,6 +762,8 @@ class Node(DBWorker):
            will always match.
            
            Limit applies to the first list of tuples returned.
+           
+           If all_props is True, return all properties after path, not just serial.
         """
         
         execute = self.execute
@@ -746,7 +772,7 @@ class Node(DBWorker):
             start = strprevling(prefix)
         nextling = strnextling(prefix)
         
-        q = ("select distinct n.path, v.serial "
+        q = ("select distinct n.path, %s "
              "from versions v, nodes n "
              "where v.serial = (select max(serial) "
                                "from versions "
@@ -757,6 +783,10 @@ class Node(DBWorker):
                             "where parent = ?) "
              "and n.node = v.node "
              "and n.path > ? and n.path < ?")
+        if not all_props:
+            q = q % "v.serial"
+        else:
+            q = q % "v.serial, v.node, v.hash, v.size, v.type, v.source, v.mtime, v.muser, v.uuid, v.checksum, v.cluster"
         args = [before, except_cluster, parent, start, nextling]
         
         subq, subargs = self._construct_paths(pathq)
@@ -796,7 +826,8 @@ class Node(DBWorker):
             props = fetchone()
             if props is None:
                 break
-            path, serial = props
+            path = props[0]
+            serial = props[1]
             idx = path.find(delimiter, pfz)
             
             if idx < 0:
index d5b3434..476aaf2 100644 (file)
@@ -55,17 +55,39 @@ class Permissions(XFeatures, Groups, Public):
         if not members:
             return
         feature = self.xfeature_create(path)
-        if feature is None:
-            return
         self.feature_setmany(feature, access, members)
     
     def access_set(self, path, permissions):
         """Set permissions for path. The permissions dict
            maps 'read', 'write' keys to member lists."""
         
-        self.xfeature_destroy(path)
-        self.access_grant(path, READ, permissions.get('read', []))
-        self.access_grant(path, WRITE, permissions.get('write', []))
+        r = permissions.get('read', [])
+        w = permissions.get('write', [])
+        if not r and not w:
+            self.xfeature_destroy(path)
+            return
+        feature = self.xfeature_create(path)
+        if r:
+            self.feature_clear(feature, READ)
+            self.feature_setmany(feature, READ, r)
+        if w:
+            self.feature_clear(feature, WRITE)
+            self.feature_setmany(feature, WRITE, w)
+    
+    def access_get(self, path):
+        """Get permissions for path."""
+        
+        feature = self.xfeature_get(path)
+        if not feature:
+            return {}
+        permissions = self.feature_dict(feature)
+        if READ in permissions:
+            permissions['read'] = permissions[READ]
+            del(permissions[READ])
+        if WRITE in permissions:
+            permissions['write'] = permissions[WRITE]
+            del(permissions[WRITE])
+        return permissions
     
     def access_clear(self, path):
         """Revoke access to path (both permissions and public)."""
@@ -76,13 +98,9 @@ class Permissions(XFeatures, Groups, Public):
     def access_check(self, path, access, member):
         """Return true if the member has this access to the path."""
         
-        if access == READ and self.public_get(path) is not None:
-            return True
-        
-        r = self.xfeature_inherit(path)
-        if not r:
+        feature = self.xfeature_get(path)
+        if not feature:
             return False
-        fpath, feature = r
         members = self.feature_get(feature, access)
         if member in members or '*' in members:
             return True
@@ -92,25 +110,23 @@ class Permissions(XFeatures, Groups, Public):
         return False
     
     def access_inherit(self, path):
-        """Return the inherited or assigned (path, permissions) pair for path."""
+        """Return the paths influencing the access for path."""
         
-        r = self.xfeature_inherit(path)
-        if not r:
-            return (path, {})
-        fpath, feature = r
-        permissions = self.feature_dict(feature)
-        if READ in permissions:
-            permissions['read'] = permissions[READ]
-            del(permissions[READ])
-        if WRITE in permissions:
-            permissions['write'] = permissions[WRITE]
-            del(permissions[WRITE])
-        return (fpath, permissions)
-    
-    def access_list(self, path):
-        """List all permission paths inherited by or inheriting from path."""
+#         r = self.xfeature_inherit(path)
+#         if not r:
+#             return []
+#         # Compute valid.
+#         return [x[0] for x in r if x[0] in valid]
         
-        return [x[0] for x in self.xfeature_list(path) if x[0] != path]
+        # Only keep path components.
+        parts = path.rstrip('/').split('/')
+        valid = []
+        for i in range(1, len(parts)):
+            subp = '/'.join(parts[:i + 1])
+            valid.append(subp)
+            if subp != path:
+                valid.append(subp + '/')
+        return [x for x in valid if self.xfeature_get(x)]
     
     def access_list_paths(self, member, prefix=None):
         """Return the list of paths granted to member."""
index eec19b5..2d6e9cb 100644 (file)
@@ -66,6 +66,11 @@ class Public(DBWorker):
             return row[0]
         return None
     
+    def public_list(self, prefix):
+        q = "select path, public_id from public where path like ? escape '\\' and active = 1"
+        self.execute(q, (self.escape_like(prefix) + '%',))
+        return self.fetchall()
+    
     def public_path(self, public):
         q = "select path from public where public_id = ? and active = 1"
         self.execute(q, (public,))
index 9f4f885..7d682d0 100644 (file)
@@ -61,47 +61,34 @@ class XFeatures(DBWorker):
                             foreign key (feature_id) references xfeatures(feature_id)
                             on delete cascade ) """)
     
-    def xfeature_inherit(self, path):
-        """Return the (path, feature) inherited by the path, or None."""
+#     def xfeature_inherit(self, path):
+#         """Return the (path, feature) inherited by the path, or None."""
+#         
+#         q = ("select path, feature_id from xfeatures "
+#              "where path <= ? "
+#              "and ? like path || '%' " # XXX: Escape like...
+#              "order by path desc")
+#         self.execute(q, (path, path))
+#         return self.fetchall()
+    
+    def xfeature_get(self, path):
+        """Return feature for path."""
         
-        q = ("select path, feature_id from xfeatures "
-             "where path <= ? "
-             "order by path desc limit 1")
+        q = "select feature_id from xfeatures where path = ?"
         self.execute(q, (path,))
         r = self.fetchone()
-        if r is not None and path.startswith(r[0]):
-            return r
+        if r is not None:
+            return r[0]
         return None
     
-    def xfeature_list(self, path):
-        """Return the list of the (prefix, feature) pairs matching path.
-           A prefix matches path if either the prefix includes the path,
-           or the path includes the prefix.
-        """
-        
-        inherited = self.xfeature_inherit(path)
-        if inherited:
-            return [inherited]
-        
-        q = ("select path, feature_id from xfeatures "
-             "where path like ? escape '\\' and path != ? order by path")
-        self.execute(q, (self.escape_like(path) + '%', path,))
-        return self.fetchall()
-    
     def xfeature_create(self, path):
         """Create and return a feature for path.
-           If the path already inherits a feature or
-           bestows to paths already inheriting a feature,
-           create no feature and return None.
            If the path has a feature, return it.
         """
         
-        prefixes = self.xfeature_list(path)
-        pl = len(prefixes)
-        if (pl > 1) or (pl == 1 and prefixes[0][0] != path):
-            return None
-        if pl == 1 and prefixes[0][0] == path:
-            return prefixes[0][1]
+        feature = self.xfeature_get(path)
+        if feature is not None:
+            return feature
         q = "insert into xfeatures (path) values (?)"
         id = self.execute(q, (path,)).lastrowid
         return id
index 950c6db..9c1fdac 100644 (file)
@@ -47,6 +47,11 @@ DEFAULT_DB_MODULE = 'pithos.backends.lib.sqlalchemy'
 DEFAULT_DB_CONNECTION = 'sqlite:///backend.db'
 DEFAULT_BLOCK_MODULE = 'pithos.backends.lib.hashfiler'
 DEFAULT_BLOCK_PATH = 'data/'
+#DEFAULT_QUEUE_MODULE = 'pithos.backends.lib.rabbitmq'
+#DEFAULT_QUEUE_CONNECTION = 'rabbitmq://guest:guest@localhost:5672/pithos'
+
+QUEUE_MESSAGE_KEY = '#'
+QUEUE_CLIENT_ID = 2 # Pithos.
 
 ( CLUSTER_NORMAL, CLUSTER_HISTORY, CLUSTER_DELETED ) = range(3)
 
@@ -84,39 +89,60 @@ class ModularBackend(BaseBackend):
     Uses modules for SQL functions and storage.
     """
     
-    def __init__(self, db_module=None, db_connection=None, block_module=None, block_path=None):
+    def __init__(self, db_module=None, db_connection=None,
+                 block_module=None, block_path=None,
+                 queue_module=None, queue_connection=None):
         db_module = db_module or DEFAULT_DB_MODULE
         db_connection = db_connection or DEFAULT_DB_CONNECTION
         block_module = block_module or DEFAULT_BLOCK_MODULE
         block_path = block_path or DEFAULT_BLOCK_PATH
+        #queue_module = queue_module or DEFAULT_QUEUE_MODULE
+        #queue_connection = queue_connection or DEFAULT_QUEUE_CONNECTION
         
         self.hash_algorithm = 'sha256'
         self.block_size = 4 * 1024 * 1024 # 4MB
         
         self.default_policy = {'quota': DEFAULT_QUOTA, 'versioning': DEFAULT_VERSIONING}
         
-        __import__(db_module)
-        self.db_module = sys.modules[db_module]
-        self.wrapper = self.db_module.DBWrapper(db_connection)
+        # Import a module by dotted name and return the module object.
+        def load_module(m):
+            __import__(m)
+            return sys.modules[m]
         
+        self.db_module = load_module(db_module)
+        self.wrapper = self.db_module.DBWrapper(db_connection)
         params = {'wrapper': self.wrapper}
         self.permissions = self.db_module.Permissions(**params)
         for x in ['READ', 'WRITE']:
             setattr(self, x, getattr(self.db_module, x))
         self.node = self.db_module.Node(**params)
-        for x in ['ROOTNODE', 'SERIAL', 'HASH', 'SIZE', 'MTIME', 'MUSER', 'UUID', 'CLUSTER']:
+        # Copy the db module's property-index constants onto the backend.
+        for x in ['ROOTNODE', 'SERIAL', 'HASH', 'SIZE', 'TYPE', 'MTIME', 'MUSER', 'UUID', 'CHECKSUM', 'CLUSTER', 'MATCH_PREFIX', 'MATCH_EXACT']:
             setattr(self, x, getattr(self.db_module, x))
         
-        __import__(block_module)
-        self.block_module = sys.modules[block_module]
-        
+        self.block_module = load_module(block_module)
         params = {'path': block_path,
                   'block_size': self.block_size,
                   'hash_algorithm': self.hash_algorithm}
         self.store = self.block_module.Store(**params)
+
+        if queue_module and queue_connection:
+            self.queue_module = load_module(queue_module)
+            params = {'exchange': queue_connection,
+                      'message_key': QUEUE_MESSAGE_KEY,
+                      'client_id': QUEUE_CLIENT_ID}
+            self.queue = self.queue_module.Queue(**params)
+        else:
+            # Null-object queue so callers may send()/close() unconditionally
+            # even when no queue backend is configured.
+            class NoQueue:
+                def send(self, *args):
+                    pass
+                
+                def close(self):
+                    pass
+            
+            self.queue = NoQueue()
     
     def close(self):
         self.wrapper.close()
+        self.queue.close()
     
     @backend_method
     def list_accounts(self, user, marker=None, limit=10000):
@@ -128,7 +154,7 @@ class ModularBackend(BaseBackend):
         return allowed[start:start + limit]
     
     @backend_method
-    def get_account_meta(self, user, account, domain, until=None):
+    def get_account_meta(self, user, account, domain, until=None, include_user_defined=True):
         """Return a dictionary with the account metadata for the domain."""
         
         logger.debug("get_account_meta: %s %s %s", account, domain, until)
@@ -154,7 +180,7 @@ class ModularBackend(BaseBackend):
             meta = {'name': account}
         else:
             meta = {}
-            if props is not None:
+            if props is not None and include_user_defined:
                 meta.update(dict(self.node.attribute_get(props[self.SERIAL], domain)))
             if until is not None:
                 meta.update({'until_timestamp': tstamp})
@@ -270,10 +296,27 @@ class ModularBackend(BaseBackend):
             start, limit = self._list_limits(allowed, marker, limit)
             return allowed[start:start + limit]
         node = self.node.node_lookup(account)
-        return [x[0] for x in self._list_objects(node, account, '', '/', marker, limit, False, None, [], until)]
+        return [x[0] for x in self._list_object_properties(node, account, '', '/', marker, limit, False, None, [], until)]
+    
+    @backend_method
+    def list_container_meta(self, user, account, container, domain, until=None):
+        """Return a list with all the container's object meta keys for the domain."""
+        
+        logger.debug("list_container_meta: %s %s %s %s", account, container, domain, until)
+        allowed = []
+        if user != account:
+            # Non-owners may not browse history and need at least one
+            # shared path under the container.
+            if until:
+                raise NotAllowedError
+            allowed = self.permissions.access_list_paths(user, '/'.join((account, container)))
+            if not allowed:
+                raise NotAllowedError
+        path, node = self._lookup_container(account, container)
+        before = until if until is not None else inf
+        allowed = self._get_formatted_paths(allowed)
+        return self.node.latest_attribute_keys(node, domain, before, CLUSTER_DELETED, allowed)
     
     @backend_method
-    def get_container_meta(self, user, account, container, domain, until=None):
+    def get_container_meta(self, user, account, container, domain, until=None, include_user_defined=True):
         """Return a dictionary with the container metadata for the domain."""
         
         logger.debug("get_container_meta: %s %s %s %s", account, container, domain, until)
@@ -294,7 +337,9 @@ class ModularBackend(BaseBackend):
         if user != account:
             meta = {'name': container}
         else:
-            meta = dict(self.node.attribute_get(props[self.SERIAL], domain))
+            meta = {}
+            if include_user_defined:
+                meta.update(dict(self.node.attribute_get(props[self.SERIAL], domain)))
             if until is not None:
                 meta.update({'until_timestamp': tstamp})
             meta.update({'name': container, 'count': count, 'bytes': bytes})
@@ -363,58 +408,95 @@ class ModularBackend(BaseBackend):
         path, node = self._lookup_container(account, container)
         
         if until is not None:
-            hashes = self.node.node_purge_children(node, until, CLUSTER_HISTORY)
+            hashes, size = self.node.node_purge_children(node, until, CLUSTER_HISTORY)
             for h in hashes:
                 self.store.map_delete(h)
             self.node.node_purge_children(node, until, CLUSTER_DELETED)
+            self._report_size_change(user, account, -size, {'action': 'container purge'})
             return
         
         if self._get_statistics(node)[0] > 0:
             raise IndexError('Container is not empty')
-        hashes = self.node.node_purge_children(node, inf, CLUSTER_HISTORY)
+        hashes, size = self.node.node_purge_children(node, inf, CLUSTER_HISTORY)
         for h in hashes:
             self.store.map_delete(h)
         self.node.node_purge_children(node, inf, CLUSTER_DELETED)
         self.node.node_remove(node)
+        self._report_size_change(user, account, -size, {'action': 'container delete'})
     
-    @backend_method
-    def list_objects(self, user, account, container, prefix='', delimiter=None, marker=None, limit=10000, virtual=True, domain=None, keys=[], shared=False, until=None, size_range=None):
-        """Return a list of objects existing under a container."""
-        
-        logger.debug("list_objects: %s %s %s %s %s %s %s %s %s %s %s", account, container, prefix, delimiter, marker, limit, virtual, domain, keys, shared, until)
+    def _list_objects(self, user, account, container, prefix, delimiter, marker, limit, virtual, domain, keys, shared, until, size_range, all_props):
+        """List objects under account/container, honoring access permissions.
+           With all_props=True full property tuples are returned; otherwise
+           (name, version_id) pairs.
+        """
+        if user != account and until:
+            raise NotAllowedError
+        allowed = self._list_object_permissions(user, account, container, prefix, shared)
+        if shared and not allowed:
+            return []
+        path, node = self._lookup_container(account, container)
+        allowed = self._get_formatted_paths(allowed)
+        return self._list_object_properties(node, path, prefix, delimiter, marker, limit, virtual, domain, keys, until, size_range, allowed, all_props)
+    
+    def _list_object_permissions(self, user, account, container, prefix, shared):
         allowed = []
+        path = '/'.join((account, container, prefix)).rstrip('/')
         if user != account:
-            if until:
-                raise NotAllowedError
-            allowed = self.permissions.access_list_paths(user, '/'.join((account, container)))
+            allowed = self.permissions.access_list_paths(user, path)
             if not allowed:
                 raise NotAllowedError
         else:
             if shared:
-                allowed = self.permissions.access_list_shared('/'.join((account, container)))
+                allowed = self.permissions.access_list_shared(path)
                 if not allowed:
                     return []
-        path, node = self._lookup_container(account, container)
-        return self._list_objects(node, path, prefix, delimiter, marker, limit, virtual, domain, keys, until, size_range, allowed)
+        return allowed
     
     @backend_method
-    def list_object_meta(self, user, account, container, domain, until=None):
-        """Return a list with all the container's object meta keys for the domain."""
+    def list_objects(self, user, account, container, prefix='', delimiter=None, marker=None, limit=10000, virtual=True, domain=None, keys=[], shared=False, until=None, size_range=None):
+        """Return a list of object (name, version_id) tuples existing under a container."""
         
-        logger.debug("list_object_meta: %s %s %s %s", account, container, domain, until)
-        allowed = []
-        if user != account:
-            if until:
-                raise NotAllowedError
-            allowed = self.permissions.access_list_paths(user, '/'.join((account, container)))
-            if not allowed:
-                raise NotAllowedError
-        path, node = self._lookup_container(account, container)
-        before = until if until is not None else inf
-        return self.node.latest_attribute_keys(node, domain, before, CLUSTER_DELETED, allowed)
+        logger.debug("list_objects: %s %s %s %s %s %s %s %s %s %s %s %s", account, container, prefix, delimiter, marker, limit, virtual, domain, keys, shared, until, size_range)
+        # all_props=False: only (name, version_id) pairs are needed here.
+        return self._list_objects(user, account, container, prefix, delimiter, marker, limit, virtual, domain, keys, shared, until, size_range, False)
+    
+    @backend_method
+    def list_object_meta(self, user, account, container, prefix='', delimiter=None, marker=None, limit=10000, virtual=True, domain=None, keys=[], shared=False, until=None, size_range=None):
+        """Return a list of object metadata dicts existing under a container."""
+        
+        logger.debug("list_object_meta: %s %s %s %s %s %s %s %s %s %s %s %s", account, container, prefix, delimiter, marker, limit, virtual, domain, keys, shared, until, size_range)
+        props = self._list_objects(user, account, container, prefix, delimiter, marker, limit, virtual, domain, keys, shared, until, size_range, True)
+        objects = []
+        for p in props:
+            if len(p) == 2:
+                # A (name, None) pair produced for a virtual 'subdir' prefix.
+                objects.append({'subdir': p[0]})
+            else:
+                # Full property tuple: p[0] is the object name, so every
+                # property index from the db module is shifted by one.
+                objects.append({'name': p[0],
+                                'bytes': p[self.SIZE + 1],
+                                'type': p[self.TYPE + 1],
+                                'hash': p[self.HASH + 1],
+                                'version': p[self.SERIAL + 1],
+                                'version_timestamp': p[self.MTIME + 1],
+                                'modified': p[self.MTIME + 1] if until is None else None,
+                                'modified_by': p[self.MUSER + 1],
+                                'uuid': p[self.UUID + 1],
+                                'checksum': p[self.CHECKSUM + 1]})
+        return objects
+    
+    @backend_method
+    def list_object_permissions(self, user, account, container, prefix=''):
+        """Return a list of paths that enforce permissions under a container."""
+        
+        logger.debug("list_object_permissions: %s %s %s", account, container, prefix)
+        # shared=True restricts the result to paths with explicit permissions.
+        return self._list_object_permissions(user, account, container, prefix, True)
     
     @backend_method
-    def get_object_meta(self, user, account, container, name, domain, version=None):
+    def list_object_public(self, user, account, container, prefix=''):
+        """Return a dict mapping paths to public ids for objects that are public under a container."""
+        
+        logger.debug("list_object_public: %s %s %s", account, container, prefix)
+        public = {}
+        for path, p in self.permissions.public_list('/'.join((account, container, prefix))):
+            # NOTE(review): the stored public id is offset by ULTIMATE_ANSWER
+            # (presumably a module-level constant -- not visible here) before
+            # exposure; confirm consumers apply the matching inverse offset.
+            public[path] = p + ULTIMATE_ANSWER
+        return public
+    
+    @backend_method
+    def get_object_meta(self, user, account, container, name, domain, version=None, include_user_defined=True):
         """Return a dictionary with the object metadata for the domain."""
         
         logger.debug("get_object_meta: %s %s %s %s %s", account, container, name, domain, version)
@@ -432,10 +514,19 @@ class ModularBackend(BaseBackend):
                     raise NameError('Object does not exist')
                 modified = del_props[self.MTIME]
         
-        meta = dict(self.node.attribute_get(props[self.SERIAL], domain))
-        meta.update({'name': name, 'bytes': props[self.SIZE], 'hash':props[self.HASH]})
-        meta.update({'version': props[self.SERIAL], 'version_timestamp': props[self.MTIME]})
-        meta.update({'modified': modified, 'modified_by': props[self.MUSER], 'uuid': props[self.UUID]})
+        meta = {}
+        if include_user_defined:
+            meta.update(dict(self.node.attribute_get(props[self.SERIAL], domain)))
+        meta.update({'name': name,
+                     'bytes': props[self.SIZE],
+                     'type': props[self.TYPE],
+                     'hash': props[self.HASH],
+                     'version': props[self.SERIAL],
+                     'version_timestamp': props[self.MTIME],
+                     'modified': modified,
+                     'modified_by': props[self.MUSER],
+                     'uuid': props[self.UUID],
+                     'checksum': props[self.CHECKSUM]})
         return meta
     
     @backend_method
@@ -457,16 +548,16 @@ class ModularBackend(BaseBackend):
         
         logger.debug("get_object_permissions: %s %s %s", account, container, name)
         allowed = 'write'
+        permissions_path = self._get_permissions_path(account, container, name)
         if user != account:
-            path = '/'.join((account, container, name))
-            if self.permissions.access_check(path, self.WRITE, user):
+            if self.permissions.access_check(permissions_path, self.WRITE, user):
                 allowed = 'write'
-            elif self.permissions.access_check(path, self.READ, user):
+            elif self.permissions.access_check(permissions_path, self.READ, user):
                 allowed = 'read'
             else:
                 raise NotAllowedError
-        path = self._lookup_object(account, container, name)[0]
-        return (allowed,) + self.permissions.access_inherit(path)
+        self._lookup_object(account, container, name)
+        return (allowed, permissions_path, self.permissions.access_get(permissions_path))
     
     @backend_method
     def update_object_permissions(self, user, account, container, name, permissions):
@@ -514,7 +605,7 @@ class ModularBackend(BaseBackend):
         hashmap = self.store.map_get(binascii.unhexlify(props[self.HASH]))
         return props[self.SIZE], [binascii.hexlify(x) for x in hashmap]
     
-    def _update_object_hash(self, user, account, container, name, size, hash, permissions, src_node=None, is_copy=False):
+    def _update_object_hash(self, user, account, container, name, size, type, hash, checksum, permissions, src_node=None, is_copy=False):
         if permissions is not None and user != account:
             raise NotAllowedError
         self._can_write(user, account, container, name)
@@ -525,14 +616,11 @@ class ModularBackend(BaseBackend):
         account_path, account_node = self._lookup_account(account, True)
         container_path, container_node = self._lookup_container(account, container)
         path, node = self._put_object_node(container_path, container_node, name)
-        pre_version_id, dest_version_id = self._put_version_duplicate(user, node, src_node=src_node, size=size, hash=hash, is_copy=is_copy)
+        pre_version_id, dest_version_id = self._put_version_duplicate(user, node, src_node=src_node, size=size, type=type, hash=hash, checksum=checksum, is_copy=is_copy)
         
         # Check quota.
-        versioning = self._get_policy(container_node)['versioning']
-        if versioning != 'auto':
-            size_delta = size - 0 # TODO: Get previous size.
-        else:
-            size_delta = size
+        del_size = self._apply_versioning(account, container, pre_version_id)
+        size_delta = size - del_size
         if size_delta > 0:
             account_quota = long(self._get_policy(account_node)['quota'])
             container_quota = long(self._get_policy(container_node)['quota'])
@@ -540,17 +628,17 @@ class ModularBackend(BaseBackend):
                (container_quota > 0 and self._get_statistics(container_node)[1] + size_delta > container_quota):
                 # This must be executed in a transaction, so the version is never created if it fails.
                 raise QuotaError
+        self._report_size_change(user, account, size_delta, {'action': 'object update'})
         
         if permissions is not None:
             self.permissions.access_set(path, permissions)
-        self._apply_versioning(account, container, pre_version_id)
         return pre_version_id, dest_version_id
     
     @backend_method
-    def update_object_hashmap(self, user, account, container, name, size, hashmap, domain, meta={}, replace_meta=False, permissions=None):
+    def update_object_hashmap(self, user, account, container, name, size, type, hashmap, checksum, domain, meta={}, replace_meta=False, permissions=None):
         """Create/update an object with the specified size and partial hashes."""
         
-        logger.debug("update_object_hashmap: %s %s %s %s %s", account, container, name, size, hashmap)
+        logger.debug("update_object_hashmap: %s %s %s %s %s %s %s", account, container, name, size, type, hashmap, checksum)
         if size == 0: # No such thing as an empty hashmap.
             hashmap = [self.put_block('')]
         map = HashMap(self.block_size, self.hash_algorithm)
@@ -562,12 +650,26 @@ class ModularBackend(BaseBackend):
             raise ie
         
         hash = map.hash()
-        pre_version_id, dest_version_id = self._update_object_hash(user, account, container, name, size, binascii.hexlify(hash), permissions)
+        pre_version_id, dest_version_id = self._update_object_hash(user, account, container, name, size, type, binascii.hexlify(hash), checksum, permissions)
         self._put_metadata_duplicate(pre_version_id, dest_version_id, domain, meta, replace_meta)
         self.store.map_put(hash, map)
         return dest_version_id
     
-    def _copy_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, dest_domain=None, dest_meta={}, replace_meta=False, permissions=None, src_version=None, is_move=False):
+    @backend_method
+    def update_object_checksum(self, user, account, container, name, version, checksum):
+        """Update an object's checksum."""
+        
+        logger.debug("update_object_checksum: %s %s %s %s %s", account, container, name, version, checksum)
+        # Update objects with greater version and same hashmap and size (fix metadata updates).
+        self._can_write(user, account, container, name)
+        path, node = self._lookup_object(account, container, name)
+        props = self._get_version(node, version)
+        versions = self.node.node_get_versions(node)
+        for x in versions:
+            # Later versions with identical hash and size carry the same
+            # data, so they receive the same checksum.
+            if x[self.SERIAL] >= int(version) and x[self.HASH] == props[self.HASH] and x[self.SIZE] == props[self.SIZE]:
+                self.node.version_put_property(x[self.SERIAL], 'checksum', checksum)
+    
+    def _copy_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, dest_domain=None, dest_meta={}, replace_meta=False, permissions=None, src_version=None, is_move=False):
         self._can_read(user, src_account, src_container, src_name)
         path, node = self._lookup_object(src_account, src_container, src_name)
         # TODO: Will do another fetch of the properties in duplicate version...
@@ -577,25 +679,26 @@ class ModularBackend(BaseBackend):
         size = props[self.SIZE]
         
         is_copy = not is_move and (src_account, src_container, src_name) != (dest_account, dest_container, dest_name) # New uuid.
-        pre_version_id, dest_version_id = self._update_object_hash(user, dest_account, dest_container, dest_name, size, hash, permissions, src_node=node, is_copy=is_copy)
+        pre_version_id, dest_version_id = self._update_object_hash(user, dest_account, dest_container, dest_name, size, type, hash, None, permissions, src_node=node, is_copy=is_copy)
         self._put_metadata_duplicate(src_version_id, dest_version_id, dest_domain, dest_meta, replace_meta)
         return dest_version_id
     
     @backend_method
-    def copy_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, domain, meta={}, replace_meta=False, permissions=None, src_version=None):
+    def copy_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta={}, replace_meta=False, permissions=None, src_version=None):
         """Copy an object's data and metadata."""
         
-        logger.debug("copy_object: %s %s %s %s %s %s %s %s %s %s %s", src_account, src_container, src_name, dest_account, dest_container, dest_name, domain, meta, replace_meta, permissions, src_version)
-        return self._copy_object(user, src_account, src_container, src_name, dest_account, dest_container, dest_name, domain, meta, replace_meta, permissions, src_version, False)
+        logger.debug("copy_object: %s %s %s %s %s %s %s %s %s %s %s %s", src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta, replace_meta, permissions, src_version)
+        dest_version_id = self._copy_object(user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta, replace_meta, permissions, src_version, False)
+        return dest_version_id
     
     @backend_method
-    def move_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, domain, meta={}, replace_meta=False, permissions=None):
+    def move_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta={}, replace_meta=False, permissions=None):
         """Move an object's data and metadata."""
         
-        logger.debug("move_object: %s %s %s %s %s %s %s %s %s %s", src_account, src_container, src_name, dest_account, dest_container, dest_name, domain, meta, replace_meta, permissions)
+        logger.debug("move_object: %s %s %s %s %s %s %s %s %s %s %s", src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta, replace_meta, permissions)
         if user != src_account:
             raise NotAllowedError
-        dest_version_id = self._copy_object(user, src_account, src_container, src_name, dest_account, dest_container, dest_name, domain, meta, replace_meta, permissions, None, True)
+        dest_version_id = self._copy_object(user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta, replace_meta, permissions, None, True)
         if (src_account, src_container, src_name) != (dest_account, dest_container, dest_name):
             self._delete_object(user, src_account, src_container, src_name)
         return dest_version_id
@@ -609,8 +712,14 @@ class ModularBackend(BaseBackend):
             node = self.node.node_lookup(path)
             if node is None:
                 return
-            hashes = self.node.node_purge(node, until, CLUSTER_NORMAL)
-            hashes += self.node.node_purge(node, until, CLUSTER_HISTORY)
+            hashes = []
+            size = 0
+            h, s = self.node.node_purge(node, until, CLUSTER_NORMAL)
+            hashes += h
+            size += s
+            h, s = self.node.node_purge(node, until, CLUSTER_HISTORY)
+            hashes += h
+            size += s
             for h in hashes:
                 self.store.map_delete(h)
             self.node.node_purge(node, until, CLUSTER_DELETED)
@@ -618,11 +727,14 @@ class ModularBackend(BaseBackend):
                 props = self._get_version(node)
             except NameError:
                 self.permissions.access_clear(path)
+            self._report_size_change(user, account, -size, {'action': 'object purge'})
             return
         
         path, node = self._lookup_object(account, container, name)
-        src_version_id, dest_version_id = self._put_version_duplicate(user, node, size=0, hash=None, cluster=CLUSTER_DELETED)
-        self._apply_versioning(account, container, src_version_id)
+        src_version_id, dest_version_id = self._put_version_duplicate(user, node, size=0, type='', hash=None, checksum='', cluster=CLUSTER_DELETED)
+        del_size = self._apply_versioning(account, container, src_version_id)
+        if del_size:
+            self._report_size_change(user, account, -del_size, {'action': 'object delete'})
         self.permissions.access_clear(path)
     
     @backend_method
@@ -710,7 +822,7 @@ class ModularBackend(BaseBackend):
     
     def _put_path(self, user, parent, path):
         node = self.node.node_create(parent, path)
-        self.node.version_create(node, None, 0, None, user, self._generate_uuid(), CLUSTER_NORMAL)
+        self.node.version_create(node, None, 0, '', None, user, self._generate_uuid(), '', CLUSTER_NORMAL)
         return node
     
     def _lookup_account(self, account, create=True):
@@ -770,7 +882,7 @@ class ModularBackend(BaseBackend):
                 raise IndexError('Version does not exist')
         return props
     
-    def _put_version_duplicate(self, user, node, src_node=None, size=None, hash=None, cluster=CLUSTER_NORMAL, is_copy=False):
+    def _put_version_duplicate(self, user, node, src_node=None, size=None, type=None, hash=None, checksum=None, cluster=CLUSTER_NORMAL, is_copy=False):
         """Create a new version of the node."""
         
         props = self.node.version_lookup(node if src_node is None else src_node, inf, CLUSTER_NORMAL)
@@ -778,13 +890,21 @@ class ModularBackend(BaseBackend):
             src_version_id = props[self.SERIAL]
             src_hash = props[self.HASH]
             src_size = props[self.SIZE]
+            src_type = props[self.TYPE]
+            src_checksum = props[self.CHECKSUM]
         else:
             src_version_id = None
             src_hash = None
             src_size = 0
-        if size is None:
-            hash = src_hash # This way hash can be set to None.
+            src_type = ''
+            src_checksum = ''
+        if size is None: # Set metadata.
+            hash = src_hash # This way hash can be set to None (account or container).
             size = src_size
+        if type is None:
+            type = src_type
+        if checksum is None:
+            checksum = src_checksum
         uuid = self._generate_uuid() if (is_copy or src_version_id is None) else props[self.UUID]
         
         if src_node is None:
@@ -797,7 +917,7 @@ class ModularBackend(BaseBackend):
         if pre_version_id is not None:
             self.node.version_recluster(pre_version_id, CLUSTER_HISTORY)
         
-        dest_version_id, mtime = self.node.version_create(node, hash, size, src_version_id, user, uuid, cluster)
+        dest_version_id, mtime = self.node.version_create(node, hash, size, type, src_version_id, user, uuid, checksum, cluster)
         return pre_version_id, dest_version_id
     
     def _put_metadata_duplicate(self, src_version_id, dest_version_id, domain, meta, replace=False):
@@ -828,7 +948,7 @@ class ModularBackend(BaseBackend):
             limit = 10000
         return start, limit
     
-    def _list_objects(self, parent, path, prefix='', delimiter=None, marker=None, limit=10000, virtual=True, domain=None, keys=[], until=None, size_range=None, allowed=[]):
+    def _list_object_properties(self, parent, path, prefix='', delimiter=None, marker=None, limit=10000, virtual=True, domain=None, keys=[], until=None, size_range=None, allowed=[], all_props=False):
         cont_prefix = path + '/'
         prefix = cont_prefix + prefix
         start = cont_prefix + marker if marker else None
@@ -836,14 +956,23 @@ class ModularBackend(BaseBackend):
         filterq = keys if domain else []
         sizeq = size_range
         
-        objects, prefixes = self.node.latest_version_list(parent, prefix, delimiter, start, limit, before, CLUSTER_DELETED, allowed, domain, filterq, sizeq)
+        objects, prefixes = self.node.latest_version_list(parent, prefix, delimiter, start, limit, before, CLUSTER_DELETED, allowed, domain, filterq, sizeq, all_props)
         objects.extend([(p, None) for p in prefixes] if virtual else [])
         objects.sort(key=lambda x: x[0])
-        objects = [(x[0][len(cont_prefix):], x[1]) for x in objects]
+        objects = [(x[0][len(cont_prefix):],) + x[1:] for x in objects]
         
         start, limit = self._list_limits([x[0] for x in objects], marker, limit)
         return objects[start:start + limit]
     
+    # Reporting functions.
+    
+    def _report_size_change(self, user, account, size, details={}):
+        """Send a 'diskspace' accounting message for account to the queue.
+           NOTE(review): 'details' is a mutable default argument and is
+           mutated below via details.update(); a caller-supplied dict is
+           modified in place -- consider copying before updating.
+        """
+        logger.debug("_report_size_change: %s %s %s %s", user, account, size, details)
+        account_node = self._lookup_account(account, True)[1]
+        total = self._get_statistics(account_node)[1]
+        details.update({'user': user, 'total': total})
+        self.queue.send(account, 'diskspace', size, details)
+    
     # Policy functions.
     
     def _check_policy(self, policy):
@@ -874,13 +1003,19 @@ class ModularBackend(BaseBackend):
         return policy
     
     def _apply_versioning(self, account, container, version_id):
+        """Delete the provided version if such is the policy.
+           Return size of object removed.
+        """
+        
         if version_id is None:
-            return
+            return 0
         path, node = self._lookup_container(account, container)
         versioning = self._get_policy(node)['versioning']
         if versioning != 'auto':
-            hash = self.node.version_remove(version_id)
+            hash, size = self.node.version_remove(version_id)
             self.store.map_delete(hash)
+            return size
+        return 0
     
     # Access control functions.
     
@@ -890,18 +1025,48 @@ class ModularBackend(BaseBackend):
     
     def _check_permissions(self, path, permissions):
         # raise ValueError('Bad characters in permissions')
-        
-        # Check for existing permissions.
-        paths = self.permissions.access_list(path)
-        if paths:
-            ae = AttributeError()
-            ae.data = paths
-            raise ae
+        # Permission validation is intentionally a no-op placeholder now:
+        # the former check that rejected paths with existing permissions
+        # was removed, so setting permissions on such paths is allowed.
+        pass
+    
+    def _get_formatted_paths(self, paths):
+        """Return (path, match-type) pairs for the given object paths.
+
+        Objects whose content type marks them as directories additionally
+        get a trailing-slash MATCH_PREFIX entry, so that a grant on the
+        directory also covers the objects beneath it.
+        """
+        formatted = []
+        for p in paths:
+            node = self.node.node_lookup(p)
+            if node is not None:
+                props = self.node.version_lookup(node, inf, CLUSTER_NORMAL)
+            # NOTE(review): if the very first node_lookup returns None,
+            # `props` is read below before ever being assigned (NameError);
+            # the check below likely belongs inside the `if node` branch.
+            if props is not None:
+                if props[self.TYPE] in ('application/directory', 'application/folder'):
+                    formatted.append((p.rstrip('/') + '/', self.MATCH_PREFIX))
+                formatted.append((p, self.MATCH_EXACT))
+        return formatted
+    
+    def _get_permissions_path(self, account, container, name):
+        """Return the path whose permissions govern the given object.
+
+        Prefers the object's own path; otherwise returns the closest
+        ancestor path that is a directory-type object (inherited
+        permissions). Returns None when nothing applicable is found.
+        """
+        path = '/'.join((account, container, name))
+        permission_paths = self.permissions.access_inherit(path)
+        # Sort descending so more specific (longer) paths are tried first.
+        permission_paths.sort()
+        permission_paths.reverse()
+        for p in permission_paths:
+            if p == path:
+                return p
+            else:
+                # Account- and container-level entries (fewer than two
+                # slashes) cannot be directory objects; skip them.
+                if p.count('/') < 2:
+                    continue
+                node = self.node.node_lookup(p)
+                if node is not None:
+                    props = self.node.version_lookup(node, inf, CLUSTER_NORMAL)
+                # NOTE(review): `props` may be unbound here when node_lookup
+                # returns None on the first iteration -- TODO confirm/guard.
+                if props is not None:
+                    if props[self.TYPE] in ('application/directory', 'application/folder'):
+                        return p
+        return None
     
     def _can_read(self, user, account, container, name):
         if user == account:
             return True
         path = '/'.join((account, container, name))
+        # Publicly published objects are readable by anyone.
+        if self.permissions.public_get(path) is not None:
+            return True
+        # Resolve the (possibly inherited) path that carries permissions.
+        path = self._get_permissions_path(account, container, name)
+        if not path:
+            raise NotAllowedError
         if not self.permissions.access_check(path, self.READ, user) and not self.permissions.access_check(path, self.WRITE, user):
             raise NotAllowedError
     
@@ -909,6 +1074,9 @@ class ModularBackend(BaseBackend):
         if user == account:
             return True
         path = '/'.join((account, container, name))
+        path = self._get_permissions_path(account, container, name)
+        if not path:
+            raise NotAllowedError
         if not self.permissions.access_check(path, self.WRITE, user):
             raise NotAllowedError
     
index ff14928..92afe1c 100644 (file)
 # interpreted as representing official policies, either expressed
 # or implied, of GRNET S.A.
 
-from httplib import HTTPConnection, HTTP
+from httplib import HTTPConnection, HTTPSConnection, HTTP
 from sys import stdin
 from xml.dom import minidom
 from StringIO import StringIO
 from urllib import quote, unquote
+from urlparse import urlparse
 
 import json
 import types
@@ -66,20 +67,26 @@ class Fault(Exception):
         self.status = status
 
 class Client(object):
-    def __init__(self, host, token, account, api='v1', verbose=False, debug=False):
-        """`host` can also include a port, e.g '127.0.0.1:8000'."""
+    def __init__(self, url, token, account, verbose=False, debug=False):
+        """`url` can also include a port, e.g '127.0.0.1:8000'."""
+        # The former separate `host` + `api` pair is replaced by a single
+        # full URL (scheme, host[:port] and base path), parsed per request.
         
-        self.host = host
+        self.url = url
         self.account = account
-        self.api = api
         self.verbose = verbose or debug
         self.debug = debug
         self.token = token
     
     def _req(self, method, path, body=None, headers={}, format='text', params={}):
-        full_path = _prepare_path(path, self.api, format, params)
+        p = urlparse(self.url)
+        if p.scheme == 'http':
+            conn = HTTPConnection(p.netloc)
+        elif p.scheme == 'https':
+            conn = HTTPSConnection(p.netloc)
+        else:
+            raise Exception('Unknown URL scheme')
+        
+        full_path = _prepare_path(p.path + path, format, params)
         
-        conn = HTTPConnection(self.host)
         kwargs = {}
         kwargs['headers'] = _prepare_headers(headers)
         kwargs['headers']['X-Auth-Token'] = self.token
@@ -100,9 +107,16 @@ class Client(object):
     def _chunked_transfer(self, path, method='PUT', f=stdin, headers=None,
                           blocksize=1024, params={}):
         """perfomrs a chunked request"""
-        full_path = _prepare_path(path, self.api, params=params)
+        p = urlparse(self.url)
+        if p.scheme == 'http':
+            conn = HTTPConnection(p.netloc)
+        elif p.scheme == 'https':
+            conn = HTTPSConnection(p.netloc)
+        else:
+            raise Exception('Unknown URL scheme')
+        
+        full_path = _prepare_path(p.path + path, params=params)
         
-        conn = HTTPConnection(self.host)
         conn.putrequest(method, full_path)
         conn.putheader('x-auth-token', self.token)
         conn.putheader('content-type', 'application/octet-stream')
@@ -280,10 +294,11 @@ class OOS_Client(Client):
             l = self._filter_trashed(l)
         return l
     
-    def create_container(self, container, account=None, **meta):
+    def create_container(self, container, account=None, meta={}, **headers):
         """creates a container"""
         account = account or self.account
-        headers = {}
+        if not headers:
+            headers = {}
         for k,v in meta.items():
             headers['x-container-meta-%s' %k.strip().upper()] = v.strip()
         status, header, data = self.put('/%s/%s' % (account, container),
@@ -342,13 +357,13 @@ class OOS_Client(Client):
             data = minidom.parseString(data)
         return data
     
-    def retrieve_object_hashmap(self, container, object, params={},
+    def retrieve_object_hashmap(self, container, object, format='json', params={},
                         account=None, **headers):
         """returns the hashmap representing object's data"""
+        # NOTE(review): a caller-supplied non-empty `params` dict is mutated
+        # in place by the update below; the mutable default itself is only
+        # replaced (it is falsy), so it is not shared across calls.
-        args = locals().copy()
-        for elem in ['self', 'container', 'object']:
-            args.pop(elem)
-        return self.retrieve_object(container, object, format='json', **args)
+        if not params:
+            params = {}
+        # A bare 'hashmap' query parameter asks the server for the hashmap
+        # representation instead of the object data.
+        params.update({'hashmap':None})
+        return self.retrieve_object(container, object, params, format, account, **headers)
     
     def create_directory_marker(self, container, object, account=None):
         """creates a dierectory marker"""
@@ -618,6 +633,12 @@ class Pithos_Client(OOS_Client):
         return self.post(path, headers=headers)
     
     # Storage Container Services
+    def create_container(self, container, account=None, meta={}, policies={}):
+        """creates a container
+
+        Extends the OOS variant with container policies: each (key, value)
+        in `policies` is sent as an X-Container-Policy-<Key> header.
+        NOTE(review): `meta={}` and `policies={}` are mutable defaults;
+        they are not mutated here, but passing them through is fragile.
+        """
+        args = {}
+        for k, v in policies.items():
+            args['X-Container-Policy-%s' % k.capitalize()] = v
+        return OOS_Client.create_container(self, container, account, meta, **args)
     
     def list_objects(self, container, format='text',
                      limit=None, marker=None, prefix=None, delimiter=None,
@@ -919,9 +940,8 @@ class Pithos_Client(OOS_Client):
         sharing = '%s=%s' % (action, ','.join(l))
         self.update_object(container, object, f=None, x_object_sharing=sharing)
 
-def _prepare_path(path, api, format='text', params={}):
-    slash = '/' if api else ''
-    full_path = '%s%s%s?format=%s' % (slash, api, quote(path), format)
+def _prepare_path(path, format='text', params={}):
+    full_path = '%s?format=%s' % (quote(path), format)
     
     for k,v in params.items():
         value = quote(str(v)) if v else ''
diff --git a/snf-pithos-lib/pithos/lib/dictconfig.py b/snf-pithos-lib/pithos/lib/dictconfig.py
new file mode 100644 (file)
index 0000000..167831b
--- /dev/null
@@ -0,0 +1,553 @@
+# This is a copy of the Python logging.config.dictconfig module.
+# It is provided here for backwards compatibility for Python versions
+# prior to 2.7.
+#
+# Copyright 2009-2010 by Vinay Sajip. All Rights Reserved.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose and without fee is hereby granted,
+# provided that the above copyright notice appear in all copies and that
+# both that copyright notice and this permission notice appear in
+# supporting documentation, and that the name of Vinay Sajip
+# not be used in advertising or publicity pertaining to distribution
+# of the software without specific, written prior permission.
+# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
+# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
+# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
+# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import logging.handlers
+import re
+import sys
+import types
+
+# Pattern for a valid Python identifier (ASCII, case-insensitive).
+IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
+
+def valid_ident(s):
+    """Return True if *s* is a valid Python identifier; raise ValueError otherwise."""
+    m = IDENTIFIER.match(s)
+    if not m:
+        raise ValueError('Not a valid Python identifier: %r' % s)
+    return True
+
+#
+# This function is defined in logging only in recent versions of Python
+#
+try:
+    # Recent Pythons (>= 2.7) provide this in the logging module itself;
+    # fall back to a local re-implementation on older versions.
+    from logging import _checkLevel
+except ImportError:
+    def _checkLevel(level):
+        """Validate *level* (int or level name) and return the numeric level."""
+        if isinstance(level, int):
+            rv = level
+        elif str(level) == level:
+            if level not in logging._levelNames:
+                raise ValueError('Unknown level: %r' % level)
+            rv = logging._levelNames[level]
+        else:
+            raise TypeError('Level not an integer or a '
+                            'valid string: %r' % level)
+        return rv
+
+# The ConvertingXXX classes are wrappers around standard Python containers,
+# and they serve to convert any suitable values in the container. The
+# conversion converts base dicts, lists and tuples to their wrapped
+# equivalents, whereas strings which match a conversion format are converted
+# appropriately.
+#
+# Each wrapper should have a configurator attribute holding the actual
+# configurator to use for conversion.
+
+class ConvertingDict(dict):
+    """A converting dictionary wrapper."""
+    # A `configurator` attribute must be assigned before use; all access
+    # methods delegate value conversion to it (see BaseConfigurator.convert).
+
+    def __getitem__(self, key):
+        value = dict.__getitem__(self, key)
+        result = self.configurator.convert(value)
+        #If the converted value is different, save for next time
+        if value is not result:
+            self[key] = result
+            if type(result) in (ConvertingDict, ConvertingList,
+                                ConvertingTuple):
+                result.parent = self
+                result.key = key
+        return result
+
+    def get(self, key, default=None):
+        value = dict.get(self, key, default)
+        result = self.configurator.convert(value)
+        #If the converted value is different, save for next time
+        if value is not result:
+            self[key] = result
+            if type(result) in (ConvertingDict, ConvertingList,
+                                ConvertingTuple):
+                result.parent = self
+                result.key = key
+        return result
+
+    def pop(self, key, default=None):
+        # Popped values are converted but (unlike __getitem__/get) not
+        # written back, since the key no longer exists in the dict.
+        value = dict.pop(self, key, default)
+        result = self.configurator.convert(value)
+        if value is not result:
+            if type(result) in (ConvertingDict, ConvertingList,
+                                ConvertingTuple):
+                result.parent = self
+                result.key = key
+        return result
+
+class ConvertingList(list):
+    """A converting list wrapper."""
+    # Requires a `configurator` attribute, like ConvertingDict.
+    def __getitem__(self, key):
+        value = list.__getitem__(self, key)
+        result = self.configurator.convert(value)
+        #If the converted value is different, save for next time
+        if value is not result:
+            self[key] = result
+            if type(result) in (ConvertingDict, ConvertingList,
+                                ConvertingTuple):
+                result.parent = self
+                result.key = key
+        return result
+
+    def pop(self, idx=-1):
+        # Note: unlike __getitem__, no `result.key` is recorded here
+        # (the element has been removed from the list).
+        value = list.pop(self, idx)
+        result = self.configurator.convert(value)
+        if value is not result:
+            if type(result) in (ConvertingDict, ConvertingList,
+                                ConvertingTuple):
+                result.parent = self
+        return result
+
+class ConvertingTuple(tuple):
+    """A converting tuple wrapper."""
+    # Tuples are immutable, so converted results cannot be written back;
+    # conversion happens on every access.
+    def __getitem__(self, key):
+        value = tuple.__getitem__(self, key)
+        result = self.configurator.convert(value)
+        if value is not result:
+            if type(result) in (ConvertingDict, ConvertingList,
+                                ConvertingTuple):
+                result.parent = self
+                result.key = key
+        return result
+
+class BaseConfigurator(object):
+    """
+    The configurator base class which defines some useful defaults.
+    """
+
+    # Matches 'prefix://suffix' strings that trigger value conversion.
+    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
+
+    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
+    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
+    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
+    DIGIT_PATTERN = re.compile(r'^\d+$')
+
+    # Maps conversion-URL prefixes to converter method names.
+    value_converters = {
+        'ext' : 'ext_convert',
+        'cfg' : 'cfg_convert',
+    }
+
+    # We might want to use a different one, e.g. importlib
+    importer = __import__
+
+    def __init__(self, config):
+        self.config = ConvertingDict(config)
+        self.config.configurator = self
+
+    def resolve(self, s):
+        """
+        Resolve strings to objects using standard import and attribute
+        syntax.
+        """
+        name = s.split('.')
+        used = name.pop(0)
+        try:
+            found = self.importer(used)
+            for frag in name:
+                used += '.' + frag
+                try:
+                    found = getattr(found, frag)
+                except AttributeError:
+                    # Not an attribute yet -- try importing the longer
+                    # dotted name (it may be a submodule) and retry.
+                    self.importer(used)
+                    found = getattr(found, frag)
+            return found
+        except ImportError:
+            e, tb = sys.exc_info()[1:]
+            v = ValueError('Cannot resolve %r: %s' % (s, e))
+            # Preserve the original cause/traceback for debugging.
+            v.__cause__, v.__traceback__ = e, tb
+            raise v
+
+    def ext_convert(self, value):
+        """Default converter for the ext:// protocol."""
+        return self.resolve(value)
+
+    def cfg_convert(self, value):
+        """Default converter for the cfg:// protocol."""
+        rest = value
+        m = self.WORD_PATTERN.match(rest)
+        if m is None:
+            raise ValueError("Unable to convert %r" % value)
+        else:
+            rest = rest[m.end():]
+            d = self.config[m.groups()[0]]
+            #print d, rest
+            # Walk the remaining '.attr' and '[index]' accessors.
+            while rest:
+                m = self.DOT_PATTERN.match(rest)
+                if m:
+                    d = d[m.groups()[0]]
+                else:
+                    m = self.INDEX_PATTERN.match(rest)
+                    if m:
+                        idx = m.groups()[0]
+                        if not self.DIGIT_PATTERN.match(idx):
+                            d = d[idx]
+                        else:
+                            try:
+                                n = int(idx) # try as number first (most likely)
+                                d = d[n]
+                            except TypeError:
+                                d = d[idx]
+                if m:
+                    rest = rest[m.end():]
+                else:
+                    raise ValueError('Unable to convert '
+                                     '%r at %r' % (value, rest))
+        #rest should be empty
+        return d
+
+    def convert(self, value):
+        """
+        Convert values to an appropriate type. dicts, lists and tuples are
+        replaced by their converting alternatives. Strings are checked to
+        see if they have a conversion format and are converted if they do.
+        """
+        if not isinstance(value, ConvertingDict) and isinstance(value, dict):
+            value = ConvertingDict(value)
+            value.configurator = self
+        elif not isinstance(value, ConvertingList) and isinstance(value, list):
+            value = ConvertingList(value)
+            value.configurator = self
+        elif not isinstance(value, ConvertingTuple) and\
+                 isinstance(value, tuple):
+            value = ConvertingTuple(value)
+            value.configurator = self
+        elif isinstance(value, basestring): # str for py3k
+            m = self.CONVERT_PATTERN.match(value)
+            if m:
+                d = m.groupdict()
+                prefix = d['prefix']
+                converter = self.value_converters.get(prefix, None)
+                if converter:
+                    suffix = d['suffix']
+                    converter = getattr(self, converter)
+                    value = converter(suffix)
+        return value
+
+    def configure_custom(self, config):
+        """Configure an object with a user-supplied factory."""
+        c = config.pop('()')
+        # Strings (and old-style classes given by name) are resolved to
+        # the actual callable first.
+        if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
+            c = self.resolve(c)
+        props = config.pop('.', None)
+        # Check for valid identifiers
+        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
+        result = c(**kwargs)
+        if props:
+            for name, value in props.items():
+                setattr(result, name, value)
+        return result
+
+    def as_tuple(self, value):
+        """Utility function which converts lists to tuples."""
+        if isinstance(value, list):
+            value = tuple(value)
+        return value
+
+class DictConfigurator(BaseConfigurator):
+    """
+    Configure logging using a dictionary-like object to describe the
+    configuration.
+    """
+    # NOTE: uses Python-2-only syntax (`except StandardError, e`)
+    # throughout; this backport targets Python < 2.7 interpreters.
+
+    def configure(self):
+        """Do the configuration."""
+
+        config = self.config
+        if 'version' not in config:
+            raise ValueError("dictionary doesn't specify a version")
+        if config['version'] != 1:
+            raise ValueError("Unsupported version: %s" % config['version'])
+        incremental = config.pop('incremental', False)
+        EMPTY_DICT = {}
+        # Hold the module-level logging lock for the whole reconfiguration.
+        logging._acquireLock()
+        try:
+            if incremental:
+                handlers = config.get('handlers', EMPTY_DICT)
+                # incremental handler config only if handler name
+                # ties in to logging._handlers (Python 2.7)
+                if sys.version_info[:2] == (2, 7):
+                    for name in handlers:
+                        if name not in logging._handlers:
+                            raise ValueError('No handler found with '
+                                             'name %r'  % name)
+                        else:
+                            try:
+                                handler = logging._handlers[name]
+                                handler_config = handlers[name]
+                                level = handler_config.get('level', None)
+                                if level:
+                                    handler.setLevel(_checkLevel(level))
+                            except StandardError, e:
+                                raise ValueError('Unable to configure handler '
+                                                 '%r: %s' % (name, e))
+                loggers = config.get('loggers', EMPTY_DICT)
+                for name in loggers:
+                    try:
+                        self.configure_logger(name, loggers[name], True)
+                    except StandardError, e:
+                        raise ValueError('Unable to configure logger '
+                                         '%r: %s' % (name, e))
+                root = config.get('root', None)
+                if root:
+                    try:
+                        self.configure_root(root, True)
+                    except StandardError, e:
+                        raise ValueError('Unable to configure root '
+                                         'logger: %s' % e)
+            else:
+                disable_existing = config.pop('disable_existing_loggers', True)
+
+                logging._handlers.clear()
+                del logging._handlerList[:]
+
+                # Do formatters first - they don't refer to anything else
+                formatters = config.get('formatters', EMPTY_DICT)
+                for name in formatters:
+                    try:
+                        formatters[name] = self.configure_formatter(
+                                                            formatters[name])
+                    except StandardError, e:
+                        raise ValueError('Unable to configure '
+                                         'formatter %r: %s' % (name, e))
+                # Next, do filters - they don't refer to anything else, either
+                filters = config.get('filters', EMPTY_DICT)
+                for name in filters:
+                    try:
+                        filters[name] = self.configure_filter(filters[name])
+                    except StandardError, e:
+                        raise ValueError('Unable to configure '
+                                         'filter %r: %s' % (name, e))
+
+                # Next, do handlers - they refer to formatters and filters
+                # As handlers can refer to other handlers, sort the keys
+                # to allow a deterministic order of configuration
+                handlers = config.get('handlers', EMPTY_DICT)
+                for name in sorted(handlers):
+                    try:
+                        handler = self.configure_handler(handlers[name])
+                        handler.name = name
+                        handlers[name] = handler
+                    except StandardError, e:
+                        raise ValueError('Unable to configure handler '
+                                         '%r: %s' % (name, e))
+                # Next, do loggers - they refer to handlers and filters
+
+                #we don't want to lose the existing loggers,
+                #since other threads may have pointers to them.
+                #existing is set to contain all existing loggers,
+                #and as we go through the new configuration we
+                #remove any which are configured. At the end,
+                #what's left in existing is the set of loggers
+                #which were in the previous configuration but
+                #which are not in the new configuration.
+                root = logging.root
+                existing = root.manager.loggerDict.keys()
+                #The list needs to be sorted so that we can
+                #avoid disabling child loggers of explicitly
+                #named loggers. With a sorted list it is easier
+                #to find the child loggers.
+                existing.sort()
+                #We'll keep the list of existing loggers
+                #which are children of named loggers here...
+                child_loggers = []
+                #now set up the new ones...
+                loggers = config.get('loggers', EMPTY_DICT)
+                for name in loggers:
+                    if name in existing:
+                        i = existing.index(name)
+                        prefixed = name + "."
+                        pflen = len(prefixed)
+                        num_existing = len(existing)
+                        i = i + 1 # look at the entry after name
+                        while (i < num_existing) and\
+                              (existing[i][:pflen] == prefixed):
+                            child_loggers.append(existing[i])
+                            i = i + 1
+                        existing.remove(name)
+                    try:
+                        self.configure_logger(name, loggers[name])
+                    except StandardError, e:
+                        raise ValueError('Unable to configure logger '
+                                         '%r: %s' % (name, e))
+
+                #Disable any old loggers. There's no point deleting
+                #them as other threads may continue to hold references
+                #and by disabling them, you stop them doing any logging.
+                #However, don't disable children of named loggers, as that's
+                #probably not what was intended by the user.
+                for log in existing:
+                    logger = root.manager.loggerDict[log]
+                    if log in child_loggers:
+                        logger.level = logging.NOTSET
+                        logger.handlers = []
+                        logger.propagate = True
+                    elif disable_existing:
+                        logger.disabled = True
+
+                # And finally, do the root logger
+                root = config.get('root', None)
+                if root:
+                    try:
+                        self.configure_root(root)
+                    except StandardError, e:
+                        raise ValueError('Unable to configure root '
+                                         'logger: %s' % e)
+        finally:
+            logging._releaseLock()
+
+    def configure_formatter(self, config):
+        """Configure a formatter from a dictionary."""
+        if '()' in config:
+            factory = config['()'] # for use in exception handler
+            try:
+                result = self.configure_custom(config)
+            except TypeError, te:
+                if "'format'" not in str(te):
+                    raise
+                #Name of parameter changed from fmt to format.
+                #Retry with old name.
+                #This is so that code can be used with older Python versions
+                #(e.g. by Django)
+                config['fmt'] = config.pop('format')
+                config['()'] = factory
+                result = self.configure_custom(config)
+        else:
+            fmt = config.get('format', None)
+            dfmt = config.get('datefmt', None)
+            result = logging.Formatter(fmt, dfmt)
+        return result
+
+    def configure_filter(self, config):
+        """Configure a filter from a dictionary."""
+        if '()' in config:
+            result = self.configure_custom(config)
+        else:
+            name = config.get('name', '')
+            result = logging.Filter(name)
+        return result
+
+    def add_filters(self, filterer, filters):
+        """Add filters to a filterer from a list of names."""
+        for f in filters:
+            try:
+                filterer.addFilter(self.config['filters'][f])
+            except StandardError, e:
+                raise ValueError('Unable to add filter %r: %s' % (f, e))
+
+    def configure_handler(self, config):
+        """Configure a handler from a dictionary."""
+        formatter = config.pop('formatter', None)
+        if formatter:
+            try:
+                formatter = self.config['formatters'][formatter]
+            except StandardError, e:
+                raise ValueError('Unable to set formatter '
+                                 '%r: %s' % (formatter, e))
+        level = config.pop('level', None)
+        filters = config.pop('filters', None)
+        if '()' in config:
+            c = config.pop('()')
+            if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
+                c = self.resolve(c)
+            factory = c
+        else:
+            klass = self.resolve(config.pop('class'))
+            #Special case for handler which refers to another handler
+            if issubclass(klass, logging.handlers.MemoryHandler) and\
+                'target' in config:
+                try:
+                    config['target'] = self.config['handlers'][config['target']]
+                except StandardError, e:
+                    raise ValueError('Unable to set target handler '
+                                     '%r: %s' % (config['target'], e))
+            elif issubclass(klass, logging.handlers.SMTPHandler) and\
+                'mailhost' in config:
+                config['mailhost'] = self.as_tuple(config['mailhost'])
+            elif issubclass(klass, logging.handlers.SysLogHandler) and\
+                'address' in config:
+                config['address'] = self.as_tuple(config['address'])
+            factory = klass
+        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
+        try:
+            result = factory(**kwargs)
+        except TypeError, te:
+            if "'stream'" not in str(te):
+                raise
+            #The argument name changed from strm to stream
+            #Retry with old name.
+            #This is so that code can be used with older Python versions
+            #(e.g. by Django)
+            kwargs['strm'] = kwargs.pop('stream')
+            result = factory(**kwargs)
+        if formatter:
+            result.setFormatter(formatter)
+        if level is not None:
+            result.setLevel(_checkLevel(level))
+        if filters:
+            self.add_filters(result, filters)
+        return result
+
+    def add_handlers(self, logger, handlers):
+        """Add handlers to a logger from a list of names."""
+        for h in handlers:
+            try:
+                logger.addHandler(self.config['handlers'][h])
+            except StandardError, e:
+                raise ValueError('Unable to add handler %r: %s' % (h, e))
+
+    def common_logger_config(self, logger, config, incremental=False):
+        """
+        Perform configuration which is common to root and non-root loggers.
+        """
+        level = config.get('level', None)
+        if level is not None:
+            logger.setLevel(_checkLevel(level))
+        if not incremental:
+            #Remove any existing handlers
+            for h in logger.handlers[:]:
+                logger.removeHandler(h)
+            handlers = config.get('handlers', None)
+            if handlers:
+                self.add_handlers(logger, handlers)
+            filters = config.get('filters', None)
+            if filters:
+                self.add_filters(logger, filters)
+
+    def configure_logger(self, name, config, incremental=False):
+        """Configure a non-root logger from a dictionary."""
+        logger = logging.getLogger(name)
+        self.common_logger_config(logger, config, incremental)
+        propagate = config.get('propagate', None)
+        if propagate is not None:
+            logger.propagate = propagate
+
+    def configure_root(self, config, incremental=False):
+        """Configure a root logger from a dictionary."""
+        root = logging.getLogger()
+        self.common_logger_config(root, config, incremental)
+
+# Indirection point: assign a different class here to customize behaviour.
+dictConfigClass = DictConfigurator
+
+def dictConfig(config):
+    """Configure logging using a dictionary."""
+    dictConfigClass(config).configure()
diff --git a/snf-pithos-lib/pithos/lib/queue.py b/snf-pithos-lib/pithos/lib/queue.py
new file mode 100755 (executable)
index 0000000..eb48e1d
--- /dev/null
@@ -0,0 +1,112 @@
+# Copyright 2012 GRNET S.A. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or
+# without modification, are permitted provided that the following
+# conditions are met:
+#
+#   1. Redistributions of source code must retain the above
+#      copyright notice, this list of conditions and the following
+#      disclaimer.
+#
+#   2. Redistributions in binary form must reproduce the above
+#      copyright notice, this list of conditions and the following
+#      disclaimer in the documentation and/or other materials
+#      provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and
+# documentation are those of the authors and should not be
+# interpreted as representing official policies, either expressed
+# or implied, of GRNET S.A.
+
+import pika
+import json
+import uuid
+
+from urlparse import urlparse
+from hashlib import sha1
+from random import random
+from time import time
+
+
+def exchange_connect(exchange, vhost='/'):
+    """Format exchange as a URI: rabbitmq://user:pass@host:port/exchange"""
+    parts = urlparse(exchange)
+    if parts.scheme != 'rabbitmq':
+        return None
+    if len(parts.path) < 2 or not parts.path.startswith('/'):
+        return None
+    exchange = parts.path[1:]
+    connection = pika.BlockingConnection(pika.ConnectionParameters(
+                    host=parts.hostname, port=parts.port, virtual_host=vhost,
+                    credentials=pika.PlainCredentials(parts.username, parts.password)))
+    channel = connection.channel()
+    channel.exchange_declare(exchange=exchange, type='topic', durable=True)
+    return (connection, channel, exchange)
+
+def exchange_close(conn):
+    connection, channel, exchange = conn
+    connection.close()
+
+def exchange_send(conn, key, value):
+    """Messages are sent to exchanges at a key."""
+    connection, channel, exchange = conn
+    channel.basic_publish(exchange=exchange,
+                          routing_key=key,
+                          body=json.dumps(value))
+
+    
+def exchange_route(conn, key, queue):
+    """Set up routing of keys to queue."""
+    connection, channel, exchange = conn
+    channel.queue_declare(queue=queue, durable=True,
+                          exclusive=False, auto_delete=False)
+    channel.queue_bind(exchange=exchange,
+                       queue=queue,
+                       routing_key=key)
+
+def queue_callback(conn, queue, cb):
+    
+    def handle_delivery(channel, method_frame, header_frame, body):
+        #print 'Basic.Deliver %s delivery-tag %i: %s' % (header_frame.content_type,
+        #                                                method_frame.delivery_tag,
+        #                                                body)
+        if cb:
+            cb(json.loads(body))
+        channel.basic_ack(delivery_tag=method_frame.delivery_tag)
+    
+    connection, channel, exchange = conn
+    channel.basic_consume(handle_delivery, queue=queue)
+
+def queue_start(conn):
+    connection, channel, exchange = conn
+    channel.start_consuming()
+
+class Receipt(object):
+    def __init__(self, client, user, resource, value, details={}):
+        self.eventVersion = '1.0'
+        self.occurredMillis = int(time() * 1000)
+        self.receivedMillis = self.occurredMillis
+        self.clientID = client
+        self.userID = user
+        self.resource = resource
+        self.value = value
+        self.details = dict(details)
+        hash = sha1()
+        hash.update(json.dumps([client, user, resource, value, details, random()]))
+        self.id = hash.hexdigest()
+    
+    def format(self):
+        return self.__dict__
similarity index 65%
rename from snf-pithos-app/pithos/middleware/user.py
rename to snf-pithos-lib/pithos/lib/user.py
index d334648..e511bce 100644 (file)
 # or implied, of GRNET S.A.
 
 from time import time, mktime
-from httplib import HTTPConnection
+from urlparse import urlparse
+from httplib import HTTPConnection, HTTPSConnection
 from urllib import quote, unquote
 
 from django.conf import settings
 from django.utils import simplejson as json
 
 
-def authenticate(authentication_host, token):
-    con = HTTPConnection(authentication_host)
+def authenticate(token, authentication_url='http://127.0.0.1:8000/im/authenticate'):
+    p = urlparse(authentication_url)
+    if p.scheme == 'http':
+        conn = HTTPConnection(p.netloc)
+    elif p.scheme == 'https':
+        conn = HTTPSConnection(p.netloc)
+    else:
+        raise Exception('Unknown URL scheme')
+    
     kwargs = {}
     kwargs['headers'] = {}
     kwargs['headers']['X-Auth-Token'] = token
     kwargs['headers']['Content-Length'] = 0
     
-    path = '/im/authenticate'
-    con.request('GET', path, **kwargs)
-    response = con.getresponse()
+    conn.request('GET', p.path, **kwargs)
+    response = conn.getresponse()
     
     headers = response.getheaders()
     headers = dict((unquote(h), unquote(v)) for h,v in headers)
@@ -61,34 +68,31 @@ def authenticate(authentication_host, token):
     
     return json.loads(data)
 
-def get_user_from_token(token):
+def user_for_token(token, authentication_url, override_users):
     if not token:
         return None
     
-    users = settings.AUTHENTICATION_USERS
-    if users is not None:
+    if override_users:
         try:
-            return {'id': 0, 'uniq': users[token].decode('utf8')}
+            return {'uniq': override_users[token].decode('utf8')}
         except:
             return None
     
-    host = settings.AUTHENTICATION_HOST
     try:
-        return authenticate(host, token)
+        return authenticate(token, authentication_url)
     except:
         return None
 
-class UserMiddleware(object):
-    def process_request(self, request):
-        request.user = None
-        request.user_uniq = None
-        
-        # Try to find token in a parameter, in a request header, or in a cookie.
-        user = get_user_from_token(request.GET.get('X-Auth-Token'))
-        if not user:
-            user = get_user_from_token(request.META.get('HTTP_X_AUTH_TOKEN'))
-        if not user:
-            return
-        
-        request.user = user
-        request.user_uniq = user['uniq']
+def get_user(request, authentication_url='http://127.0.0.1:8000/im/authenticate', override_users={}):
+    request.user = None
+    request.user_uniq = None
+    
+    # Try to find token in a parameter or in a request header.
+    user = user_for_token(request.GET.get('X-Auth-Token'), authentication_url, override_users)
+    if not user:
+        user = user_for_token(request.META.get('HTTP_X_AUTH_TOKEN'), authentication_url, override_users)
+    if not user:
+        return
+    
+    request.user = user
+    request.user_uniq = user['uniq']
index df650de..d33fd76 100644 (file)
 
 import os
 
-DEFAULT_SERVER = 'plus.pithos.grnet.gr'
-DEFAULT_API = 'v1'
+DEFAULT_URL = 'https://plus.pithos.grnet.gr/v1'
 DEFAULT_USER = 'test'
-DEFAULT_AUTH = '0000'
+DEFAULT_TOKEN = '0000'
 
 def get_user():
     try:
@@ -46,18 +45,12 @@ def get_user():
 
 def get_auth():
     try:
-        return os.environ['PITHOS_AUTH']
+        return os.environ['PITHOS_TOKEN']
     except KeyError:
-        return DEFAULT_AUTH
+        return DEFAULT_TOKEN
 
-def get_server():
+def get_url():
     try:
-        return os.environ['PITHOS_SERVER']
+        return os.environ['PITHOS_URL'].rstrip('/')
     except KeyError:
-        return DEFAULT_SERVER
-
-def get_api():
-    try:
-        return os.environ['PITHOS_API']
-    except KeyError:
-        return DEFAULT_API
+        return DEFAULT_URL
index 2138ac6..d1af303 100755 (executable)
 import sys
 import logging
 
-from optparse import OptionParser
-
-try:
-    from carrot.connection import BrokerConnection
-    from carrot.messaging import Consumer
-    from carrot.messaging import Publisher
-except ImportError:
-    sys.stderr.write("Dispatcher requires 'carrot' python library to " \
-                     "be installed\n")
-    sys.exit(1)
+from pithos.lib.queue import (exchange_connect, exchange_close,
+    exchange_send, exchange_route, queue_callback, queue_start)
 
+from optparse import OptionParser
 
 
 BROKER_HOST = 'localhost'
@@ -87,29 +80,21 @@ def main():
     parser.add_option('--test', action='store_true', default=False,
                       dest='test', help='Produce a dummy message for testing')
     opts, args = parser.parse_args()
-
+    
     if opts.verbose:
         DEBUG = True
     logging.basicConfig(format='%(asctime)s [%(levelname)s] %(name)s %(message)s',
                         datefmt='%Y-%m-%d %H:%M:%S',
                         level=logging.DEBUG if DEBUG else logging.INFO)
     logger = logging.getLogger('dispatcher')
-
-    conn = BrokerConnection(hostname=opts.host, port=opts.port,
-                            userid=opts.user, password=opts.password,
-                            virtual_host=opts.vhost)
+    
+    exchange = 'rabbitmq://%s:%s@%s:%s/%s' % (opts.user, opts.password, opts.host, opts.port, opts.exchange)
+    connection = exchange_connect(exchange)
     if opts.test:
-        publisher = Publisher(connection=conn,
-                              exchange=opts.exchange, routing_key=opts.key,
-                              exchange_type="topic")
-        publisher.send({"test": "0123456789"})
-        publisher.close()
-        conn.close()
+        exchange_send(connection, opts.key, {"test": "0123456789"})
+        exchange_close(connection)
         sys.exit()
-    consumer = Consumer(connection=conn, queue=opts.queue,
-                        exchange=opts.exchange, routing_key=opts.key,
-                        exchange_type="topic")
-
+    
     callback = None
     if opts.callback:
         cb = opts.callback.rsplit('.', 1)
@@ -117,19 +102,19 @@ def main():
             __import__(cb[0])
             cb_module = sys.modules[cb[0]]
             callback = getattr(cb_module, cb[1])
-
-    def process_message(message_data, message):
-        logger.debug('%s', message_data)
+    
+    def handle_message(msg):
+        logger.debug('%s', msg)
         if callback:
-            callback(message_data)
-        message.ack()
-
-    consumer.register_callback(process_message)
+            callback(msg)
+    
+    exchange_route(connection, opts.key, opts.queue)
+    queue_callback(connection, opts.queue, handle_message)
     try:
-        consumer.wait()
+        queue_start(connection)
     except KeyboardInterrupt:
         pass
 
+
 if __name__ == '__main__':
     main()
-
index 96a6411..ef7110b 100755 (executable)
@@ -44,7 +44,7 @@ from time import time
 from pithos.lib.compat import parse_http_date
 from pithos.lib.client import OOS_Client, Fault
 from pithos.lib.fuse import FUSE, FuseOSError, Operations
-from pithos.lib.util import get_user, get_auth, get_server
+from pithos.lib.util import get_user, get_auth, get_url
 
 
 epoch = int(time())
@@ -53,7 +53,7 @@ epoch = int(time())
 class StoreFS(Operations):
     def __init__(self, verbose=False):
         self.verbose = verbose
-        self.client = OOS_Client(get_server(), get_auth(), get_user())
+        self.client = OOS_Client(get_url(), get_auth(), get_user())
     
     def __call__(self, op, path, *args):
         container, sep, object = path[1:].partition('/')
index dda8e8c..f0ae12c 100755 (executable)
@@ -40,7 +40,7 @@ from sys import argv, exit, stdin, stdout
 from datetime import datetime
 
 from pithos.lib.client import Pithos_Client, Fault
-from pithos.lib.util import get_user, get_auth, get_server, get_api
+from pithos.lib.util import get_user, get_auth, get_url
 from pithos.lib.transfer import upload, download
 
 import json
@@ -68,20 +68,18 @@ class Command(object):
     
     def __init__(self, name, argv):
         parser = OptionParser('%%prog %s [options] %s' % (name, self.syntax))
-        parser.add_option('--host', dest='host', metavar='HOST',
-                          default=get_server(), help='use server HOST')
-        parser.add_option('--user', dest='user', metavar='USERNAME',
+        parser.add_option('--url', dest='url', metavar='URL',
+                          default=get_url(), help='server URL (currently: %s)' % get_url())
+        parser.add_option('--user', dest='user', metavar='USER',
                           default=get_user(),
-                          help='use account USERNAME')
-        parser.add_option('--token', dest='token', metavar='AUTH',
+                          help='account USER (currently: %s)' % get_user())
+        parser.add_option('--token', dest='token', metavar='TOKEN',
                           default=get_auth(),
-                          help='use account AUTH')
-        parser.add_option('--api', dest='api', metavar='API',
-                          default=get_api(), help='use api API')
+                          help='account TOKEN (currently: %s)' % get_auth())
         parser.add_option('-v', action='store_true', dest='verbose',
-                          default=False, help='use verbose output')
+                          default=False, help='verbose output')
         parser.add_option('-d', action='store_true', dest='debug',
-                          default=False, help='use debug output')
+                          default=False, help='debug output')
         self.add_options(parser)
         options, args = parser.parse_args(argv)
         
@@ -92,7 +90,7 @@ class Command(object):
                 val = getattr(options, key)
                 setattr(self, key, val)
         
-        self.client = Pithos_Client(self.host, self.token, self.user, self.api, self.verbose,
+        self.client = Pithos_Client(self.url, self.token, self.user, self.verbose,
                              self.debug)
         
         self.parser = parser
@@ -227,13 +225,20 @@ class Meta(Command):
 class CreateContainer(Command):
     syntax = '<container> [key=val] [...]'
     description = 'create a container'
+    policy = ('versioning', 'quota')
+    
+    def add_options(self, parser):
+        parser.add_option('--versioning', action='store', dest='versioning',
+                          default=None, help='set container versioning (auto/none)')
+        parser.add_option('--quota', action='store', dest='quota',
+                          default=None, help='set default container quota')
     
     def execute(self, container, *args):
         meta = {}
         for arg in args:
             key, sep, val = arg.partition('=')
             meta[key] = val
-        ret = self.client.create_container(container, **meta)
+        ret = self.client.create_container(container, meta=meta, policies=dict((k, getattr(self, k)) for k in self.policy if getattr(self, k, None)))
         if not ret:
             print 'Container already exists'
 
@@ -293,10 +298,13 @@ class GetObject(Command):
         parser.add_option('--versionlist', action='store_true',
                           dest='versionlist', default=False,
                           help='get the full object version list')
+        parser.add_option('--hashmap', action='store_true',
+                          dest='hashmap', default=False,
+                          help='get the object hashmap instead')
     
     def execute(self, path):
         attrs = ['if_match', 'if_none_match', 'if_modified_since',
-                 'if_unmodified_since']
+                 'if_unmodified_since', 'hashmap']
         args = self._build_args(attrs)
         args['format'] = 'json' if self.detail else 'text'
         if self.range:
@@ -315,11 +323,17 @@ class GetObject(Command):
         elif self.version:
             data = self.client.retrieve_object_version(container, object,
                                                        self.version, **args)
+        elif self.hashmap:
+            if 'detail' in args.keys():
+                args.pop('detail')
+            args.pop('format')
+            self.detail = True
+            data = self.client.retrieve_object_hashmap(container, object, **args)
         else:
             data = self.client.retrieve_object(container, object, **args)    
         
         f = open(self.file, 'w') if self.file else stdout
-        if self.detail:
+        if self.detail or isinstance(data, dict):
             if self.versionlist:
                 print_versions(data, f=f)
             else:
@@ -360,7 +374,7 @@ class PutObject(Command):
         #                  help='use for large file support')
         parser.add_option('--manifest', action='store',
                           dest='x_object_manifest', default=None,
-                          help='upload a manifestation file')
+                          help='provide object parts prefix in <container>/<object> form')
         parser.add_option('--content-type', action='store',
                           dest='content_type', default=None,
                           help='create object with specific content type')
@@ -487,7 +501,7 @@ class UpdateObject(Command):
         parser.add_option('--offset', action='store',
                           dest='offset',
                           default=None, help='starting offest to be updated')
-        parser.add_option('--range', action='store', dest='content-range',
+        parser.add_option('--range', action='store', dest='content_range',
                           default=None, help='range of data to be updated')
         parser.add_option('--chunked', action='store_true', dest='chunked',
                           default=False, help='set chunked transfer mode')
@@ -528,7 +542,8 @@ class UpdateObject(Command):
         
         
         attrs = ['content_encoding', 'content_disposition', 'x_object_sharing',
-                 'x_object_public', 'replace']
+                 'x_object_public', 'x_object_manifest', 'replace', 'offset',
+                 'content_range']
         args = self._build_args(attrs)
         
         if self.no_sharing:
index d92c9cb..abf2c3f 100755 (executable)
@@ -44,7 +44,7 @@ from time import time
 from pithos.lib.transfer import download, upload
 from pithos.lib.client import Pithos_Client, Fault
 from pithos.lib.hashmap import merkle
-from pithos.lib.util import get_user, get_auth, get_server
+from pithos.lib.util import get_user, get_auth, get_url
 
 
 DEFAULT_CONTAINER = 'pithos'
@@ -57,7 +57,7 @@ SQL_CREATE_FILES_TABLE = '''CREATE TABLE IF NOT EXISTS files (
                                 timestamp INTEGER)'''
 
 
-client = Pithos_Client(get_server(), get_auth(), get_user())
+client = Pithos_Client(get_url(), get_auth(), get_user())
 
 
 def _makedirs(path):
index 35d5e9c..e4b36da 100755 (executable)
 # or implied, of GRNET S.A.
 
 from pithos.lib.client import Pithos_Client, Fault
-from pithos.lib.util import get_user, get_auth, get_server, get_api
+from pithos.lib.util import get_user, get_auth, get_url
 
 from xml.dom import minidom
 from StringIO import StringIO
 from hashlib import new as newhasher
 from binascii import hexlify
+from httplib import HTTPConnection
+from urlparse import urlparse
 
 import json
 import unittest
@@ -71,11 +73,9 @@ OTHER_ACCOUNTS = {
 class BaseTestCase(unittest.TestCase):
     #TODO unauthorized request
     def setUp(self):
-        self.client = Pithos_Client(get_server(), get_auth(), get_user(),
-                                    get_api())
+        self.client = Pithos_Client(get_url(), get_auth(), get_user())
         self._clean_account()
-        self.invalid_client = Pithos_Client(get_server(), get_auth(), 'invalid',
-                                            get_api())
+        self.invalid_client = Pithos_Client(get_url(), get_auth(), 'invalid')
         
         #keep track of initial account groups
         self.initial_groups = self.client.retrieve_account_groups()
@@ -1416,7 +1416,7 @@ class ObjectPost(BaseTestCase):
                                  self.containers[0],
                                  self.obj[0]['name']):
             #perform update metadata
-            more = {'foo':'foo', 'bar':'bar'}
+            more = {'foo': 'foo', 'bar': 'bar', 'f' * 114: 'b' * 256}
             status = self.client.update_object_metadata(self.containers[0],
                                                         self.obj[0]['name'],
                                                         **more)[0]
@@ -1431,6 +1431,13 @@ class ObjectPost(BaseTestCase):
             for k,v in more.items():
                 self.assertTrue(k in headers.keys())
                 self.assertTrue(headers[k], v)
+            
+            #out of limits
+            more = {'f' * 114: 'b' * 257}
+            self.assert_raises_fault(400, self.client.update_object_metadata,
+                                                        self.containers[0],
+                                                        self.obj[0]['name'],
+                                                        **more)
     
     def test_update_object(self,
                            first_byte_pos=0,
@@ -1452,9 +1459,10 @@ class ObjectPost(BaseTestCase):
             if content_length:
                 args['content_length'] = content_length
             
-            status = self.client.update_object(self.containers[0], self.obj[0]['name'],
-                                      StringIO(data), **args)[0]
-            
+            r = self.client.update_object(self.containers[0], self.obj[0]['name'],
+                                      StringIO(data), **args)
+            status = r[0]
+            etag = r[1]['etag']
             if partial < 0 or (instance_length and l <= last_byte_pos):
                 self.assertEqual(status, 202)    
             else:
@@ -1465,6 +1473,7 @@ class ObjectPost(BaseTestCase):
                 self.assertEqual(content[:first_byte_pos], self.obj[0]['data'][:first_byte_pos])
                 self.assertEqual(content[first_byte_pos:last_byte_pos+1], data)
                 self.assertEqual(content[last_byte_pos+1:], self.obj[0]['data'][last_byte_pos+1:])
+                self.assertEqual(etag, compute_md5_hash(content))
     
     def test_update_object_lt_blocksize(self):
         self.test_update_object(10, 20, content_length=None)
@@ -1681,10 +1690,9 @@ class ListSharing(BaseTestCase):
             l.append(accounts.popitem())
     
     def test_list_other_shared(self):
-        self.other = Pithos_Client(get_server(),
+        self.other = Pithos_Client(get_url(),
                               self.o1_sharing_with[0],
-                              self.o1_sharing_with[1],
-                              get_api())
+                              self.o1_sharing_with[1])
         self.assertTrue(get_user() in self.other.list_shared_by_others())
     
     def test_list_my_shared(self):
@@ -1834,10 +1842,9 @@ class TestGreek(BaseTestCase):
         self.client.create_container('φάκελος')
         o = self.upload_random_data('φάκελος', 'ο1')
         self.client.share_object('φάκελος', 'ο1', ['%s:σεφς' % get_user()])
-        chef = Pithos_Client(get_server(),
+        chef = Pithos_Client(get_url(),
                             '0009',
-                            'διογένης',
-                            get_api())
+                            'διογένης')
         self.assert_not_raises_fault(403, chef.retrieve_object_metadata,
                                      'φάκελος', 'ο1', account=get_user())
         
@@ -1892,40 +1899,61 @@ class TestPermissions(BaseTestCase):
         self.authorized = ['chazapis', 'verigak', 'gtsouk']
         groups = {'pithosdev':','.join(self.authorized)}
         self.client.set_account_groups(**groups)
-    
-    def assert_read(self, authorized=[], any=False):
+        
+        self.container = 'c'
+        self.object = 'o'
+        self.client.create_container(self.container)
+        self.upload_random_data(self.container, self.object)
+        self.upload_random_data(self.container, self.object+'/')
+        self.upload_random_data(self.container, self.object+'/a')
+        self.upload_random_data(self.container, self.object+'a')
+        self.upload_random_data(self.container, self.object+'a/')
+        self.dir_content_types = ('application/directory', 'application/folder')
+    
+    def assert_read(self, authorized=[], any=False, depth=0):
         for token, account in OTHER_ACCOUNTS.items():
-            cl = Pithos_Client(get_server(), token, account, get_api()) 
+            cl = Pithos_Client(get_url(), token, account)
             if account in authorized or any:
                 self.assert_not_raises_fault(403, cl.retrieve_object_metadata,
-                                             'c', 'o', account=get_user())
+                                             self.container, self.object,
+                                             account=get_user())
             else:
                 self.assert_raises_fault(403, cl.retrieve_object_metadata,
-                                         'c', 'o', account=get_user())
+                                         self.container, self.object,
+                                         account=get_user())
         
         #check inheritance
-        o = self.upload_random_data('c', 'o/also-shared')
+        meta = self.client.retrieve_object_metadata(self.container, self.object)
+        type = meta['content-type']
+        derivatives = self.client.list_objects(self.container, prefix=self.object)
+        #exclude the self.object
+        del derivatives[derivatives.index(self.object)]
+        for o in derivatives:
+            for token, account in OTHER_ACCOUNTS.items():
+                cl = Pithos_Client(get_url(), token, account)
+                prefix = self.object if self.object.endswith('/') else self.object+'/'
+                if (account in authorized or any) and \
+                (type in self.dir_content_types) and \
+                o.startswith(prefix):
+                    self.assert_not_raises_fault(403, cl.retrieve_object_metadata,
+                                             self.container, o, account=get_user())
+                else:
+                    self.assert_raises_fault(403, cl.retrieve_object_metadata,
+                                         self.container, o, account=get_user())
+    
+    def assert_write(self, authorized=[], any=False):
+        o_data = self.client.retrieve_object(self.container, self.object)
         for token, account in OTHER_ACCOUNTS.items():
-            cl = Pithos_Client(get_server(), token, account, get_api()) 
-            if account in authorized or any:
-                self.assert_not_raises_fault(403, cl.retrieve_object_metadata,
-                                             'c', 'o/also-shared', account=get_user())
-            else:
-                self.assert_raises_fault(403, cl.retrieve_object_metadata,
-                                         'c', 'o/also-shared', account=get_user())
-    
-    def assert_write(self, o_data, authorized=[], any=False):
-        for token, account in OTHER_ACCOUNTS.items():
-            cl = Pithos_Client(get_server(), token, account, get_api()) 
+            cl = Pithos_Client(get_url(), token, account)
             new_data = get_random_data()
             if account in authorized or any:
                 # test write access
                 self.assert_not_raises_fault(403, cl.update_object,
-                                             'c', 'o', StringIO(new_data),
+                                             self.container, self.object, StringIO(new_data),
                                              account=get_user())
                 try:
                     # test read access
-                    server_data = cl.retrieve_object('c', 'o', account=get_user())
+                    server_data = cl.retrieve_object(self.container, self.object, account=get_user())
                     self.assertEqual(o_data, server_data[:len(o_data)])
                     self.assertEqual(new_data, server_data[len(o_data):])
                     o_data = server_data
@@ -1933,72 +1961,105 @@ class TestPermissions(BaseTestCase):
                     self.failIf(f.status == 403)
             else:
                 self.assert_raises_fault(403, cl.update_object,
-                                             'c', 'o', StringIO(new_data),
+                                             self.container, self.object, StringIO(new_data),
                                              account=get_user())
-        
         #check inheritance
-        o = self.upload_random_data('c', 'o/also-shared')
-        o_data = o['data']
-        for token, account in OTHER_ACCOUNTS.items():
-            cl = Pithos_Client(get_server(), token, account, get_api()) 
-            new_data = get_random_data()
-            if account in authorized or any:
-                # test write access
-                self.assert_not_raises_fault(403, cl.update_object,
-                                             'c', o['name'],
-                                             StringIO(new_data),
-                                             account=get_user())
-                try:
-                    server_data = cl.retrieve_object('c', o['name'], account=get_user())
-                    self.assertEqual(o_data, server_data[:len(o_data)])
-                    self.assertEqual(new_data, server_data[len(o_data):])
-                    o_data = server_data
-                except Fault, f:
-                    self.failIf(f.status == 403)
-            else:
-                self.assert_raises_fault(403, cl.update_object,
-                                             'c', o['name'],
-                                             StringIO(new_data),
-                                             account=get_user())
+        meta = self.client.retrieve_object_metadata(self.container, self.object)
+        type = meta['content-type']
+        derivatives = self.client.list_objects(self.container, prefix=self.object)
+        #exclude the object
+        del derivatives[derivatives.index(self.object)]
+        for o in derivatives:
+            for token, account in OTHER_ACCOUNTS.items():
+                prefix = self.object if self.object.endswith('/') else self.object+'/'
+                cl = Pithos_Client(get_url(), token, account)
+                new_data = get_random_data()
+                if (account in authorized or any) and \
+                (type in self.dir_content_types) and \
+                o.startswith(prefix):
+                    # test write access
+                    self.assert_not_raises_fault(403, cl.update_object,
+                                                 self.container, o,
+                                                 StringIO(new_data),
+                                                 account=get_user())
+                    try:
+                        server_data = cl.retrieve_object(self.container, o, account=get_user())
+                        self.assertEqual(new_data, server_data[-len(new_data):])
+                    except Fault, f:
+                        self.failIf(f.status == 403)
+                else:
+                    self.assert_raises_fault(403, cl.update_object,
+                                                 self.container, o,
+                                                 StringIO(new_data),
+                                                 account=get_user())
     
     def test_group_read(self):
-        self.client.create_container('c')
-        o = self.upload_random_data('c', 'o')
-        self.client.share_object('c', 'o', ['%s:pithosdev' % get_user()])
+        self.client.share_object(self.container, self.object, ['%s:pithosdev' % get_user()])
         self.assert_read(authorized=self.authorized)
     
     def test_read_many(self):
-        #test read access
-        self.client.create_container('c')
-        o = self.upload_random_data('c', 'o')
-        self.client.share_object('c', 'o', self.authorized)
+        self.client.share_object(self.container, self.object, self.authorized)
         self.assert_read(authorized=self.authorized)
     
     def test_read_by_everyone(self):
-        self.client.create_container('c')
-        o = self.upload_random_data('c', 'o')
-        self.client.share_object('c', 'o', ['*'])
+        self.client.share_object(self.container, self.object, ['*'])
         self.assert_read(any=True)
     
+    def test_read_directory(self):
+        for type in self.dir_content_types:
+            #change content type
+            self.client.move_object(self.container, self.object, self.container, self.object, content_type=type)
+            self.client.share_object(self.container, self.object, ['*'])
+            self.assert_read(any=True)
+            self.client.share_object(self.container, self.object, self.authorized)
+            self.assert_read(authorized=self.authorized)
+            self.client.share_object(self.container, self.object, ['%s:pithosdev' % get_user()])
+            self.assert_read(authorized=self.authorized)
+    
     def test_group_write(self):
-        self.client.create_container('c')
-        o = self.upload_random_data('c', 'o')
-        self.client.share_object('c', 'o', ['%s:pithosdev' % get_user()], read=False)
-        self.assert_write(o['data'], authorized=self.authorized)
+        self.client.share_object(self.container, self.object, ['%s:pithosdev' % get_user()], read=False)
+        self.assert_write(authorized=self.authorized)
     
     def test_write_many(self):
-        self.client.create_container('c')
-        o = self.upload_random_data('c', 'o')
-        self.client.share_object('c', 'o', self.authorized, read=False)
-        self.assert_write(o['data'], authorized=self.authorized)
+        self.client.share_object(self.container, self.object, self.authorized, read=False)
+        self.assert_write(authorized=self.authorized)
     
     def test_write_by_everyone(self):
-        self.client.create_container('c')
-        o = self.upload_random_data('c', 'o')
-        self.client.share_object('c', 'o', ['*'], read=False)
-        o_data = o['data']
-        self.assert_write(o['data'], any=True)
-
+        self.client.share_object(self.container, self.object, ['*'], read=False)
+        self.assert_write(any=True)
+    
+    def test_write_directory(self):
+        dir_content_types = ('application/directory', 'application/folder')
+        for type in dir_content_types:
+            #change content type
+            self.client.move_object(self.container, self.object, self.container, self.object, content_type=type)
+            self.client.share_object(self.container, self.object, ['*'], read=False)
+            self.assert_write(any=True)
+            self.client.share_object(self.container, self.object, self.authorized, read=False)
+            self.assert_write(authorized=self.authorized)
+            self.client.share_object(self.container, self.object, ['%s:pithosdev' % get_user()], read=False)
+            self.assert_write(authorized=self.authorized)
+    
+    def test_shared_listing(self):
+        self.client.share_object(self.container, self.object, self.authorized)
+        
+        my_shared_containers = self.client.list_containers(shared=True)
+        self.assertEqual(['c'], my_shared_containers)
+        my_shared_objects = self.client.list_objects('c', shared=True)
+        self.assertEqual(['o'], my_shared_objects)
+        
+        dir_content_types = ('application/directory', 'application/folder')
+        for type in dir_content_types:
+            #change content type
+            self.client.move_object(self.container, self.object, self.container, self.object, content_type=type)
+            my_shared_objects = self.client.list_objects('c', shared=True)
+            self.assertEqual(['o', 'o/', 'o/a'], my_shared_objects)
+        
+        for token, account in OTHER_ACCOUNTS.items():
+            if account in self.authorized:
+                self.other = Pithos_Client(get_url(), token, account)
+                self.assertTrue(get_user() in self.other.list_shared_by_others())
+    
 class TestPublish(BaseTestCase):
     def test_publish(self):
         self.client.create_container('c')
@@ -2007,10 +2068,51 @@ class TestPublish(BaseTestCase):
         meta = self.client.retrieve_object_metadata('c', 'o')
         self.assertTrue('x-object-public' in meta)
         url = meta['x-object-public']
-        public_client = Pithos_Client(get_server(), get_auth(), get_user(), api='')
-        data = public_client.get(url)[2]
+        
+        p = urlparse(get_url())
+        if p.scheme == 'http':
+            conn = HTTPConnection(p.netloc)
+        elif p.scheme == 'https':
+            conn = HTTPSConnection(p.netloc)
+        else:
+            raise Exception('Unknown URL scheme')
+        
+        conn.request('GET', url)
+        resp = conn.getresponse()
+        length = resp.getheader('content-length', None)
+        data = resp.read(length)
         self.assertEqual(o_data, data)
 
+class TestPolicies(BaseTestCase):
+    def test_none_versioning(self):
+        self.client.create_container('c', policies={'versioning':'none'})
+        o = self.upload_random_data('c', 'o')
+        meta = self.client.retrieve_object_metadata('c', 'o')
+        v = meta['x-object-version']
+        more_data = get_random_data()
+        self.client.update_object('c', 'o', StringIO(more_data))
+        vlist = self.client.retrieve_object_versionlist('c', 'o')
+        self.assert_raises_fault(404, self.client.retrieve_object_version,
+                                 'c', 'o', v)
+        data = self.client.retrieve_object('c', 'o')
+        end = len(o['data'])
+        self.assertEqual(data[:end], o['data'])
+        self.assertEqual(data[end:], more_data)
+    
+    def test_quota(self):
+        self.client.create_container('c', policies={'quota':'1'})
+        meta = self.client.retrieve_container_metadata('c')
+        self.assertEqual(meta['x-container-policy-quota'], '1')
+        self.assert_raises_fault(413, self.upload_random_data, 'c', 'o',
+                                 length=1024*1024+1)
+    
+    def test_quota_none(self):
+        self.client.create_container('c', policies={'quota':'0'})
+        meta = self.client.retrieve_container_metadata('c')
+        self.assertEqual(meta['x-container-policy-quota'], '0')
+        self.assert_not_raises_fault(413, self.upload_random_data, 'c', 'o',
+                                 length=1024*1024+1)
+
 class AssertUUidInvariant(object):
     def __init__(self, callable, *args, **kwargs):
         self.callable = callable
@@ -2079,7 +2181,7 @@ def compute_block_hash(data, algorithm):
 
 def get_random_data(length=500):
     char_set = string.ascii_uppercase + string.digits
-    return ''.join(random.choice(char_set) for x in range(length))
+    return ''.join(random.choice(char_set) for x in xrange(length))
 
 def is_date(date):
     MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()