using System;
using System.Collections.Generic;
-using System.ComponentModel;
using System.ComponentModel.Composition;
using System.Diagnostics;
using System.Diagnostics.Contracts;
using System.IO;
using System.Linq;
using System.Net;
-using System.Text;
-using System.Threading;
using System.Threading.Tasks;
using Pithos.Interfaces;
using Pithos.Network;
// Injected status services — presumably MEF [Import]-ed; the attributes are elided from this hunk, confirm.
public IStatusKeeper StatusKeeper { get; set; }
public IStatusNotification StatusNotification { get; set; }
-/*
- [Import]
- public FileAgent FileAgent {get;set;}
-*/
-
- /* public int BlockSize { get; set; }
- public string BlockHash { get; set; }*/
// NOTE(review): good cleanup — the commented-out FileAgent import and BlockSize/BlockHash fields were dead code.
private static readonly ILog Log = LogManager.GetLogger("NetworkAgent");
// NOTE(review): readonly is correct here — the list is only mutated in place, never reassigned.
- private List<AccountInfo> _accounts=new List<AccountInfo>();
+ private readonly List<AccountInfo> _accounts=new List<AccountInfo>();
// Start the agent's CloudAction message pump.
// NOTE(review): the blockSize/blockHash parameters are removed along with their (already
// commented-out) validation; the values now travel on AccountInfo (see the TopHash hunk below),
// so the signature change is safe provided all callers are updated in the same commit.
- public void Start(/*int blockSize, string blockHash*/)
+ public void Start()
    {
-/*
- if (blockSize<0)
- throw new ArgumentOutOfRangeException("blockSize");
- if (String.IsNullOrWhiteSpace(blockHash))
- throw new ArgumentOutOfRangeException("blockHash");
- Contract.EndContractBlock();
-*/
-
-/*
- BlockSize = blockSize;
- BlockHash = blockHash;
-*/
-
 // `process` and `loop` are local delegates defined in lines elided from this hunk.
 _agent = Agent<CloudAction>.Start(inbox =>
 {
 inbox.LoopAsync(process, loop);
 };
 loop();
- });
+ });
    }
// Execute one queued CloudAction and translate failures into log entries / follow-up actions.
// NOTE(review): this hunk elides the try-block body that pairs with the catch clauses below —
// only the exception-handling changes are visible here.
private async Task Process(CloudAction action)
{
Log.InfoFormat("[ACTION] Start Processing {0}", action);
- var localFile = action.LocalFile;
var cloudFile = action.CloudFile;
var downloadPath = action.GetDownloadPath();
}
catch (WebException exc)
{
- Log.ErrorFormat("[WEB ERROR] {0} : {1} -> {2} due to exception\r\n{3}", action.Action, action.LocalFile, action.CloudFile, exc);
+ Log.ErrorFormat("[WEB ERROR] {0} : {1} -> {2} due to exception\r\n{3}", action.Action, action.LocalFile, action.CloudFile, exc)
}
// Cancellation must propagate so the agent loop can shut down cleanly.
catch (OperationCanceledException)
{
throw;
}
// NOTE(review): new handler — a local directory that disappeared mid-action now reschedules a
// cloud-side delete, mirroring the existing missing-file handling below. Good symmetry.
- catch (FileNotFoundException exc)
+ catch (DirectoryNotFoundException)
+ {
+ Log.ErrorFormat("{0} : {1} -> {2} failed because the directory was not found.\n Rescheduling a delete",
+ action.Action, action.LocalFile, action.CloudFile);
+ //Post a delete action for the missing file
+ Post(new CloudDeleteAction(action));
+ }
// NOTE(review): dropping the unused `exc` variable is correct — the old format string had four
// arguments but only three placeholders, so the exception was silently ignored anyway.
+ catch (FileNotFoundException)
{
Log.ErrorFormat("{0} : {1} -> {2} failed because the file was not found.\n Rescheduling a delete",
- action.Action, action.LocalFile, action.CloudFile, exc);
+ action.Action, action.LocalFile, action.CloudFile);
//Post a delete action for the missing file
Post(new CloudDeleteAction(action));
}
// Fragment (enclosing method elided): lazily compute the top hash for an upload —
// Merkle tree hash when the file exceeds one block, the plain local hash otherwise.
if (cloudAction.LocalFile != null)
{
var accountInfo = cloudAction.AccountInfo;
- if (cloudAction.LocalFile.Length>accountInfo.BlockSize)
- cloudAction.TopHash = new Lazy<string>(() => Signature.CalculateTreeHashAsync(cloudAction.LocalFile,
- accountInfo.BlockSize, accountInfo.BlockHash).Result
- .TopHash.ToHashString());
- else
// NOTE(review): new guard — directories are now skipped entirely (no hash is assigned).
// Confirm downstream consumers of TopHash tolerate it being left unset for directories.
+ if (!Directory.Exists(cloudAction.LocalFile.FullName))
{
- cloudAction.TopHash=new Lazy<string>(()=> cloudAction.LocalHash.Value);
+ if (cloudAction.LocalFile.Length > accountInfo.BlockSize)
+ cloudAction.TopHash =
// NOTE(review): .Result on CalculateTreeHashAsync blocks the thread that first reads the
// Lazy value — presumably acceptable on the agent thread, but a deadlock risk if ever
// evaluated on a synchronization context. Verify.
+ new Lazy<string>(() => Signature.CalculateTreeHashAsync(cloudAction.LocalFile,
+ accountInfo.BlockSize,
+ accountInfo.BlockHash).Result
+ .TopHash.ToHashString());
+ else
+ {
+ cloudAction.TopHash = new Lazy<string>(() => cloudAction.LocalHash.Value);
+ }
}
}
//Remote files are polled periodically. Any changes are processed
// Poll every registered account for server-side changes, then reschedule the next poll.
// NOTE(review): rewritten from StartNewDelayed/Iterate/ContinueWith to async/await
// (TaskEx.Delay/WhenAll = Async CTP / Microsoft.Bcl.Async helpers). The delay is now also
// cancellable via the agent's CancellationToken — an improvement over the old fixed 10000ms.
- public Task ProcessRemoteFiles(DateTime? since=null)
- {
- return Task<Task>.Factory.StartNewDelayed(10000, () =>
+ public async Task ProcessRemoteFiles(DateTime? since = null)
+ {
+ await TaskEx.Delay(TimeSpan.FromSeconds(10),_agent.CancellationToken);
+
+ using (log4net.ThreadContext.Stacks["Retrieve Remote"].Push("All accounts"))
{
- using (log4net.ThreadContext.Stacks["Retrieve Remote"].Push("All accounts"))
+
+ try
{
//Next time we will check for all changes since the current check minus 1 second
//This is done to ensure there are no discrepancies due to clock differences
// NOTE(review): DateTime.Now is the *client* clock; the comment's clock-difference concern
// suggests server timestamps — confirm a 1s fudge is enough across client/server skew.
DateTime nextSince = DateTime.Now.AddSeconds(-1);
-
- var tasks=from accountInfo in _accounts
- select ProcessAccountFiles(accountInfo, since);
- var process=Task.Factory.Iterate(tasks);
- return process.ContinueWith(t =>
- {
- if (t.IsFaulted)
- {
- Log.Error("Error while processing accounts");
- t.Exception.Handle(exc=>
- {
- Log.Error("Details:", exc);
- return true;
- });
- }
- ProcessRemoteFiles(nextSince);
- });
+ var tasks = from accountInfo in _accounts
+ select ProcessAccountFiles(accountInfo, since);
+
+ await TaskEx.WhenAll(tasks);
+
// NOTE(review): the recursive calls below are deliberately(?) not awaited — each poll is
// fire-and-forget, so the Task handed to the original caller completes after ONE pass and
// exceptions from later iterations are unobserved. If intentional, consider discarding the
// result explicitly and documenting it; the old ContinueWith version behaved the same way.
+ ProcessRemoteFiles(nextSince);
}
- });
+ catch (Exception ex)
+ {
+ Log.ErrorFormat("Error while processing accounts\r\n{0}",ex)
+ //In case of failure retry with the same parameter
+ ProcessRemoteFiles(since);
+ }
+
+
+ }
}
// List changed objects for one account (one listing task per container, in parallel), separate
// genuine trash from still-live objects, and enqueue the resulting sync actions.
// NOTE(review): `containers` and `client` are defined in lines elided from this hunk, as is the
// code between `.Except(...)` (L~247) and `Post(message)` — review the full file for those.
- public Task ProcessAccountFiles(AccountInfo accountInfo,DateTime? since=null)
+ public async Task ProcessAccountFiles(AccountInfo accountInfo,DateTime? since=null)
{
if (accountInfo==null)
throw new ArgumentNullException("accountInfo");
CreateContainerFolders(accountInfo, containers);
-
- //Get the list of server objects changed since the last check
- var listObjects = from container in containers
- select Task<IList<ObjectInfo>>.Factory.StartNew(_ =>
- client.ListObjects(accountInfo.UserName, container.Name, since),container.Name);
-
- var listAll = Task.Factory.WhenAll(listObjects.ToArray());
-
-
-
- //Get the list of deleted objects since the last check
-/*
- var listTrash = Task<IList<ObjectInfo>>.Factory.StartNew(() =>
- client.ListObjects(accountInfo.UserName, FolderConstants.TrashContainer, since));
-
- var listShared = Task<IList<ObjectInfo>>.Factory.StartNew(() =>
- client.ListSharedObjects(since));
-
- var listAll = Task.Factory.TrackedSequence(
- () => listObjects,
- () => listTrash,
- () => listShared);
-*/
+ try
+ {
+
+ //Get the list of server objects changed since the last check
+ //The name of the container is passed as state in order to create a dictionary of tasks in a subsequent step
+ var listObjects = from container in containers
+ select Task<IList<ObjectInfo>>.Factory.StartNew(_ =>
+ client.ListObjects(accountInfo.UserName,container.Name, since),container.Name);
// NOTE(review): Task.Factory.WhenAll is not a BCL member — presumably the
// ParallelExtensionsExtras extension. Since this method is now async, TaskEx.WhenAll
// (as used in ProcessRemoteFiles) would be more consistent; verify which one compiles.
+ var listTasks = await Task.Factory.WhenAll(listObjects.ToArray());
- var enqueueFiles = listAll.ContinueWith(task =>
- {
- if (task.IsFaulted)
- {
- //ListObjects failed at this point, need to reschedule
- Log.ErrorFormat("[FAIL] ListObjects for{0} in ProcessRemoteFiles with {1}", accountInfo.UserName,task.Exception);
- return;
- }
using (log4net.ThreadContext.Stacks["SCHEDULE"].Push("Process Results"))
{
- var dict=task.Result.ToDictionary(t=> t.AsyncState);
-
+ var dict = listTasks.ToDictionary(t => t.AsyncState);
+
//Get all non-trash objects. Remember, the container name is stored in AsyncState
- var remoteObjects = from objectList in task.Result
- where (string)objectList.AsyncState != "trash"
+ var remoteObjects = from objectList in listTasks
+ where (string) objectList.AsyncState != "trash"
from obj in objectList.Result
select obj;
-
+
// NOTE(review): dict["trash"] throws KeyNotFoundException if the account has no container
// named "trash" in `containers` — confirm the (elided) container listing always includes it.
var trashObjects = dict["trash"].Result;
//var sharedObjects = ((Task<IList<ObjectInfo>>) task.Result[2]).Result;
//Items with the same name, hash may be both in the container and the trash
//Don't delete items that exist in the container
var realTrash = from trash in trashObjects
- where !remoteObjects.Any(info => info.Name == trash.Name && info.Hash == trash.Hash)
+ where
+ !remoteObjects.Any(
+ info => info.Name == trash.Name && info.Hash == trash.Hash)
select trash;
- ProcessDeletedFiles(accountInfo,realTrash);
+ ProcessDeletedFiles(accountInfo, realTrash);
- var remote = from info in remoteObjects//.Union(sharedObjects)
+ var remote = from info in remoteObjects
+ //.Union(sharedObjects)
let name = info.Name
where !name.EndsWith(".ignore", StringComparison.InvariantCultureIgnoreCase) &&
- !name.StartsWith(FolderConstants.CacheFolder +"/", StringComparison.InvariantCultureIgnoreCase)
+ !name.StartsWith(FolderConstants.CacheFolder + "/",
+ StringComparison.InvariantCultureIgnoreCase)
select info;
//Create a list of actions from the remote files
- var allActions = ObjectsToActions(accountInfo,remote);
-
+ var allActions = ObjectsToActions(accountInfo, remote);
+
//And remove those that are already being processed by the agent
var distinctActions = allActions
.Except(_agent.GetEnumerable(), new PithosMonitor.LocalFileComparer())
Post(message);
}
- /* //Report the number of new files
- var remoteCount = distinctActions.Count(action=>
- action.Action==CloudActionType.DownloadUnconditional);
-
- if ( remoteCount > 0)
- StatusNotification.NotifyChange(String.Format("Processing {0} new files", remoteCount));
- */
-
- Log.Info("[LISTENER] End Processing");
+ Log.Info("[LISTENER] End Processing");
}
- });
+ }
// NOTE(review): three issues with this catch — (1) message nit: missing space in "for{0}";
// (2) catch(Exception) now also swallows OperationCanceledException, unlike Process() which
// rethrows it — cancellation should propagate; (3) the old comment said "need to reschedule"
// but neither version actually reschedules here (ProcessRemoteFiles retries the whole pass).
+ catch (Exception ex)
+ {
+ Log.ErrorFormat("[FAIL] ListObjects for{0} in ProcessRemoteFiles with {1}", accountInfo.UserName, ex);
+ return;
+ }
+
+ Log.Info("[LISTENER] Finished");
- var log = enqueueFiles.ContinueWith(t =>
- {
- if (t.IsFaulted)
- {
- Log.Error("[LISTENER] Exception", t.Exception);
- }
- else
- {
- Log.Info("[LISTENER] Finished");
- }
- });
- return log;
}
}
// Fragment (enclosing rename handler elided): the local file was already renamed, so flag it
// Modified, move the remote object to the new name, then restore Unchanged/Normal status and
// tell the Shell to refresh its overlay icons.
// NOTE(review): dropping the redundant `this.` qualifiers is a pure style cleanup — no behavior change.
var newFilePath = action.LocalFile.FullName;
//The local file is already renamed
- this.StatusKeeper.SetFileOverlayStatus(newFilePath, FileOverlayStatus.Modified);
+ StatusKeeper.SetFileOverlayStatus(newFilePath, FileOverlayStatus.Modified);
var account = action.CloudFile.Account ?? accountInfo.UserName;
var client = new CloudFilesClient(accountInfo);
client.MoveObject(account, container, action.OldCloudFile.Name, container, action.CloudFile.Name);
- this.StatusKeeper.SetFileStatus(newFilePath, FileStatus.Unchanged);
- this.StatusKeeper.SetFileOverlayStatus(newFilePath, FileOverlayStatus.Normal);
+ StatusKeeper.SetFileStatus(newFilePath, FileStatus.Unchanged);
+ StatusKeeper.SetFileOverlayStatus(newFilePath, FileOverlayStatus.Normal);
NativeMethods.RaiseChangeNotification(newFilePath);
}
// Fragment (enclosing delete handler elided): mark the file Modified while the server-side
// delete runs, then clear its status entry once the object is gone.
// NOTE(review): `info.FullName.ToLower()` for path keys is culture-sensitive — presumably the
// StatusKeeper stores lower-cased paths everywhere; ToLowerInvariant would be safer. Verify.
var info = fileAgent.GetFileInfo(fileName);
var fullPath = info.FullName.ToLower();
- this.StatusKeeper.SetFileOverlayStatus(fullPath, FileOverlayStatus.Modified);
+ StatusKeeper.SetFileOverlayStatus(fullPath, FileOverlayStatus.Modified);
var account = cloudFile.Account ?? accountInfo.UserName;
var container = cloudFile.Container ;//?? FolderConstants.PithosContainer;
var client = new CloudFilesClient(accountInfo);
client.DeleteObject(account, container, cloudFile.Name);
- this.StatusKeeper.ClearFileStatus(fullPath);
+ StatusKeeper.ClearFileStatus(fullPath);
}
}
// Fragment (download method's signature and earlier guards elided from this hunk).
throw new ArgumentException("The localPath must be rooted", "localPath");
Contract.EndContractBlock();
- Uri relativeUrl = new Uri(cloudFile.Name, UriKind.Relative);
+ var relativeUrl = new Uri(cloudFile.Name, UriKind.Relative);
var url = relativeUrl.ToString();
if (cloudFile.Name.EndsWith(".ignore", StringComparison.InvariantCultureIgnoreCase))
var account = cloudFile.Account;
var container = cloudFile.Container;
// NOTE(review): new branch — server objects typed "application/directory" now materialize as a
// local directory instead of going through the hashmap download path. Directory.CreateDirectory
// is already a no-op for existing paths, so the Exists check is redundant but harmless.
- //Retrieve the hashmap from the server
- var serverHash = await client.GetHashMap(account, container, url);
- //If it's a small file
- if (serverHash.Hashes.Count == 1 )
- //Download it in one go
- await DownloadEntireFileAsync(accountInfo, client, cloudFile, relativeUrl, localPath, serverHash);
- //Otherwise download it block by block
+ if (cloudFile.Content_Type == @"application/directory")
+ {
+ if (!Directory.Exists(localPath))
+ Directory.CreateDirectory(localPath);
+ }
else
- await DownloadWithBlocks(accountInfo,client, cloudFile, relativeUrl, localPath, serverHash);
-
- if (cloudFile.AllowedTo == "read")
{
- var attributes=File.GetAttributes(localPath);
- File.SetAttributes(localPath,attributes|FileAttributes.ReadOnly);
+ //Retrieve the hashmap from the server
+ var serverHash = await client.GetHashMap(account, container, url);
+ //If it's a small file
+ if (serverHash.Hashes.Count == 1)
+ //Download it in one go
+ await
+ DownloadEntireFileAsync(accountInfo, client, cloudFile, relativeUrl, localPath, serverHash);
+ //Otherwise download it block by block
+ else
+ await DownloadWithBlocks(accountInfo, client, cloudFile, relativeUrl, localPath, serverHash);
+
// NOTE(review): the read-only attribute for "read"-shared objects is now applied only to real
// files, not to directories — confirm that is intentional for shared folders.
+ if (cloudFile.AllowedTo == "read")
+ {
+ var attributes = File.GetAttributes(localPath);
+ File.SetAttributes(localPath, attributes | FileAttributes.ReadOnly);
+ }
}
-
+
//Now we can store the object's metadata without worrying about ghost status entries
StatusKeeper.StoreInfo(localPath, cloudFile);
// Fragment (enclosing upload method elided): skip the upload when local and server hashes
// already match, otherwise upload via hashmap and restore normal file state.
try
{
// NOTE(review): the null-guard and Contract.EndContractBlock on `action` were removed —
// if validation did not move to the caller, this is a regression: a null action now
// surfaces as a NullReferenceException at `action.AccountInfo` instead of a clear
// ArgumentNullException. Confirm the guard exists upstream before accepting this change.
- if (action == null)
- throw new ArgumentNullException("action");
- Contract.EndContractBlock();
-
var accountInfo = action.AccountInfo;
var fileInfo = action.LocalFile;
if (hash == cloudHash || topHash == cloudHash)
{
//but store any metadata changes
- this.StatusKeeper.StoreInfo(fullFileName, info);
+ StatusKeeper.StoreInfo(fullFileName, info);
Log.InfoFormat("Skip upload of {0}, hashes match", fullFileName);
return;
}
await UploadWithHashMap(accountInfo, cloudFile, fileInfo, cloudFile.Name, treeHash);
//If everything succeeds, change the file and overlay status to normal
- this.StatusKeeper.SetFileState(fullFileName, FileStatus.Unchanged, FileOverlayStatus.Normal);
+ StatusKeeper.SetFileState(fullFileName, FileStatus.Unchanged, FileOverlayStatus.Normal);
}
//Notify the Shell to update the overlays
NativeMethods.RaiseChangeNotification(fullFileName);