#!/usr/bin/python

"""
    s3fs - a FUSE Filesystem program to mount S3 web storage buckets
    Copyright (C) 2008  Neil Horman <nhorman@tuxdriver.com> 

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; Specifically version 2 of the License only

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
"""

import sys
import os
import getopt
try:
    import _find_fuse_parts
except ImportError:
    pass
import fuse
from fuse import Fuse
import boto
from boto.s3.connection import S3Connection
import syslog
import pickle
import tempfile
import errno
import stat
import threading
from threading import Thread
from threading import Timer
from threading import Condition
import thread
import time
import shutil
import atexit
import hashlib

"""
This is a base class to use in stat requests
"""
class S3Stat(fuse.Stat):
	def __init__(self):
		self.st_mode = 0
		self.st_ino = 0
		self.st_dev = 0
		self.st_nlink = 0
		self.st_uid = 0
		self.st_gid = 0
		self.st_size = 0
		self.st_atime = 0
		self.st_mtime = 0
		self.st_ctime = 0


class S3ROStat(S3Stat):
	def __init__(self, clone, uid, gid):
		super(S3Stat,self).__setattr__('st_mode',clone.st_mode) 
		super(S3Stat,self).__setattr__('st_ino',clone.st_ino)
		super(S3Stat,self).__setattr__('st_dev',clone.st_dev) 
		super(S3Stat,self).__setattr__('st_nlink',clone.st_nlink)
		super(S3Stat,self).__setattr__('st_uid',uid) 
		super(S3Stat,self).__setattr__('st_gid',gid)
		super(S3Stat,self).__setattr__('st_size',clone.st_size)
		super(S3Stat,self).__setattr__('st_atime',clone.st_atime) 
		super(S3Stat,self).__setattr__('st_mtime',clone.st_mtime)
		super(S3Stat,self).__setattr__('st_ctime',clone.st_ctime)

	def __setattr__(self, *args):
		raise TypeError("can't modify immutable instance")
	__delattr__ = __setattr__
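
# An illustrative sketch (not part of the filesystem logic): S3ROStat
# freezes a snapshot of a node's stat data so FUSE callers cannot
# mutate our metadata behind the tree's back:
#
#	base = S3Stat()
#	ro = S3ROStat(base, os.getuid(), os.getgid())
#	ro.st_mode = 0755	# raises TypeError("can't modify ...")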
"""
This class represents every node in our file system (directories and files)
It contains metadata about each node, and a set of children that appear beneath
it. A tree of these elements is stored in the S3DriveMetaData class as a pickle
on the specified bucket in S3
"""
class S3KeyLayoutElement:
	def __init__(self,name):
		self.name = name
		self.stat = S3Stat()
		# by default make a directory that is mode 755
		self.stat.st_mode = stat.S_IFDIR | 0755
		self.stat.st_nlink = 1 
		
		# UID/GID should probably be set as the owner of the mount
		self.stat.st_uid = -1 
		self.stat.st_gid = -1 
		# By default this should be zero
		self.stat.st_size = 0 
		self.stat.st_blksize = 0
		self.stat.st_blocks =0 
		# We can worry about these later

		self.stat.st_atime = 0
		self.stat.st_mtime = 0
		self.stat.st_ctime = 0

		self.dirty = False

		self.children = [] 
		self.parent = None
		self.iolock = None
		self.ioreq = [] 
		self.openref = 0
		self.sha1_sum = '' 
		self.target_path = None 
		self.pending_delete = False
		self.xattrs = {} 

	def runtimeSetup(self):
		for i in self.children:
			i.runtimeSetup()
		self.iolock = thread.allocate_lock()

	def runtimeShutdown(self):
		for i in self.children:
			i.runtimeShutdown()
		self.iolock = None

	# Make a clone of this node for the purpose of saving as a pickle
	def cloneSubtree(self, parent=None):
		clone = S3KeyLayoutElement(self.name)

		clone.stat.st_mode = self.stat.st_mode 
		clone.stat.st_nlink = self.stat.st_nlink 
		
		# UID/GID should probably be set as the owner of the mount
		clone.stat.st_uid = self.stat.st_uid 
		clone.stat.st_gid = self.stat.st_gid 
		# By default this should be zero
		clone.stat.st_size = self.stat.st_size 
		clone.stat.st_blksize = self.stat.st_blksize
		clone.stat.st_blocks = self.stat.st_blocks
		# We can worry about these later

		clone.stat.st_atime = self.stat.st_atime 
		clone.stat.st_mtime = self.stat.st_mtime
		clone.stat.st_ctime = self.stat.st_ctime 

		# Need to repoint the parent to the passed in parent
		clone.parent = parent 

		clone.sha1_sum = self.sha1_sum

		for i in self.children:
			child = i.cloneSubtree(clone)
			clone.children.append(child)

		return clone
		
	
	def setDirty(self, isdirty):
		self.iolock.acquire()
		self.dirty = isdirty
		self.iolock.release()

	def addChild(self, child):
		self.iolock.acquire()
		self.children.append(child)
		child.parent = self
		self.iolock.release()

	def removeNode(self, filesystem, recurse=True):
		if self.parent != None:
			self.parent.removeChild(self, filesystem, recurse)

	def removeChild(self, child, filesystem, recurse=True):
		self.iolock.acquire()
		self.children.remove(child)
		child.parent = None
		self.iolock.release()
		if (recurse == True):
			for i in child.children:
				child.removeChild(i, filesystem, recurse)

	def writeToS3(self, filesystem):
		newthread = S3AioWriter(self, filesystem)
		self.iolock.acquire()
		self.ioreq.append(newthread)
		self.iolock.release()
		newthread.start()

	def readFromS3(self, filesystem):
		newthread = S3AioReader(self, filesystem)
		self.iolock.acquire()
		self.ioreq.append(newthread)
		self.iolock.release()
		newthread.start()

	def deleteFromFs(self, filesystem):
		newthread = S3AioDelete(self, filesystem)
		self.iolock.acquire()
		self.ioreq.append(newthread)
		self.iolock.release()
		newthread.start()

	def delete(self, filesystem):
		self.iolock.acquire()
		self.pending_delete = True
		self.iolock.release()
		self.deleteFromFs(filesystem)
		for i in self.children:
			i.delete(filesystem)

	def dequeueIo(self, oldthread):
		self.iolock.acquire()
		self.ioreq.remove(oldthread)
		self.iolock.release()

	def completeIo(self, Rwait, Wwait):
		templist = []
		self.iolock.acquire()
		for i in self.ioreq:
			if ((Rwait == True) and (i.isRead == True)):
				templist.append(i)
			else:
				if ((Wwait == True) and (i.isRead == False)):
					templist.append(i)
		self.iolock.release()
		for i in templist:
			i.join()

	def completeAllIo(self, Rwait, Wwait):
		for i in self.children:
			i.completeAllIo(Rwait, Wwait)
		self.completeIo(Rwait, Wwait)

	def writebackAll(self, filesystem):
		for i in self.children:
			i.writebackAll(filesystem)
		if self.dirty == True:
			self.writeToS3(filesystem)

	def isOnS3(self, filesystem):
		lkr = S3AioLookup(self, filesystem)
		self.iolock.acquire()
		self.ioreq.append(lkr)
		self.iolock.release()
		lkr.start()
		return lkr.getResult()

	def open(self, filesystem, flags):
		if filesystem.cache.isInCache(self) == False:
			if self.isOnS3(filesystem) == False:
				print self.getKeyName(), " Is Not on S3"
				filesystem.cache.addToCache(self)
			else:
				# File on S3 but not in cache, read it in
				self.readFromS3(filesystem)
				while filesystem.cache.isInCache(self) == False:
					time.sleep(1)

		#File in cache, open it up
		self.iolock.acquire()
		self.openref = self.openref+1
		self.iolock.release()
		return os.fdopen(filesystem.cache.openInCache(self,flags),
				flag2mode(flags))

	def close(self, filesystem):
		self.iolock.acquire()
		if self.openref == 0:
			print "WARNING: UNBALANCE REF COUNT!"
		self.openref = self.openref-1
		if self.openref == 0:
			# We should try to start a write thread
			if self.dirty == True:
				self.iolock.release()
				if filesystem.writeback_time != -1:
					self.writeToS3(filesystem)
				return
		self.iolock.release()
		return

	def pathWalk(self, namelist):
		#namelist should be fully qualified from the node pointed
		#to by self
		name = namelist[0]
		if len(namelist) == 1:
			#We might be the node you're looking for
			if self.name == name:
				if self.pending_delete == True:
					self.completeIo(True, True)
					return None
				return self
			else:
				return None
		else:
			#An intermediate component must match our own name
			#before we descend, otherwise an identically named
			#entry in a sibling subtree could satisfy the lookup
			if self.name != name:
				return None
			#iterate over the children
			for kid in self.children:
				rc = kid.pathWalk(namelist[1:])
				if rc != None:
					return rc
		return None

	def getParentKeyString(self):
		if self.parent == None:
			# The root of the tree maps to the key "root"; key
			# name components are delimited with a '.' instead of
			# a '/'
			return "root"
		parentkeystring = self.parent.getParentKeyString()
		return parentkeystring + "." + self.name

	def getKeyName(self):
		#walk our parent path and return the string that makes up our
		#key name
		keystring = self.getParentKeyString() 
		return keystring

	def makeUploadCopy(self, filesystem):
		# We make a temporary copy of the file for uploading
		fd = filesystem.cache.shadowInCache(self)
		return fd

"""
This class is pickled and stored on S3 in the specified bucket.  It maintains
All of our filesystem heirarchy data, and file metadata
"""
class S3DriveMetaData:
	def __init__(self):
		self.version = 2 
		self.size_unlimited = True
		self.locked_on_s3 = True
		self.root = S3KeyLayoutElement('/') 
		self.root.runtimeSetup()

	def findNode(self, name):
		if name == "/":
			return self.root
		tokens = name.split("/")
		# The leading empty token from the split corresponds to
		# the root node, whose name is '/'
		tokens[0] = '/'
		return self.root.pathWalk(tokens)

	def cloneForSave(self):
		clone = S3DriveMetaData()
		clone.root = None
		clone.version = self.version
		clone.size_unlimited = self.size_unlimited
		clone.root = self.root.cloneSubtree() 
		return clone

	def checkUpgrade(self):
		#here we need to scan our versions and do any needed updates

		# Version 1 S3DriveMetaData classes lacked the locked_on_s3
		# variable
		if self.version == 1:
			self.locked_on_s3 = True
			self.version = 2


"""
The S3Drive class represents the blockdevice that is an amazon S3 bucket
This object tracks all io requests to and from the bucket
"""
class S3Drive:
	def __init__(self, bucket, host=None):
		self.connection_ready = False
		self.drive_ready = False
		if host == None:
			self.connection = S3Connection()
		else:
			self.connection = S3Connection(is_secure=False,
				host=host)

		if self.connection == None:
			syslog.syslog("Could not establish connection to S3\n")
			return
		self.connection_ready = True
		self.bucket = self.connection.lookup(bucket)
		if self.bucket == None:
			syslog.syslog("Could not find bucket "+bucket)
			return
		self.drive_ready = True

	def lookup(self, node):
		fqkn = node.getKeyName()
		return self.bucket.lookup(fqkn)

	def create(self, node):
		fqkn = node.getKeyName()
		return self.bucket.new_key(fqkn)
"""
The S3Cache class represents the local cache that we store our downloaded files
in
"""
class S3Cache:
	def __init__(self, bucket, dir="/tmp", size=-1, preserve=False):
		self.dir = os.path.abspath(dir) + "/" + bucket
		self.size = size
		self.stored_size = 0
		self.lru_list = []
		self.preserve = preserve

		if (os.path.exists(self.dir) == False):
			os.mkdir(self.dir)

		# Now check the files for SHA1 correctness

	def shutdown(self):
		if self.preserve == False:
			for root, dirs, files in os.walk(self.dir, topdown=False):
				for name in files:
					os.remove(os.path.join(root, name))
				for name in dirs:
					os.rmdir(os.path.join(root, name))
			os.rmdir(self.dir)

	# Verify that a cached copy of the node is up to date; a node
	# that is not cached at all is trivially valid
	def validate(self, node):
		if self.isInCache(node) == False:
			return True 
		keyname = node.getKeyName()
		filename = self.dir + "/" + keyname
		sum = hashlib.new('sha1')
		fd = open(filename)
		end = False
		while end == False:
			buf = fd.read(4096)
			if (len(buf) == 0):
				end = True
			else:
				sum.update(buf)
		fd.close()

		if sum.hexdigest() != node.sha1_sum:
			return False
		return True
	
	# Make sure that all files in cache are up to date
	def syncTree(self, node):
		# Validate the children
		for i in node.children:
			self.syncTree(i)
		# Validate this tree
		if self.validate(node) == False:
			# Remove the file from cache
			self.removeFromCache(node)

	def addToCache(self, node, flags=(os.O_RDWR|os.O_CREAT)):
		keyname = node.getKeyName()
		fd = os.fdopen(os.open(self.dir + "/" + keyname, flags),
			flag2mode(flags))
		fd.close()
		self.lru_list.append(keyname)
		return 0 

	def removeFromCache(self, node):
		keyname = node.getKeyName()
		filename = self.dir + "/" + keyname
		if os.path.exists(filename) == True:
			os.unlink(filename)
			self.lru_list.remove(keyname)

	def isInCache(self, node):
		if node.pending_delete == True:
			return False
		keyname = node.getKeyName()
		filename = self.dir + "/" + keyname
		return os.path.exists(filename)

	def openInCache(self, node, flags=(os.O_RDWR|os.O_CREAT)):
		keyname = node.getKeyName()
		filename = self.dir + "/" + keyname
		self.updateLRU(keyname)
		fd = os.open(filename, flags)
		return fd

	def statInCache(self, node):
		filename = self.dir + "/" + node.getKeyName() 
		return os.stat(filename)

	def updateLRU(self, keyname):
		try:
			self.lru_list.remove(keyname)
		except ValueError:
			pass
		self.lru_list.append(keyname)

	def getLRUName(self):
		return self.lru_list[0]

	def shadowInCache(self, node):
		#Make a shadow copy that disappears on close
		#Must be called with the node's iolock held
		keyname = node.getKeyName()
		filename = self.dir + "/" + keyname
		origfile = os.fdopen(os.open(filename,os.O_RDWR),
				flag2mode(os.O_RDWR))
		shadowfile = tempfile.TemporaryFile("w+b")
		shutil.copyfileobj(origfile,shadowfile)
		shadowfile.seek(0)
		origfile.seek(0)
		sum = hashlib.new('sha1')
		end = False
		while end == False:
			buf = origfile.read(4096)
			if (len(buf) == 0):
				end = True
			else:
				sum.update(buf)
		origfile.close()

		node.sha1_sum = sum.hexdigest()

		return shadowfile
		


"""
The S3eioThread class does asynchronous work moving data back and forth between
S3 buckets and the local cache.  Its subclassed into a reader and writer that 
Actually get used
"""
class S3AioThread(Thread):
	requests = []
	iolock = thread.allocate_lock() 
	num_threads = 3 
	running_threads = 0
	def __init__(self, node, filesystem):
		self.fs = filesystem 
		self.node = node
		self.iolock.acquire()
		self.requests.append(self)
		self.pending = False
		self.iolock.release()		
		self.isRead = False
		self.needFsDataWrite = False
		Thread.__init__(self)

	def start(self):
		# get the lock
		self.iolock.acquire()
		print "Calling start on  thread ", self.getName()
		# check to see if we have room to create another thread
		if S3AioThread.running_threads >= self.num_threads:
			# We don't
			# If pending is already set, then return ourselves to
			# the list
			if self.pending == True:
				self.requests.append(self)
			print "Setting pending on thread ", self.getName()
			self.pending = True
		else:
			# Rebind the count on the class, not the instance, so
			# it is shared by all AIO threads
			S3AioThread.running_threads = S3AioThread.running_threads + 1
			self.pending = False

		self.iolock.release()

		# if pending is true here, then we don't have room to run at the
		# moment
		if self.pending == True:
			return
		else:
			# Otherwise go ahead and start
			print "Starting Thread ", self.getName()
			Thread.start(self)

	def getNextPending(self):
		self.iolock.acquire()
		for i in self.requests:
			if i.pending == True:
				# Remove this entry from the list
				self.requests.remove(i)
				self.iolock.release()
				return i
		self.iolock.release()
		return None

	def run(self):
		print self.getName(), " is running"
		self.ioaction()
		print self.getName(), "is finishing up"
		self.iolock.acquire()
		if self.isRead == False:
			self.needFsDataWrite = True
		self.requests.remove(self)
		S3AioThread.running_threads = S3AioThread.running_threads - 1
		self.iolock.release()
		print self.getName(), "Is checking for more threads"
		next = self.getNextPending()
		print self.getName(), "next returned ", next
		if next != None:
			print "Thread ", self.getName(), " is starting next thread"
			next.start()
		else:
			print "No More threads"
			#There is no more I/O to be done, lets write back
			#the fsdata now
			self.iolock.acquire()
			if self.needFsDataWrite == True:
				print self.getName(), "Sees FsData needs write"
				self.needFsDataWrite = False
				self.iolock.release()
				if self.fs.lazy_fsdata == "no":
					print self.getName(), "Is writing FsData"
					self.fs.updateFsData()
					
			else:
				self.iolock.release()
				
		print self.getName(), "is almost done"
		# Remove ourselves from the node's ioqueue
		self.node.dequeueIo(self)
		print self.getName(), "is now done"

class S3AioLookup(S3AioThread):
	def __init__(self, node, filesystem):
		S3AioThread.__init__(self, node, filesystem)
		self.isRead = True
		self.result = None 
		self.lock = thread.allocate_lock()
		self.lock.acquire()

	def ioaction(self):
		s3key = None
		self.node.iolock.acquire()
		if self.node.pending_delete == True:
			self.node.iolock.release()
			self.node.completeIo(True, True)
			self.result = False	
			self.lock.release()
			return
		self.node.iolock.release()
		located = False
		while located == False:
			try:
				s3key = self.fs.blockdev.lookup(self.node)
				located = True
			except:
				print "Lookup Hit exception! Retrying"
				time.sleep(1)
				s3key = None

		if s3key == None:
			self.result = False
		else:
			self.result = True
		self.lock.release()

	def getResult(self):
		self.lock.acquire()
		rc = self.result
		self.lock.release()
		return rc

class S3AioDelete(S3AioThread):
	def __init__(self, node, filesystem):
		S3AioThread.__init__(self, node, filesystem)
		self.isRead = False

	def ioaction(self):
		# in here we need to cancel all other io to this node
		# remove the node from the tree, remove the file from S3
		if ((self.node.stat.st_mode & stat.S_IFREG) == stat.S_IFREG):
			found = False
			while found == False:
				try:
					key = self.fs.blockdev.lookup(self.node)
					found = True
				except:
					found = False
			if key != None:
				self.fs.blockdev.bucket.delete_key(key)
				self.fs.cache.removeFromCache(self.node)
		self.node.removeNode(self.fs,False)
		return


class S3AioReader(S3AioThread):
	def __init__(self, node, filesystem):
		S3AioThread.__init__(self, node, filesystem)
		self.isRead = True

	def ioaction(self):
		#Check to see if another reader is already getting the file
		if self.fs.cache.isInCache(self.node) == True:
			# We're done, another reader is getting it for us
			return
		# Otherwise, we need to fetch it
		key = self.fs.blockdev.lookup(self.node)
		retrieved = False
		while retrieved == False:
			try:
				self.fs.cache.addToCache(self.node)
				fd = os.fdopen(self.fs.cache.openInCache(self.node),
						flag2mode(os.O_RDWR))
				key.get_contents_to_file(fd)
				fd.close()
				csize = self.fs.cache.statInCache(self.node)[stat.ST_SIZE]
				print self.getName(), "Got file, csize =", csize
				print "fsize =",  self.node.stat.st_size

				if csize == self.node.stat.st_size:
					retrieved = True
				else:
					self.fs.cache.removeFromCache(self.node)
			except:
				print self.getName(), "Hit read exception"
				fd.close()
				self.fs.cache.removeFromCache(self.node)
		return

class S3AioWriter(S3AioThread):
	def __init__(self, node, filesystem):
		S3AioThread.__init__(self, node, filesystem)
		self.isRead = False 

	def ioaction(self):
		# Start by waiting for the hysteresis time
		if self.fs.writeback_time != -1:
			time.sleep(self.fs.writeback_time)
		# Next, check to see if we still have a zero ref count
		self.node.iolock.acquire()
		# If the file was deleted, then we have nothing to do
		if self.node.pending_delete == True:
			# Reset the dirty flag to avoid confusing other threads
			self.node.dirty = False
			self.node.iolock.release()
			return
		if self.node.openref > 0:
			# if people have started using it again
			# don't write back yet
			self.node.iolock.release()
			return
		# It's still unused; let's make sure it's worth uploading
		if self.node.stat.st_size == 0:
			# S3 Doesn't store files of zero size, so don't bother
			# here doing any upload.  Note this gives rise to a
			# condition in which we may have a non-zero length file
			# on S3, but a zero length file in the Drive Meta Data.
			# This is ok, because we determine file length by the
			# latter value, and a subsequent write will create a new
			# thread that will overwrite the S3 contents later
			self.node.iolock.release()
			return

		# It's non-zero, so let's snap a copy and upload it
		fd = self.node.makeUploadCopy(self.fs)
		self.node.dirty = False
		self.node.iolock.release()
		
		try:
			key = self.fs.blockdev.lookup(self.node)
			if key == None:
				key = self.fs.blockdev.create(self.node)
			key.set_contents_from_file(fd)
		except:
			# Something went wrong.  Lets create a new thread to try
			# this again later
			print "Writing failed!"
			self.node.iolock.acquire()
			print "Resetting dirty"
			self.node.dirty = True
			self.node.iolock.release()
			print "Creating new writer thread"
			self.node.writeToS3(self.fs)
			print "Done handling exception"
		fd.close()
		return
				
def flag2mode(flags):
	md = {os.O_RDONLY: 'r', os.O_WRONLY: 'w', os.O_RDWR: 'w+'}
	m = md[flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)]

	if flags & os.O_APPEND:
		m = m.replace('w', 'a', 1)

	return m
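
# For example: flag2mode(os.O_RDONLY) -> 'r', flag2mode(os.O_WRONLY)
# -> 'w', flag2mode(os.O_RDWR) -> 'w+', and
# flag2mode(os.O_RDWR | os.O_APPEND) -> 'a+'.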


"""
The S3File Class.  fuse-python creates an instance of this class every time a
file is opened.  We use it to map local file requests to cache access and S3
data requests
"""
class S3File(object):

	# This is statically registered when the filesystem
	# is initialized
	myFS = None

	def __init__(self, path, flags, *mode):
		self.file = None
		self.direct_io = True 
		self.keep_cache = "no" 
		self.node = None
		node = self.myFS.FSData.findNode(path)
		nodetok = path.split('/')
		dir=""
		filename = nodetok[len(nodetok)-1]
		nodetok.remove(filename)
		for i in nodetok:
			if i != '':
				dir = dir + "/" + i
		if dir == "":
			dir = "/"

		dirnode = self.myFS.FSData.findNode(dir)
		if dirnode == None:
			return None 

		if node == None:
			#Check to see if we are supposed to be creating a file
			if (flags & os.O_CREAT) == os.O_CREAT:
				node = S3KeyLayoutElement(filename)
				node.runtimeSetup()
				node.stat.st_mode = stat.S_IFREG | 0755
				dirnode.addChild(node)
			else:
				return None 
		else:
			if (flags & os.O_EXCL) == os.O_EXCL:
				return None

		self.file = node.open(self.myFS, os.O_RDWR)
		self.fd = self.file.fileno()
		self.node = node
		return None 


	def read(self, length, offset):
		self.node.completeIo(True,False)
		self.file.seek(offset)
		buf = self.file.read(length)
		return buf 

	def write(self, buf, offset):
		# A single S3 object is limited to 5GB, so refuse writes
		# that would extend past that boundary
		if ((offset + len(buf)) > (5 * 1024 * 1024 * 1024)):
			return -errno.EFBIG

		self.file.seek(offset)
		self.file.write(buf)
		self.file.flush()
		# Grow the recorded size only if this write extends past
		# the old end of file; interior writes leave it unchanged
		size = self.node.stat.st_size
		self.node.stat.st_size = max(size, offset + len(buf))
		self.node.setDirty(True)
		return len(buf)

	def release(self, flags):
		self.file.close()
		self.node.close(self.myFS)

	def flush(self):
		self.file.flush()
		return

	def fsync(self, isfsyncfile):
		# Not much to really do here
		os.fsync(self.fd)
		self.node.completeIo(True, True)
		return

	def fgetattr(self):
		uid = os.getuid()
		gid = os.getgid()
		if (self.node.stat.st_uid != -1):
			uid = self.node.stat.st_uid
		if (self.node.stat.st_gid != -1):
			gid = self.node.stat.st_gid
		rostat = S3ROStat(self.node.stat, uid, gid)
		return rostat

	def ftruncate(self, len):
		self.file.truncate(len)
		self.node.stat.st_size = len
		return

"""
This is the big FUSE subclass, it implements all the local FS calls
"""
class S3Fs(Fuse):


	def __init__(self, *args, **kw):
		Fuse.__init__(self, *args, **kw)
		self.formatted = False
		self.bucket = ''
		self.preserve_cache='no'
		self.blockdev = None
		self.writeback_time = 10.0
		self.lazy_fsdata="yes"
		self.cachedir="/tmp"
		self.AWS_ACCESS_KEY_ID=None
		self.AWS_SECRET_ACCESS_KEY=None
		self.host = None
		self.fsdatalock = thread.allocate_lock()

	def setup(self):
		preserve = False

		# Mount options arrive from the option parser as strings;
		# normalize writeback_time so the numeric comparisons and
		# time.sleep() calls elsewhere behave
		self.writeback_time = float(self.writeback_time)

		#Set the env correctly
		if self.AWS_ACCESS_KEY_ID != None:
			os.putenv("AWS_ACCESS_KEY_ID",self.AWS_ACCESS_KEY_ID)
		if self.AWS_SECRET_ACCESS_KEY != None:
			os.putenv("AWS_SECRET_ACCESS_KEY",
				self.AWS_SECRET_ACCESS_KEY)

		if (os.environ.get("AWS_ACCESS_KEY_ID") == None):
			print "Need to specify AWS_ACCESS_KEY_ID"
		if (os.environ.get("AWS_SECRET_ACCESS_KEY") == None):
			print "Need to specify AWS_SECRET_ACCESS_KEY"

		if self.preserve_cache == 'yes':
			preserve = True
		self.blockdev = S3Drive(self.bucket, self.host)
		if self.blockdev.drive_ready == False:
			return False
		fsdata_key = self.blockdev.bucket.get_key("fsdata")
		if fsdata_key == None:
			return False
		fsdata_name="/tmp/"+self.bucket+".pkl"
		fp = open(fsdata_name,"w+b")
		fsdata_key.get_contents_to_file(fp)
		fp.close()
		fp = open(fsdata_name, "rb")
		self.FSData = pickle.load(fp)
		fp.close()
		os.unlink(fsdata_name)	
		self.FSData.checkUpgrade()
		self.FSData.root.runtimeSetup()

		#create the local cache for the filesystem
		self.cache = S3Cache(self.bucket,self.cachedir,-1,preserve)

		self.cache.syncTree(self.FSData.root)

		return True

	def updateFsData(self):
		updated = False
		found_key = False
		self.fsdatalock.acquire()
		print "Looking for fsdata key"
		while found_key == False:
			try:
				fsdata_key = self.blockdev.bucket.get_key("fsdata")
				found_key = True
			except:
				print "Hit an exception in looking up fsdata"
				time.sleep(1)
		fsdata_name="/tmp/"+self.bucket+".pkl"
		print "cloning Tree"
		fsdata_copy = self.FSData.cloneForSave()
		print "Done clonging tree"
		fp = open(fsdata_name,"w+b")
		pickle.dump(fsdata_copy,fp,-1)
		fp.close()
		print "Uploading fsdata"
		while updated == False:
			try:
				fsdata_key.set_contents_from_filename(fsdata_name)
				updated = True
			except:
				print "Hit exception uploading fsdata"
				time.sleep(2)

		print "Done uploading fsdata"
		os.unlink(fsdata_name)
		self.fsdatalock.release()
	
	def getattr(self, path):
		findnode = self.FSData.findNode(path)		
		if findnode == None:
			return -errno.ENOENT
		uid = findnode.stat.st_uid
		gid = findnode.stat.st_gid
		#For now, we'll assume that the owner of the mounting fuse
		#process owns all the objects in the mount
		if (uid == -1):
			uid = os.getuid()
		if (gid == -1):
			gid = os.getgid()

		return S3ROStat(findnode.stat, uid, gid) 

	def readdir(self, path, offset):
		# Start by finding the node that we're in
		dirNode = self.FSData.findNode(path)
		#Now assemble all the names of the entries in this list
		dirNames = [ '.', '..']
		for node in dirNode.children:
			dirNames.append(node.name)
		# Now yield the direntry list
		for i in dirNames:
			yield fuse.Direntry(i)
		
	def chmod ( self, path, mode ):
		node = self.FSData.findNode(path)
		if node == None:
			return -errno.ENOENT

		node.iolock.acquire()
		node.stat.st_mode = node.stat.st_mode & ~(0777)
		node.stat.st_mode = node.stat.st_mode | mode
		node.iolock.release()
		return 0

	def chown ( self, path, uid, gid ):
		node = self.FSData.findNode(path)
		if node == None:
			return -errno.ENOENT
		node.iolock.acquire()
		node.stat.st_uid = uid
		node.stat.st_gid = gid
		node.iolock.release()

	def link ( self, targetPath, linkPath ):
		nodetok = linkPath.split('/')
		lname = nodetok[len(nodetok)-1]
		nodetok.remove(lname)
		dir = ""
		for i in nodetok:
			if i != '':
				dir = dir + "/" + i
		if dir == "":
			dir = "/"
	
		dirnode = self.FSData.findNode(dir)
		if dirnode == None:
			return -errno.ENOENT

		tnode = self.FSData.findNode(targetPath)
		if tnode == None:
			return -errno.ENOENT

		lnode = S3KeyLayoutElement(lname)
		lnode.runtimeSetup()
		lnode.stat.st_mode = stat.S_IFLNK | 0777
		lnode.target_path = targetPath
		dirnode.addChild(lnode)
		return 0

	def mkdir ( self, path, mode ):
		nodetok = path.split('/')
		newdir = nodetok[len(nodetok)-1]
		nodetok.remove(newdir)
		dir = ""
		for i in nodetok:
			if i != '':
				dir = dir + "/" + i

		if dir == "":
			dir = "/"
	
		dirnode = self.FSData.findNode(dir)
		if dirnode == None:
			return -errno.ENOENT

		ndnode = S3KeyLayoutElement(newdir)
		ndnode.runtimeSetup()
		ndnode.stat.st_mode = stat.S_IFDIR | mode
		dirnode.addChild(ndnode)
		return 0

	def mknod ( self, path, mode, dev ):
		node = self.FSData.findNode(path)
		if node != None:
			return -errno.EEXIST
		nodetok = path.split('/')
		dir=""
		filename = nodetok[len(nodetok)-1]
		nodetok.remove(filename)
		for i in nodetok:
			if i != '':
				dir = dir + "/" + i
		if dir == "":
			dir = "/"

		dirnode = self.FSData.findNode(dir)
		file = S3KeyLayoutElement(filename)
		file.runtimeSetup()
		file.stat.st_mode = mode
		dirnode.addChild(file)
		node = self.FSData.findNode(path)
		fd = file.open(self, os.O_RDWR | os.O_CREAT)
		fd.close()
		return 0

	def readlink ( self, path ):
		node = self.FSData.findNode(path)
		if node == None:
			return -errno.ENOENT
		return node.target_path

	def rename ( self, oldPath, newPath ):
		# Start by finding the old node
		oldnode = self.FSData.findNode(oldPath)
		if oldnode == None:
			return -errno.ENOENT
		# Then check the new node
		newnode = self.FSData.findNode(newPath)
		if newnode != None:
			return -errno.EEXIST

		# let's get the parent of the new node
		newntok = newPath.split('/')
		newname = newntok[len(newntok)-1]
		newntok.remove(newname)
		dir = ""
		for i in newntok:
			if i != '':
				dir = dir + "/" + i
		if dir == "":
			dir = "/"

		newdirnode = self.FSData.findNode(dir)
		if newdirnode == None:
			return -errno.ENOENT
	
		# make a new node at the new directory
		newnode = S3KeyLayoutElement(newname)
		newnode.runtimeSetup()
		newnode.stat = oldnode.stat

		newdirnode.addChild(newnode)

		# now we open both nodes and copy the file over
		newfile = newnode.open(self, os.O_RDWR|os.O_CREAT)
		oldfile = oldnode.open(self, os.O_RDWR)

		# wait on all io to finish on both files
		newnode.completeIo(True, True)
		oldnode.completeIo(True, True)

		# now lock the nodes
		newnode.iolock.acquire()
		oldnode.iolock.acquire()

		# now copy the file
		shutil.copyfileobj(oldfile,newfile)
	
		#mark the new file dirty
		newnode.dirty = True

		#unlock the files
		newnode.iolock.release()
		oldnode.iolock.release()

		# Close the files
		newnode.close(self)
		oldnode.close(self)

		# now we just need to remove the old file from
		# the tree
		oldnode.delete(self)

		return 0	

	def rmdir ( self, path ):
		node = self.FSData.findNode(path)
		if node == None:
			return -errno.ENOENT
		if ((node.stat.st_mode & stat.S_IFDIR) != stat.S_IFDIR):
			return -errno.ENOTDIR
		node.parent.removeChild(node,self)
		return 0 

	def access (self, path, mode):
		mask = 0007
		node = self.FSData.findNode(path)
		if node == None:
			return -errno.ENOENT
		if node.stat.st_uid == -1:
			uid = -1
		else:
			uid = os.getuid()

		if node.stat.st_gid == -1:
			gid = -1
		else:
			gid = os.getgid()

		if node.stat.st_uid == uid:
			return 0 
		if node.stat.st_gid == gid:
			return 0 

		return -errno.EACCES

		
	def getxattr(self, path, name, size):
		node = self.FSData.findNode(path)
		if node == None:
			return -errno.ENOENT

		try:
			attr = node.xattrs[name]
		except KeyError:
			return -errno.ENODATA
		if size == 0:
			return len(attr)
		return attr

	def setxattr(self, path, name, value):
		node = self.FSData.findNode(path)
		if node == None:
			return -errno.ENOENT

		node.xattrs[name] = value

	def statfs ( self ):
		st = fuse.StatVfs()
		st.f_bsize = 1024 
		st.f_frsize = 0x7fffffff
		st.f_blocks = 0x7fffffff
		st.f_bfree =  0x7fffffff
		st.f_bavail = 0x7fffffff
		st.f_files =  0x7fffffff
		st.f_ffree =  0x7fffffff
		st.f_namelen = 255 
		return st 
		
	def symlink ( self, targetPath, linkPath ):
		nodetok = linkPath.split('/')
		lname = nodetok[len(nodetok)-1]
		nodetok.remove(lname)
		dir = ""
		for i in nodetok:
			if i != '':
				dir = dir + "/" + i

		if dir == "":
			dir = "/"
	
		dirnode = self.FSData.findNode(dir)
		if dirnode == None:
			return -errno.ENOENT

		lnode = S3KeyLayoutElement(lname)
		lnode.runtimeSetup()
		lnode.stat.st_mode = stat.S_IFLNK | 0777
		lnode.target_path = targetPath
		dirnode.addChild(lnode)

	def unlink ( self, path ):
		node = self.FSData.findNode(path)
		if node == None:
			return -errno.ENOENT
		node.delete(self)
		return 0

	def utime(self, path, times):
		print "Calling utime with times ", times
		return 0

	def utimens(self, path, ts_acc, ts_mod):
		print "Calling utimens with times ", ts_acc, ts_mod
		return 0

	def main(self, *a, **kw):
		self.file_class = S3File
		S3File.myFS = self
		return Fuse.main(self, *a, **kw)

	def shutdown(self):
		print "Exiting, Cleaning up FS"

		print "Waiting on all threads to finish"
		self.FSData.root.completeAllIo(True, True)

		print "Verifying writeback"
		self.FSData.root.writebackAll(self)

		print "Waiting on all threads to finish"
		self.FSData.root.completeAllIo(True, True)

		print "Parking the Cache"
		self.cache.shutdown()

		if self.lazy_fsdata == "yes":
			print "Updating the Filesystem Metadata"
			self.updateFsData()

"""
================================================================================
Helper functions mostly for use in command line mode
"""
def check_S3_data():
	if (os.environ.get("AWS_ACCESS_KEY_ID") == None):
		print "Need to export AWS_ACCESS_KEY_ID"
		sys.exit(1)
	if (os.environ.get("AWS_SECRET_ACCESS_KEY") == None):
		print "Need to export AWS_SECRET_ACCESS_KEY"
		sys.exit(1)

def create_S3_bucket(bucket, host):
	drive = S3Drive(bucket, host)
	if drive.connection_ready == False:
		print "Unable to connect to S3"
		return -1
	try:
		drive.connection.create_bucket(bucket)
	except:
		print "Unable to create bucket"
		return -1
	return 0

def delete_S3_bucket(bucket, host):
	drive = S3Drive(bucket, host)
	if drive.connection_ready == False:
		print "Unable to connect to S3"
		return -1
	if drive.drive_ready == False:
		print "Unable to find bucket ", bucket
		return -1
	fsdata_key = drive.bucket.get_key("fsdata")
	if fsdata_key == None:
		print "Not a formatted bucket, won't delete"
		return
	fsdata_name="/tmp/"+bucket+".pkl"
	fp = open(fsdata_name,"w+b")
	fsdata_key.get_contents_to_file(fp)
	fp.close()
	fp = open(fsdata_name, "rb")
	FSData = pickle.load(fp)
	fp.close()
	os.unlink(fsdata_name)

	FSData.checkUpgrade()
	if FSData.locked_on_s3 == True:
		print "This bucket is locked, you must unlock it first" 
		return
	objs = drive.bucket.list()
	for i in objs:
		drive.bucket.delete_key(i.name)
	drive.connection.delete_bucket(bucket)

def format_S3_bucket(bucket, host):
	drive = S3Drive(bucket, host)
	if drive.drive_ready == False:
		print "Could not find Drive", bucket
		return -1

	fsdata_key = drive.bucket.get_key("fsdata")
	if fsdata_key != None:
		fsdata_name="/tmp/"+bucket+".pkl"
		fp = open(fsdata_name,"w+b")
		fsdata_key.get_contents_to_file(fp)
		fp.close()
		fp = open(fsdata_name, "rb")
		FSData = pickle.load(fp)
		fp.close()
		os.unlink(fsdata_name)

		FSData.checkUpgrade()
		if FSData.locked_on_s3 == True:
			print "This bucket is locked, you must unlock it first" 
			return
	DriveData = S3DriveMetaData()
	DriveData.root.runtimeShutdown()
	fname = "/tmp/"+bucket+".pkl" 
	pf = open(fname, "wb")
	pickle.dump(DriveData,pf,-1)
	pf.close()
	MDKey = drive.bucket.new_key("fsdata") 
	MDKey.set_contents_from_filename(fname)
	os.unlink(fname)

def toggle_S3_bucket_lock(bucket, host):
	drive = S3Drive(bucket, host)
	if drive.connection_ready == False:
		print "Unable to connect to S3"
		return -1
	if drive.drive_ready == False:
		print "Unable to find bucket ", bucket
		return -1
	fsdata_key = drive.bucket.get_key("fsdata")
	if fsdata_key == None:
		print "Not a formatted bucket, won't delete"
		return
	fsdata_name="/tmp/"+bucket+".pkl"
	fp = open(fsdata_name,"w+b")
	fsdata_key.get_contents_to_file(fp)
	fp.close()
	fp = open(fsdata_name, "rb")
	FSData = pickle.load(fp)
	fp.close()
	os.unlink(fsdata_name)

	FSData.checkUpgrade()

	if FSData.locked_on_s3 == True:
		FSData.locked_on_s3 = False
		print "Unlocking bucket ", bucket
	else:
		FSData.locked_on_s3 = True
		print "Locking bucket ", bucket

	fp = open(fsdata_name,"w+b")
	pickle.dump(FSData,fp,-1)
	fp.close()
	fsdata_key.set_contents_from_filename(fsdata_name)
	os.unlink(fsdata_name)
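
def repair_S3_bucket_filesystem(bucket, host):
	# Placeholder for the -r command-mode option below; interactive
	# repair is not implemented in this version, but defining the
	# function keeps handle_command_mode from raising a NameError
	print "Interactive repair of bucket", bucket, "is not yet implemented"
	return -1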
	
def s3fs_usage():
	print "./s3fs [<-C> [-hcdrfk] [-p <key>] [-s <key>] [-H <host>]] [<bucket> <mountpoint>]"
	print "-C enter command mode"
	print "-h This help menu"
	print "-p <Access Key>"
	print "-s <Secret Access Key>"
	print "-H <Host> alternate S3 host to connect to"
	print "-c create bucket"
	print "-d delete bucket (and all objects in bucket!)"
	print "-r repair s3fs filesystem interactively"
	print "-f format S3 bucket as s3fs filesystem"
	print "-k toggle the lock bit on the specified bucket"
 	
def handle_command_mode():
	host = None
	bucket = None
	createBucketFlag = False
	deleteBucketFlag = False
	repairBucketFlag = False
	formatBucketFlag = False
	toggleBucketLockFlag = False 
	options_selected = 0
	try:
		opts, args = getopt.getopt(sys.argv[2:], "hp:s:c:d:r:f:H:k:")
	except getopt.GetoptError:
		s3fs_usage()
		return 1
	for o, a in opts:
		if o == "-h":
			s3fs_usage()
			return 0
		if o == "-p":
                        os.environ["AWS_ACCESS_KEY_ID"] = a
		if o == "-s":
                        os.environ["AWS_SECRET_ACCESS_KEY"] = a
		if o == "-H":
			host = a
		if o == "-c":
			bucket = a
			createBucketFlag = True
			options_selected=options_selected+1
		if o == "-d":
			bucket = a
			deleteBucketFlag = True
			options_selected=options_selected+1
		if o == "-r":
			bucket = a
			repairBucketFlag = True
			options_selected=options_selected+1
		if o == "-f":
			bucket = a
			formatBucketFlag = True
			options_selected=options_selected+1
		if o == "-k":
			bucket = a
			toggleBucketLockFlag = True
			options_selected=options_selected+1

	if (options_selected == 0) or (options_selected > 1):
		s3fs_usage()
		return 0

	if createBucketFlag == True:
		return create_S3_bucket(bucket, host)

	if deleteBucketFlag == True:
		return delete_S3_bucket(bucket, host)

	if repairBucketFlag == True:
		return repair_S3_bucket_filesystem(bucket, host)

	if formatBucketFlag == True:
		return format_S3_bucket(bucket, host)

	if toggleBucketLockFlag == True:
		return toggle_S3_bucket_lock(bucket, host)

	# Someone specified more than 1 operation at once
	s3fs_usage()
	return 0
"""
================================================================================
"""

def s3fs_cleanup(filesystem):
	filesystem.shutdown()


def main():
	fuse.fuse_python_api=(0, 2)
	fs = S3Fs()

	# Add mount options here
	fs.parser.add_option(mountopt="bucket", default='',
			help="The S3 bucket to mount")
	fs.parser.add_option(mountopt="preserve_cache", default='no',
			help="Reuse cache to prevent additional downloads")
	fs.parser.add_option(mountopt="writeback_time", default=10,
			help="Hysteresis time before preforming node writeback")
	fs.parser.add_option(mountopt="cachedir", 
			default=os.environ.get("HOME") + ".fuse-s3fs-cache",
			help="Root for S3 bucket cache")
	fs.parser.add_option(mountopt="lazy_fsdata", default="yes",
			help="Write back FS metadata only on unmount")
	fs.parser.add_option(mountopt="AWS_ACCESS_KEY_ID", default=None,
			help="Specify your public access key")
	fs.parser.add_option(mountopt="AWS_SECRET_ACCESS_KEY", default=None,
			help="Specify your private access key")
	fs.parser.add_option(mountopt="host", default=None,
			help="Specify aws access host")
	fs.parse(values=fs,errex=1)
	fs.flags = 0
	fs.multithreaded = True 
	if fs.setup() == False:
		print "Unable to establish this mount"
		sys.exit(1)
	check_S3_data()
	fs.main()	
	s3fs_cleanup(fs)

if __name__ == '__main__':
	if len(sys.argv) == 1 or sys.argv[1] == "-h":
		s3fs_usage()
		sys.exit(0)
	if sys.argv[1] == "-C":
		rc = handle_command_mode()
		sys.exit(rc)
	else:
		main()

