public class SwiftNativeFileSystem
extends org.apache.hadoop.fs.FileSystem
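For orientation, here is a minimal sketch of obtaining this filesystem through the generic Hadoop FileSystem API. The service name "myprovider", the container "mycontainer", the credentials and the fs.swift.service.* property keys are illustrative assumptions rather than values taken from this page; consult the hadoop-openstack configuration documentation for the exact keys.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class SwiftQuickStart {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical keystone credentials for a service named "myprovider".
    conf.set("fs.swift.service.myprovider.auth.url", "https://keystone.example.com/v2.0/tokens");
    conf.set("fs.swift.service.myprovider.username", "swift-user");
    conf.set("fs.swift.service.myprovider.password", "secret");
    conf.set("fs.swift.service.myprovider.tenant", "demo");

    // swift://<container>.<service>/<path>; the scheme is the SWIFT prefix below.
    URI uri = URI.create("swift://mycontainer.myprovider/");
    FileSystem fs = FileSystem.get(uri, conf);   // invokes initialize(uri, conf) internally
    System.out.println("Working directory: " + fs.getWorkingDirectory());
    fs.close();
  }
}
```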
Modifier and Type | Field and Description |
---|---|
static String | SWIFT <br> filesystem prefix: "swift" |
Constructor and Description |
---|
SwiftNativeFileSystem() <br> Default constructor for Hadoop |
SwiftNativeFileSystem(SwiftNativeFileSystemStore store) <br> This constructor is used for testing purposes |
Modifier and Type | Method and Description |
---|---|
org.apache.hadoop.fs.FSDataOutputStream | append(org.apache.hadoop.fs.Path f, int bufferSize, org.apache.hadoop.util.Progressable progress) <br> This optional operation is not supported |
org.apache.hadoop.fs.FSDataOutputStream | create(org.apache.hadoop.fs.Path file, org.apache.hadoop.fs.permission.FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, org.apache.hadoop.util.Progressable progress) |
protected org.apache.hadoop.fs.swift.snative.SwiftNativeOutputStream | createSwiftOutputStream(org.apache.hadoop.fs.Path path) <br> Create the Swift output stream |
boolean | delete(org.apache.hadoop.fs.Path f) <br> Delete a file. |
boolean | delete(org.apache.hadoop.fs.Path path, boolean recursive) <br> Delete a file or directory |
long | getBlockSize(org.apache.hadoop.fs.Path path) |
static long | getBytesUploaded(org.apache.hadoop.fs.FSDataOutputStream outputStream) <br> Get the number of bytes uploaded by an output stream to the Swift cluster. |
static long | getBytesWritten(org.apache.hadoop.fs.FSDataOutputStream outputStream) <br> Get the number of bytes written to an output stream; this is for testing |
long | getDefaultBlockSize() <br> The block size of this filesystem is set by the property SwiftProtocolConstants.SWIFT_BLOCKSIZE; the default is the value of SwiftProtocolConstants.DEFAULT_SWIFT_BLOCKSIZE |
long | getDefaultBlockSize(org.apache.hadoop.fs.Path f) <br> The block size for this filesystem. |
org.apache.hadoop.fs.BlockLocation[] | getFileBlockLocations(org.apache.hadoop.fs.FileStatus file, long start, long len) <br> Return an array containing hostnames, offset and size of portions of the given file. |
org.apache.hadoop.fs.FileStatus | getFileStatus(org.apache.hadoop.fs.Path path) <br> Return a file status object that represents the path. |
List<DurationStats> | getOperationStatistics() <br> Get the current operation statistics |
static long | getPartitionSize(org.apache.hadoop.fs.FSDataOutputStream outputStream) <br> Get the size of partitions written by an output stream; this is for testing |
static int | getPartitionsWritten(org.apache.hadoop.fs.FSDataOutputStream outputStream) <br> Get the number of partitions written by an output stream; this is for testing |
String | getScheme() |
SwiftNativeFileSystemStore | getStore() <br> This is for testing |
URI | getUri() |
org.apache.hadoop.fs.Path | getWorkingDirectory() <br> Path to the user working directory |
void | initialize(URI fsuri, org.apache.hadoop.conf.Configuration conf) <br> Default class initialization |
boolean | isDirectory(org.apache.hadoop.fs.Path f) |
boolean | isFile(org.apache.hadoop.fs.Path f) |
org.apache.hadoop.fs.FileStatus[] | listRawFileStatus(org.apache.hadoop.fs.Path path, boolean newest) <br> Low-level method to do a deep listing of all entries, not stopping at the next directory entry. |
org.apache.hadoop.fs.FileStatus[] | listStatus(org.apache.hadoop.fs.Path path) <br> List the statuses of the files/directories in the given path if the path is a directory. |
protected org.apache.hadoop.fs.Path | makeAbsolute(org.apache.hadoop.fs.Path path) <br> Makes a path absolute |
boolean | mkdirs(org.apache.hadoop.fs.Path path, org.apache.hadoop.fs.permission.FsPermission permission) <br> Create the parent directories. |
org.apache.hadoop.fs.FSDataInputStream | open(org.apache.hadoop.fs.Path path, int bufferSize) <br> Opens an FSDataInputStream at the indicated Path. |
org.apache.hadoop.fs.FSDataInputStream | open(org.apache.hadoop.fs.Path path, int bufferSize, long readBlockSize) <br> Low-level operation that also sets the block size for this operation |
boolean | rename(org.apache.hadoop.fs.Path src, org.apache.hadoop.fs.Path dst) <br> Renames Path src to Path dst. |
void | setWorkingDirectory(org.apache.hadoop.fs.Path dir) |
String | toString() |
Methods inherited from class org.apache.hadoop.fs.FileSystem
access, addDelegationTokens, append, append, areSymlinksEnabled, cancelDeleteOnExit, canonicalizeUri, checkPath, clearStatistics, close, closeAll, closeAllForUGI, completeLocalOutput, concat, copyFromLocalFile, copyFromLocalFile, copyFromLocalFile, copyFromLocalFile, copyToLocalFile, copyToLocalFile, copyToLocalFile, create, create, create, create, create, create, create, create, create, create, create, create, createFid, createNewFile, createNonRecursive, createNonRecursive, createNonRecursive, createPathId, createSnapshot, createSnapshot, createSymlink, deleteFid, deleteOnExit, deleteSnapshot, enableSymlinks, exists, fixRelativePart, get, get, get, getAclStatus, getAllStatistics, getCanonicalServiceName, getCanonicalUri, getChildFileSystems, getContentSummary, getDefaultPort, getDefaultReplication, getDefaultReplication, getDefaultUri, getDelegationToken, getFileBlockLocations, getFileChecksum, getFileChecksum, getFileLinkStatus, getFileSystemClass, getFSofPath, getHomeDirectory, getInitialWorkingDirectory, getJobTrackerAddrs, getLength, getLinkTarget, getLocal, getName, getNamed, getReplication, getServerDefaults, getServerDefaults, getStatistics, getStatistics, getStatus, getStatus, getUsed, getXAttr, getXAttrs, getXAttrs, getZkConnectString, globStatus, globStatus, listCorruptFileBlocks, listFiles, listLocatedStatus, listLocatedStatus, listStatus, listStatus, listStatus, listStatusIterator, listXAttrs, makeQualified, mkdirs, mkdirs, mkdirsFid, mkdirsFid, modifyAclEntries, moveFromLocalFile, moveFromLocalFile, moveToLocalFile, newInstance, newInstance, newInstance, newInstanceLocal, open, openFid, openFid, openFid2, primitiveCreate, primitiveMkdir, primitiveMkdir, printStatistics, processDeleteOnExit, removeAcl, removeAclEntries, removeDefaultAcl, removeXAttr, rename, renameSnapshot, resolveLink, resolvePath, setAcl, setDefaultUri, setDefaultUri, setOwner, setOwnerFid, setPermission, setReplication, setTimes, setVerifyChecksum, setWriteChecksum, setXAttr, setXAttr, startLocalOutput, supportsSymlinks, truncate
public static final String SWIFT
public SwiftNativeFileSystem()
public SwiftNativeFileSystem(SwiftNativeFileSystemStore store)
public SwiftNativeFileSystemStore getStore()
public String getScheme()
getScheme
in class org.apache.hadoop.fs.FileSystem
public void initialize(URI fsuri, org.apache.hadoop.conf.Configuration conf) throws IOException
initialize
in class org.apache.hadoop.fs.FileSystem
fsuri
- path to Swift
conf
- Hadoop configuration
IOException
public URI getUri()
getUri
in class org.apache.hadoop.fs.FileSystem
public org.apache.hadoop.fs.Path getWorkingDirectory()
getWorkingDirectory
in class org.apache.hadoop.fs.FileSystem
public void setWorkingDirectory(org.apache.hadoop.fs.Path dir)
setWorkingDirectory
in class org.apache.hadoop.fs.FileSystem
dir
- user working directory
public org.apache.hadoop.fs.FileStatus getFileStatus(org.apache.hadoop.fs.Path path) throws IOException
getFileStatus
in class org.apache.hadoop.fs.FileSystem
path
- The path we want information from
IOException
public long getDefaultBlockSize()
getDefaultBlockSize
in class org.apache.hadoop.fs.FileSystem
public long getDefaultBlockSize(org.apache.hadoop.fs.Path f)
getDefaultBlockSize
in class org.apache.hadoop.fs.FileSystem
f
- path of file
See Also:
getDefaultBlockSize()
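A hedged sketch of tuning the reported block size follows. It assumes SwiftProtocolConstants.SWIFT_BLOCKSIZE corresponds to the key "fs.swift.blocksize" and that the value is read in kilobytes; both assumptions should be checked against SwiftProtocolConstants before relying on them. The container, service and file names are the hypothetical ones from the earlier sketch.

```java
// The property key and its KB unit are assumptions drawn from the constant names
// SWIFT_BLOCKSIZE / DEFAULT_SWIFT_BLOCKSIZE; verify them in SwiftProtocolConstants.
static long reportBlockSize(org.apache.hadoop.conf.Configuration conf) throws java.io.IOException {
  conf.setInt("fs.swift.blocksize", 64 * 1024);   // intended as ~64 MB if the unit really is KB
  org.apache.hadoop.fs.FileSystem fs = org.apache.hadoop.fs.FileSystem.get(
      java.net.URI.create("swift://mycontainer.myprovider/"), conf);
  long blockSize = fs.getDefaultBlockSize(new org.apache.hadoop.fs.Path("/data/sample.csv"));
  System.out.println("Default block size: " + blockSize + " bytes");
  return blockSize;
}
```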
public long getBlockSize(org.apache.hadoop.fs.Path path) throws IOException
getBlockSize
in class org.apache.hadoop.fs.FileSystem
IOException
public boolean isFile(org.apache.hadoop.fs.Path f) throws IOException
isFile
in class org.apache.hadoop.fs.FileSystem
IOException
public boolean isDirectory(org.apache.hadoop.fs.Path f) throws IOException
isDirectory
in class org.apache.hadoop.fs.FileSystem
IOException
public org.apache.hadoop.fs.BlockLocation[] getFileBlockLocations(org.apache.hadoop.fs.FileStatus file, long start, long len) throws IOException
This call is most helpful with DFS, where it returns hostnames of machines that contain the given file.
The FileSystem will simply return an element containing 'localhost'.
getFileBlockLocations
in class org.apache.hadoop.fs.FileSystem
IOException
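A small sketch of querying block locations, assuming a FileSystem handle obtained as in the earlier sketch and a hypothetical path; per the description above, the returned locations are expected to name 'localhost' rather than real hosts.

```java
static void showBlockLocations(org.apache.hadoop.fs.FileSystem fs) throws java.io.IOException {
  org.apache.hadoop.fs.Path path = new org.apache.hadoop.fs.Path("/data/sample.csv");
  org.apache.hadoop.fs.FileStatus status = fs.getFileStatus(path);
  org.apache.hadoop.fs.BlockLocation[] locations =
      fs.getFileBlockLocations(status, 0, status.getLen());
  for (org.apache.hadoop.fs.BlockLocation location : locations) {
    System.out.println(location);  // offset, length and host list for each block
  }
}
```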
public boolean mkdirs(org.apache.hadoop.fs.Path path, org.apache.hadoop.fs.permission.FsPermission permission) throws IOException
mkdirs
in class org.apache.hadoop.fs.FileSystem
path
- path to create.
permission
- to apply to files
IOException
- on a problem
public org.apache.hadoop.fs.FileStatus[] listStatus(org.apache.hadoop.fs.Path path) throws IOException
listStatus
in class org.apache.hadoop.fs.FileSystem
path
- given path
IOException
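A sketch of a shallow listing with listStatus, again assuming an already-initialized FileSystem; the directory name is hypothetical.

```java
static void listDirectory(org.apache.hadoop.fs.FileSystem fs) throws java.io.IOException {
  org.apache.hadoop.fs.FileStatus[] entries =
      fs.listStatus(new org.apache.hadoop.fs.Path("/data"));
  for (org.apache.hadoop.fs.FileStatus entry : entries) {
    System.out.println((entry.isDirectory() ? "dir  " : "file ") + entry.getPath());
  }
}
```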
public org.apache.hadoop.fs.FSDataOutputStream append(org.apache.hadoop.fs.Path f, int bufferSize, org.apache.hadoop.util.Progressable progress) throws IOException
append
in class org.apache.hadoop.fs.FileSystem
IOException
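Since append is unsupported here, a caller-side sketch would simply expect an IOException; the concrete exception subclass is not specified on this page, and the path below is hypothetical.

```java
static void tryAppend(org.apache.hadoop.fs.FileSystem fs) {
  try {
    fs.append(new org.apache.hadoop.fs.Path("/data/log.txt"), 4096, null).close();
  } catch (java.io.IOException expected) {
    // append is an optional FileSystem operation that this class does not support
    System.out.println("append rejected as expected: " + expected.getMessage());
  }
}
```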
public org.apache.hadoop.fs.FSDataOutputStream create(org.apache.hadoop.fs.Path file, org.apache.hadoop.fs.permission.FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, org.apache.hadoop.util.Progressable progress) throws IOException
create
in class org.apache.hadoop.fs.FileSystem
permission
- Currently ignored.
IOException
protected org.apache.hadoop.fs.swift.snative.SwiftNativeOutputStream createSwiftOutputStream(org.apache.hadoop.fs.Path path) throws IOException
path
- path to write to
IOException
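createSwiftOutputStream is the protected hook behind create(); application code goes through create() and open(). A hedged write/read round trip follows, assuming an initialized FileSystem and hypothetical paths.

```java
static void writeAndReadBack(org.apache.hadoop.fs.FileSystem fs) throws java.io.IOException {
  org.apache.hadoop.fs.Path path = new org.apache.hadoop.fs.Path("/data/hello.txt");
  try (org.apache.hadoop.fs.FSDataOutputStream out = fs.create(path, true)) {
    out.write("hello swift".getBytes(java.nio.charset.StandardCharsets.UTF_8));
  }
  try (org.apache.hadoop.fs.FSDataInputStream in = fs.open(path, 4096)) {
    byte[] buffer = new byte[32];
    int read = in.read(buffer);
    System.out.println(new String(buffer, 0, read, java.nio.charset.StandardCharsets.UTF_8));
  }
}
```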
public org.apache.hadoop.fs.FSDataInputStream open(org.apache.hadoop.fs.Path path, int bufferSize) throws IOException
open
in class org.apache.hadoop.fs.FileSystem
path
- the file name to open
bufferSize
- the size of the buffer to be used.
FileNotFoundException
- if the file is not found
IOException
- any IO problem
public org.apache.hadoop.fs.FSDataInputStream open(org.apache.hadoop.fs.Path path, int bufferSize, long readBlockSize) throws IOException
path
- the file name to open
bufferSize
- the size of the buffer to be used.
readBlockSize
- how big the read block/buffer size should be
FileNotFoundException
- if the file is not found
IOException
- any IO problem
public boolean rename(org.apache.hadoop.fs.Path src, org.apache.hadoop.fs.Path dst) throws IOException
rename
in class org.apache.hadoop.fs.FileSystem
src
- path
dst
- path
IOException
- on problems
public boolean delete(org.apache.hadoop.fs.Path path, boolean recursive) throws IOException
delete
in class org.apache.hadoop.fs.FileSystem
path
- the path to delete.
recursive
- if path is a directory and set to true, the directory is deleted; otherwise an exception is thrown if the directory is not empty. In the case of a file, recursive can be set to either true or false.
IOException
- IO problems
public boolean delete(org.apache.hadoop.fs.Path f) throws IOException
delete
in class org.apache.hadoop.fs.FileSystem
IOException
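A sketch combining rename and recursive delete, assuming an initialized FileSystem and hypothetical paths.

```java
static void renameAndDelete(org.apache.hadoop.fs.FileSystem fs) throws java.io.IOException {
  org.apache.hadoop.fs.Path src = new org.apache.hadoop.fs.Path("/data/staging");
  org.apache.hadoop.fs.Path dst = new org.apache.hadoop.fs.Path("/data/published");
  if (fs.rename(src, dst)) {
    System.out.println("renamed " + src + " to " + dst);
  }
  // recursive=true removes a non-empty directory; with false, a non-empty directory throws
  boolean deleted = fs.delete(dst, true);
  System.out.println("deleted: " + deleted);
}
```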
protected org.apache.hadoop.fs.Path makeAbsolute(org.apache.hadoop.fs.Path path)
path
- path to file
public List<DurationStats> getOperationStatistics()
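A sketch of reading the per-operation duration statistics. It assumes DurationStats lives in org.apache.hadoop.fs.swift.util and that its toString() gives a readable summary, and it needs the concrete SwiftNativeFileSystem type rather than a generic FileSystem handle.

```java
static void dumpOperationStatistics(
    org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem swiftFs) {
  for (org.apache.hadoop.fs.swift.util.DurationStats stats : swiftFs.getOperationStatistics()) {
    System.out.println(stats);  // one line per tracked operation
  }
}
```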
@InterfaceAudience.Private public org.apache.hadoop.fs.FileStatus[] listRawFileStatus(org.apache.hadoop.fs.Path path, boolean newest) throws IOException
path
- path to recurse down
newest
- ask for the newest data, potentially slower than not.
IOException
- any problem
@InterfaceAudience.Private
public static int getPartitionsWritten(org.apache.hadoop.fs.FSDataOutputStream outputStream)
outputStream
- output stream
@InterfaceAudience.Private
public static long getPartitionSize(org.apache.hadoop.fs.FSDataOutputStream outputStream)
outputStream
- output stream
@InterfaceAudience.Private
public static long getBytesWritten(org.apache.hadoop.fs.FSDataOutputStream outputStream)
outputStream
- output stream
@InterfaceAudience.Private
public static long getBytesUploaded(org.apache.hadoop.fs.FSDataOutputStream outputStream)
outputStream
- output stream
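The four @InterfaceAudience.Private static helpers above are intended for tests. A hedged sketch of using them against a freshly written stream follows; whether the counters remain queryable after close, and whether close is what triggers the final upload to Swift, are assumptions rather than statements from this page.

```java
static void reportUploadCounters(org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem fs)
    throws java.io.IOException {
  org.apache.hadoop.fs.Path path = new org.apache.hadoop.fs.Path("/data/large.bin");
  org.apache.hadoop.fs.FSDataOutputStream out = fs.create(path, true);
  out.write(new byte[8 * 1024 * 1024]);  // 8 MB of zeros, purely illustrative
  out.close();                           // assumed to flush the remaining data to Swift
  System.out.println("bytes written:      "
      + org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem.getBytesWritten(out));
  System.out.println("bytes uploaded:     "
      + org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem.getBytesUploaded(out));
  System.out.println("partition size:     "
      + org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem.getPartitionSize(out));
  System.out.println("partitions written: "
      + org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem.getPartitionsWritten(out));
}
```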