All Implemented Interfaces: java.io.Closeable, java.io.Flushable, java.lang.AutoCloseable, org.apache.hadoop.fs.CanSetDropBehind, org.apache.hadoop.fs.StreamCapabilities, org.apache.hadoop.fs.Syncable
Direct Known Subclasses: DFSStripedOutputStream
@Private
public class DFSOutputStream
extends org.apache.hadoop.fs.FSOutputSummer
implements org.apache.hadoop.fs.Syncable, org.apache.hadoop.fs.CanSetDropBehind, org.apache.hadoop.fs.StreamCapabilities
| Modifier and Type | Field | Description |
|---|---|---|
protected long |
blockSize |
|
protected ByteArrayManager |
byteArrayManager |
|
protected int |
bytesPerChecksum |
|
protected java.util.concurrent.atomic.AtomicReference<CachingStrategy> |
cachingStrategy |
|
protected int |
chunksPerPacket |
|
protected boolean |
closed |
|
protected DFSPacket |
currentPacket |
|
protected DFSClient |
dfsClient |
|
protected long |
fileId |
|
protected long |
initialFileSize |
|
protected long |
lastFlushOffset |
|
protected int |
packetSize |
|
protected boolean |
shouldSyncBlock |
|
protected java.lang.String |
src |
|
protected org.apache.hadoop.hdfs.DataStreamer |
streamer |
| Modifier | Constructor | Description |
|---|---|---|
protected |
DFSOutputStream(DFSClient dfsClient,
java.lang.String src,
HdfsFileStatus stat,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
org.apache.hadoop.util.Progressable progress,
org.apache.hadoop.util.DataChecksum checksum,
java.lang.String[] favoredNodes,
boolean createStreamer) |
Construct a new output stream for creating a file.
|
| Modifier and Type | Method | Description |
|---|---|---|
protected void |
adjustChunkBoundary() |
If the reopened file did not end at chunk boundary and the above
write filled up its partial chunk.
|
protected void |
checkClosed() |
|
void |
close() |
Closes this output stream and releases any system
resources associated with this stream.
|
protected void |
closeImpl() |
|
protected void |
closeThreads(boolean force) |
|
protected void |
completeFile(ExtendedBlock last) |
|
protected void |
computePacketChunkSize(int psize,
int csize) |
|
protected DFSPacket |
createPacket(int packetSize,
int chunksPerPkt,
long offsetInBlock,
long seqno,
boolean lastPacketInBlock) |
Use
ByteArrayManager to create buffer for non-heartbeat packets. |
protected org.apache.hadoop.tracing.TraceScope |
createWriteTraceScope() |
|
protected void |
flushInternal() |
Waits till all existing data is flushed and confirmations
received from datanodes.
|
protected long |
flushInternalWithoutWaitingAck() |
|
protected java.util.EnumSet<AddBlockFlag> |
getAddBlockFlags() |
|
int |
getCurrentBlockReplication() |
Note that this is not a public API;
use
HdfsDataOutputStream.getCurrentBlockReplication() instead. |
org.apache.hadoop.fs.FileEncryptionInfo |
getFileEncryptionInfo() |
|
long |
getFileId() |
|
long |
getInitialLen() |
Returns the size of a file as it was when this stream was opened.
|
java.lang.String |
getNamespace() |
|
int |
getNumCurrentReplicas() |
Deprecated.
|
DatanodeInfo[] |
getPipeline() |
|
protected org.apache.hadoop.hdfs.DataStreamer |
getStreamer() |
Returns the data streamer object.
|
java.lang.String |
getUniqKey() |
|
boolean |
hasCapability(java.lang.String capability) |
|
void |
hflush() |
Flushes out to all replicas of the block.
|
void |
hsync() |
|
void |
hsync(java.util.EnumSet<HdfsDataOutputStream.SyncFlag> syncFlags) |
The expected semantics is all data have flushed out to all replicas
and all replicas have done posix fsync equivalent - ie the OS has
flushed it to the disk device (but the disk may have it in its cache).
|
protected void |
recoverLease(boolean recoverLeaseOnCloseException) |
If recoverLeaseOnCloseException is true and an exception occurs when
closing a file, recover lease.
|
void |
setArtificialSlowdown(long period) |
|
void |
setChunksPerPacket(int value) |
|
void |
setDropBehind(java.lang.Boolean dropBehind) |
|
protected void |
start() |
|
void |
sync() |
Deprecated.
|
java.lang.String |
toString() |
|
protected void |
writeChunk(byte[] b,
int offset,
int len,
byte[] checksum,
int ckoff,
int cklen) |
|
protected void |
writeChunk(java.nio.ByteBuffer buffer,
int len,
byte[] checksum,
int ckoff,
int cklen) |
protected final DFSClient dfsClient
protected final ByteArrayManager byteArrayManager
protected volatile boolean closed
protected final java.lang.String src
protected final long fileId
protected final long blockSize
protected final int bytesPerChecksum
protected DFSPacket currentPacket
protected org.apache.hadoop.hdfs.DataStreamer streamer
protected int packetSize
protected int chunksPerPacket
protected long lastFlushOffset
protected long initialFileSize
protected boolean shouldSyncBlock
protected final java.util.concurrent.atomic.AtomicReference<CachingStrategy> cachingStrategy
protected DFSOutputStream(DFSClient dfsClient, java.lang.String src, HdfsFileStatus stat, java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag, org.apache.hadoop.util.Progressable progress, org.apache.hadoop.util.DataChecksum checksum, java.lang.String[] favoredNodes, boolean createStreamer)
protected DFSPacket createPacket(int packetSize, int chunksPerPkt, long offsetInBlock, long seqno, boolean lastPacketInBlock) throws java.io.InterruptedIOException
Use ByteArrayManager to create buffer for non-heartbeat packets.
Throws: java.io.InterruptedIOException

protected void checkClosed()
throws java.io.IOException
Overrides: checkClosed in class org.apache.hadoop.fs.FSOutputSummer
Throws: java.io.IOException

@VisibleForTesting public DatanodeInfo[] getPipeline()
protected void computePacketChunkSize(int psize,
int csize)
protected org.apache.hadoop.tracing.TraceScope createWriteTraceScope()
Overrides: createWriteTraceScope in class org.apache.hadoop.fs.FSOutputSummer

protected void writeChunk(byte[] b,
int offset,
int len,
byte[] checksum,
int ckoff,
int cklen)
throws java.io.IOException
Specified by: writeChunk in class org.apache.hadoop.fs.FSOutputSummer
Throws: java.io.IOException

protected void writeChunk(java.nio.ByteBuffer buffer,
int len,
byte[] checksum,
int ckoff,
int cklen)
throws java.io.IOException
Throws: java.io.IOException

protected void adjustChunkBoundary()

public boolean hasCapability(java.lang.String capability)
Specified by: hasCapability in interface org.apache.hadoop.fs.StreamCapabilities
Overrides: hasCapability in class org.apache.hadoop.fs.FSOutputSummer

public void hflush()
throws java.io.IOException
Specified by: hflush in interface org.apache.hadoop.fs.Syncable
Throws: java.io.IOException

public void hsync()
throws java.io.IOException
Specified by: hsync in interface org.apache.hadoop.fs.Syncable
Throws: java.io.IOException

@Deprecated
public void sync()
throws java.io.IOException
Specified by: sync in interface org.apache.hadoop.fs.Syncable
Throws: java.io.IOException

public void hsync(java.util.EnumSet<HdfsDataOutputStream.SyncFlag> syncFlags) throws java.io.IOException
To guarantee durable sync across block boundaries the stream should be created with CreateFlag.SYNC_BLOCK.
Parameters: syncFlags - Indicate the semantic of the sync. Currently used to specify whether or not to update the block length in NameNode.
Throws: java.io.IOException

@Deprecated
public int getNumCurrentReplicas()
throws java.io.IOException
Throws: java.io.IOException

public int getCurrentBlockReplication()
throws java.io.IOException
Note that this is not a public API; use HdfsDataOutputStream.getCurrentBlockReplication() instead.
Throws: java.io.IOException

protected void flushInternal()
throws java.io.IOException
Throws: java.io.IOException

protected void start()
protected void closeThreads(boolean force)
throws java.io.IOException
Throws: java.io.IOException

public void close()
throws java.io.IOException
Specified by: close in interface java.lang.AutoCloseable
Specified by: close in interface java.io.Closeable
Overrides: close in class java.io.OutputStream
Throws: java.io.IOException

protected void closeImpl()
throws java.io.IOException
Throws: java.io.IOException

protected void recoverLease(boolean recoverLeaseOnCloseException)
protected void completeFile(ExtendedBlock last) throws java.io.IOException
Throws: java.io.IOException

@VisibleForTesting public void setArtificialSlowdown(long period)
@VisibleForTesting public void setChunksPerPacket(int value)
public long getInitialLen()
protected java.util.EnumSet<AddBlockFlag> getAddBlockFlags()
public org.apache.hadoop.fs.FileEncryptionInfo getFileEncryptionInfo()
protected long flushInternalWithoutWaitingAck()
throws java.io.IOException
Throws: java.io.IOException

public void setDropBehind(java.lang.Boolean dropBehind)
throws java.io.IOException
Specified by: setDropBehind in interface org.apache.hadoop.fs.CanSetDropBehind
Throws: java.io.IOException

@VisibleForTesting public long getFileId()
@VisibleForTesting public java.lang.String getNamespace()
@VisibleForTesting public java.lang.String getUniqKey()
protected org.apache.hadoop.hdfs.DataStreamer getStreamer()
public java.lang.String toString()
Overrides: toString in class java.lang.Object

Copyright © 2008–2025 Apache Software Foundation. All rights reserved.