@Private @Unstable public class LogAggregationIndexedFileController extends LogAggregationFileController
| Modifier and Type | Class | Description |
|---|---|---|
static class |
LogAggregationIndexedFileController.IndexedFileLogMeta |
This IndexedFileLogMeta includes the meta information
for a single file which would be aggregated in one
Log aggregation cycle.
|
static class |
LogAggregationIndexedFileController.IndexedLogsMeta |
This IndexedLogsMeta includes all the meta information
for the aggregated log file.
|
static class |
LogAggregationIndexedFileController.IndexedPerAggregationLogMeta |
This IndexedPerAggregationLogMeta includes the meta information
for all files which would be aggregated in one
Log aggregation cycle.
|
| Modifier and Type | Field | Description |
|---|---|---|
static java.lang.String |
CHECK_SUM_FILE_SUFFIX |
APP_DIR_PERMISSIONS, APP_LOG_FILE_UMASK, conf, fileControllerName, fsSupportsChmod, maxRetry, remoteOlderRootLogDirSuffix, remoteRootLogDir, remoteRootLogDirSuffix, retentionSize, retryTimeout, TLDIR_PERMISSIONS, usersAclsManager

| Constructor | Description |
|---|---|
LogAggregationIndexedFileController() |
| Modifier and Type | Method | Description |
|---|---|---|
void |
closeWriter() |
Close the writer.
|
org.apache.hadoop.fs.FileStatus |
getAllChecksumFiles(java.util.Map<java.lang.String,org.apache.hadoop.fs.FileStatus> fileMap,
java.lang.String fileName) |
|
java.util.Map<org.apache.hadoop.yarn.api.records.ApplicationAccessType,java.lang.String> |
getApplicationAcls(org.apache.hadoop.fs.Path aggregatedLogPath,
org.apache.hadoop.yarn.api.records.ApplicationId appId) |
Returns ACLs for the application.
|
java.lang.String |
getApplicationOwner(org.apache.hadoop.fs.Path aggregatedLogPath,
org.apache.hadoop.yarn.api.records.ApplicationId appId) |
Returns the owner of the application.
|
static int |
getFSInputBufferSize(org.apache.hadoop.conf.Configuration conf) |
|
static int |
getFSOutputBufferSize(org.apache.hadoop.conf.Configuration conf) |
|
java.util.Map<java.lang.String,java.util.List<ContainerLogFileInfo>> |
getLogMetaFilesOfNode(ExtendedLogMetaRequest logRequest,
org.apache.hadoop.fs.FileStatus currentNodeFile,
org.apache.hadoop.yarn.api.records.ApplicationId appId) |
Returns log file metadata for a node grouped by containers.
|
java.util.List<org.apache.hadoop.fs.FileStatus> |
getNodeLogFileToRead(java.util.List<org.apache.hadoop.fs.FileStatus> nodeFiles,
java.lang.String nodeId,
org.apache.hadoop.yarn.api.records.ApplicationId appId) |
|
org.apache.hadoop.fs.Path |
getOlderRemoteAppLogDir(org.apache.hadoop.yarn.api.records.ApplicationId appId,
java.lang.String user) |
Get the older remote application directory for log aggregation.
|
org.apache.hadoop.fs.Path |
getRemoteAppLogDir(org.apache.hadoop.yarn.api.records.ApplicationId appId,
java.lang.String user) |
Get the remote application directory for log aggregation.
|
long |
getRollOverLogMaxSize(org.apache.hadoop.conf.Configuration conf) |
|
Clock |
getSystemClock() |
|
void |
initializeWriter(LogAggregationFileControllerContext context) |
Initialize the writer.
|
void |
initInternal(org.apache.hadoop.conf.Configuration conf) |
Derived classes initialize themselves using this method.
|
boolean |
isRollover(org.apache.hadoop.fs.FileContext fc,
org.apache.hadoop.fs.Path candidate) |
|
LogAggregationIndexedFileController.IndexedLogsMeta |
loadIndexedLogsMeta(org.apache.hadoop.fs.Path remoteLogPath,
long end,
org.apache.hadoop.yarn.api.records.ApplicationId appId) |
|
java.util.Map<java.lang.String,java.lang.Long> |
parseCheckSumFiles(java.util.List<org.apache.hadoop.fs.FileStatus> fileList) |
|
void |
postWrite(LogAggregationFileControllerContext record) |
Operations needed after write the log content.
|
boolean |
readAggregatedLogs(ContainerLogsRequest logRequest,
java.io.OutputStream os) |
Output container log.
|
java.util.List<ContainerLogMeta> |
readAggregatedLogsMeta(ContainerLogsRequest logRequest) |
Return a list of
ContainerLogMeta for an application
from Remote FileSystem. |
void |
renderAggregatedLogsBlock(HtmlBlock.Block html,
View.ViewContext context) |
Render Aggregated Logs block.
|
void |
write(AggregatedLogFormat.LogKey logKey,
AggregatedLogFormat.LogValue logValue) |
Write the log content.
|
aggregatedLogSuffix, belongsToAppAttempt, checkExists, cleanOldLogs, closePrintStream, createAppDir, createDir, extractRemoteOlderRootLogDirSuffix, extractRemoteRootLogDir, extractRemoteRootLogDirSuffix, getApplicationDirectoriesOfUser, getFileControllerName, getFileSystem, getNodeFilesOfApplicationDirectory, getRemoteNodeLogFileForApp, getRemoteOlderRootLogDirSuffix, getRemoteRootLogDir, getRemoteRootLogDirSuffix, initialize, isFsSupportsChmod, verifyAndCreateRemoteLogDir

@VisibleForTesting public static final java.lang.String CHECK_SUM_FILE_SUFFIX
public void initInternal(org.apache.hadoop.conf.Configuration conf)
Overrides: initInternal in class LogAggregationFileController
Parameters: conf - the Configuration

public void initializeWriter(LogAggregationFileControllerContext context) throws java.io.IOException
Overrides: initializeWriter in class LogAggregationFileController
Parameters: context - the LogAggregationFileControllerContext
Throws: java.io.IOException - if fails to initialize the writer

public void closeWriter()
Overrides: closeWriter in class LogAggregationFileController

public void write(AggregatedLogFormat.LogKey logKey, AggregatedLogFormat.LogValue logValue) throws java.io.IOException
Overrides: write in class LogAggregationFileController
Parameters: logKey - the log key; logValue - the log content
Throws: java.io.IOException - if fails to write the logs

public void postWrite(LogAggregationFileControllerContext record) throws java.lang.Exception
Overrides: postWrite in class LogAggregationFileController
Parameters: record - the LogAggregationFileControllerContext
Throws: java.lang.Exception - if anything fails

public boolean readAggregatedLogs(ContainerLogsRequest logRequest, java.io.OutputStream os) throws java.io.IOException
Overrides: readAggregatedLogs in class LogAggregationFileController
Parameters: logRequest - ContainerLogsRequest; os - the output stream
Throws: java.io.IOException - if we can not access the log file.

public java.util.Map<java.lang.String,java.util.List<ContainerLogFileInfo>> getLogMetaFilesOfNode(ExtendedLogMetaRequest logRequest, org.apache.hadoop.fs.FileStatus currentNodeFile, org.apache.hadoop.yarn.api.records.ApplicationId appId) throws java.io.IOException
Overrides: getLogMetaFilesOfNode in class LogAggregationFileController
Parameters: logRequest - extended query information holder; currentNodeFile - file status of a node in an application directory; appId - id of the application, which is the same as in node path
Throws: java.io.IOException - if there is no node file

public java.util.List<ContainerLogMeta> readAggregatedLogsMeta(ContainerLogsRequest logRequest) throws java.io.IOException
Description: Return a list of ContainerLogMeta for an application from Remote FileSystem.
Overrides: readAggregatedLogsMeta in class LogAggregationFileController
Parameters: logRequest - ContainerLogsRequest
Returns: a list of ContainerLogMeta
Throws: java.io.IOException - if there is no available log file

@Private
public java.util.Map<java.lang.String,java.lang.Long> parseCheckSumFiles(java.util.List<org.apache.hadoop.fs.FileStatus> fileList)
throws java.io.IOException
Throws: java.io.IOException

@Private
public java.util.List<org.apache.hadoop.fs.FileStatus> getNodeLogFileToRead(java.util.List<org.apache.hadoop.fs.FileStatus> nodeFiles,
java.lang.String nodeId,
org.apache.hadoop.yarn.api.records.ApplicationId appId)
throws java.io.IOException
Throws: java.io.IOException

@Private
public org.apache.hadoop.fs.FileStatus getAllChecksumFiles(java.util.Map<java.lang.String,org.apache.hadoop.fs.FileStatus> fileMap,
java.lang.String fileName)
public void renderAggregatedLogsBlock(HtmlBlock.Block html, View.ViewContext context)
Overrides: renderAggregatedLogsBlock in class LogAggregationFileController
Parameters: html - the html; context - the ViewContext

public java.lang.String getApplicationOwner(org.apache.hadoop.fs.Path aggregatedLogPath,
org.apache.hadoop.yarn.api.records.ApplicationId appId)
throws java.io.IOException
Overrides: getApplicationOwner in class LogAggregationFileController
Parameters: aggregatedLogPath - the aggregatedLog path; appId - the ApplicationId
Throws: java.io.IOException - if we can not get the application owner

public java.util.Map<org.apache.hadoop.yarn.api.records.ApplicationAccessType,java.lang.String> getApplicationAcls(org.apache.hadoop.fs.Path aggregatedLogPath,
org.apache.hadoop.yarn.api.records.ApplicationId appId)
throws java.io.IOException
Overrides: getApplicationAcls in class LogAggregationFileController
Parameters: aggregatedLogPath - the aggregatedLog path; appId - the ApplicationId
Throws: java.io.IOException - if we can not get the application acls

public org.apache.hadoop.fs.Path getRemoteAppLogDir(org.apache.hadoop.yarn.api.records.ApplicationId appId,
java.lang.String user)
throws java.io.IOException
Overrides: getRemoteAppLogDir in class LogAggregationFileController
Parameters: appId - the Application ID; user - the Application Owner
Throws: java.io.IOException - if can not find the remote application directory

public org.apache.hadoop.fs.Path getOlderRemoteAppLogDir(org.apache.hadoop.yarn.api.records.ApplicationId appId,
java.lang.String user)
throws java.io.IOException
Overrides: getOlderRemoteAppLogDir in class LogAggregationFileController
Parameters: appId - the Application ID; user - the Application Owner
Throws: java.io.IOException - if can not find the remote application directory

@Private public LogAggregationIndexedFileController.IndexedLogsMeta loadIndexedLogsMeta(org.apache.hadoop.fs.Path remoteLogPath, long end, org.apache.hadoop.yarn.api.records.ApplicationId appId) throws java.io.IOException
Throws: java.io.IOException

@Private public static int getFSOutputBufferSize(org.apache.hadoop.conf.Configuration conf)
@Private public static int getFSInputBufferSize(org.apache.hadoop.conf.Configuration conf)
@Private @VisibleForTesting public long getRollOverLogMaxSize(org.apache.hadoop.conf.Configuration conf)
@Private
@VisibleForTesting
public boolean isRollover(org.apache.hadoop.fs.FileContext fc,
org.apache.hadoop.fs.Path candidate)
throws java.io.IOException
Throws: java.io.IOException

@Private @VisibleForTesting public Clock getSystemClock()
Copyright © 2008–2025 Apache Software Foundation. All rights reserved.