@@ -23,7 +23,6 @@ import java.nio.file.{Files, NoSuchFileException}
 import java.util.concurrent._
 import java.util.concurrent.atomic.AtomicInteger
 import kafka.server.{KafkaConfig, KafkaRaftServer}
-import kafka.server.metadata.BrokerMetadataPublisher.info
 import kafka.utils.threadsafe
 import kafka.utils.{CoreUtils, Logging, Pool}
 import org.apache.kafka.common.{DirectoryId, KafkaException, TopicPartition, Uuid}
@@ -42,7 +41,7 @@ import org.apache.kafka.metadata.properties.{MetaProperties, MetaPropertiesEnsem
 import java.util.{Collections, Optional, OptionalLong, Properties}
 import org.apache.kafka.server.metrics.KafkaMetricsGroup
 import org.apache.kafka.server.util.{FileLock, Scheduler}
-import org.apache.kafka.storage.internals.log.{CleanerConfig, LogCleaner, LogConfig, LogDirFailureChannel, LogOffsetsListener, ProducerStateManagerConfig, RemoteIndexCache, UnifiedLog}
+import org.apache.kafka.storage.internals.log.{CleanerConfig, LogCleaner, LogConfig, LogDirFailureChannel, LogManager => JLogManager, LogOffsetsListener, ProducerStateManagerConfig, RemoteIndexCache, UnifiedLog}
 import org.apache.kafka.storage.internals.checkpoint.{CleanShutdownFileHandler, OffsetCheckpointFile}
 import org.apache.kafka.storage.log.metrics.BrokerTopicStats
 
@@ -80,8 +79,6 @@ class LogManager(logDirs: Seq[File],
                  remoteStorageSystemEnable: Boolean,
                  val initialTaskDelayMs: Long) extends Logging {
 
-  import LogManager._
-
   private val metricsGroup = new KafkaMetricsGroup(this.getClass)
 
   private val logCreationOrDeletionLock = new Object
@@ -127,9 +124,9 @@ class LogManager(logDirs: Seq[File],
   def directoryIdsSet: Predef.Set[Uuid] = directoryIds.values.toSet
 
   @volatile private var recoveryPointCheckpoints = liveLogDirs.map(dir =>
-    (dir, new OffsetCheckpointFile(new File(dir, RecoveryPointCheckpointFile), logDirFailureChannel))).toMap
+    (dir, new OffsetCheckpointFile(new File(dir, JLogManager.RECOVERY_POINT_CHECKPOINT_FILE), logDirFailureChannel))).toMap
   @volatile private var logStartOffsetCheckpoints = liveLogDirs.map(dir =>
-    (dir, new OffsetCheckpointFile(new File(dir, LogStartOffsetCheckpointFile), logDirFailureChannel))).toMap
+    (dir, new OffsetCheckpointFile(new File(dir, JLogManager.LOG_START_OFFSET_CHECKPOINT_FILE), logDirFailureChannel))).toMap
 
   private val preferredLogDirs = new ConcurrentHashMap[TopicPartition, String]()
 
@@ -261,7 +258,7 @@ class LogManager(logDirs: Seq[File],
   private def lockLogDirs(dirs: Seq[File]): Seq[FileLock] = {
     dirs.flatMap { dir =>
       try {
-        val lock = new FileLock(new File(dir, LockFileName))
+        val lock = new FileLock(new File(dir, JLogManager.LOCK_FILE_NAME))
         if (!lock.tryLock())
           throw new KafkaException("Failed to acquire lock on file .lock in " + lock.file.getParent +
             ". A Kafka instance in another process or thread is using this directory.")
@@ -680,7 +677,7 @@ class LogManager(logDirs: Seq[File],
 
     try {
       jobs.foreachEntry { (dir, dirJobs) =>
-        if (waitForAllToComplete(dirJobs,
+        if (JLogManager.waitForAllToComplete(dirJobs.toList.asJava,
           e => warn(s"There was an error in one of the threads during LogManager shutdown: ${e.getCause}"))) {
           val logs = logsInDir(localLogsByDir, dir)
@@ -1520,25 +1517,6 @@ class LogManager(logDirs: Seq[File],
   }
 }
 
 object LogManager {
-  val LockFileName = ".lock"
-
-  /**
-   * Wait all jobs to complete
-   * @param jobs jobs
-   * @param callback this will be called to handle the exception caused by each Future#get
-   * @return true if all pass. Otherwise, false
-   */
-  private[log] def waitForAllToComplete(jobs: Seq[Future[_]], callback: Throwable => Unit): Boolean = {
-    jobs.count(future => Try(future.get) match {
-      case Success(_) => false
-      case Failure(e) =>
-        callback(e)
-        true
-    }) == 0
-  }
-
-  val RecoveryPointCheckpointFile = "recovery-point-offset-checkpoint"
-  val LogStartOffsetCheckpointFile = "log-start-offset-checkpoint"
 
   def apply(config: KafkaConfig,
             initialOfflineDirs: Seq[String],
@@ -1575,45 +1553,4 @@ object LogManager {
       remoteStorageSystemEnable = config.remoteLogManagerConfig.isRemoteStorageSystemEnabled,
       initialTaskDelayMs = config.logInitialTaskDelayMs)
   }
-
-  /**
-   * Returns true if the given log should not be on the current broker
-   * according to the metadata image.
-   *
-   * @param brokerId The ID of the current broker.
-   * @param newTopicsImage The new topics image after broker has been reloaded
-   * @param log The log object to check
-   * @return true if the log should not exist on the broker, false otherwise.
-   */
-  def isStrayKraftReplica(
-    brokerId: Int,
-    newTopicsImage: TopicsImage,
-    log: UnifiedLog
-  ): Boolean = {
-    if (log.topicId.isEmpty) {
-      // Missing topic ID could result from storage failure or unclean shutdown after topic creation but before flushing
-      // data to the `partition.metadata` file. And before appending data to the log, the `partition.metadata` is always
-      // flushed to disk. So if the topic ID is missing, it mostly means no data was appended, and we can treat this as
-      // a stray log.
-      info(s"The topicId does not exist in $log, treat it as a stray log")
-      return true
-    }
-
-    val topicId = log.topicId.get
-    val partitionId = log.topicPartition.partition()
-    Option(newTopicsImage.getPartition(topicId, partitionId)) match {
-      case Some(partition) =>
-        if (!partition.replicas.contains(brokerId)) {
-          info(s"Found stray log dir $log: the current replica assignment ${partition.replicas.mkString("[", ", ", "]")} " +
-            s"does not contain the local brokerId $brokerId.")
-          true
-        } else {
-          false
-        }
-
-      case None =>
-        info(s"Found stray log dir $log: the topicId $topicId does not exist in the metadata image")
-        true
-    }
-  }
 }
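
A minimal, self-contained sketch (not part of the patch) of the calling convention the diff adopts: the Java LogManager from the storage module is aliased to JLogManager on import so it can coexist with the Scala class of the same name, and the Scala job collection is converted with asJava at the call boundary. The Consumer[Throwable] callback type is an assumption inferred from the call site above; per the removed scaladoc, the method returns true only if every Future#get succeeds.

import java.util.concurrent.{Callable, Executors, Future}
import scala.jdk.CollectionConverters._
// Rename-on-import, as in the patch, so the Java class does not clash
// with the Scala kafka.log.LogManager defined in this file.
import org.apache.kafka.storage.internals.log.{LogManager => JLogManager}

object WaitForAllSketch {
  def main(args: Array[String]): Unit = {
    val pool = Executors.newFixedThreadPool(2)
    // Two trivial jobs standing in for the per-directory flush/checkpoint
    // futures that LogManager.shutdown collects.
    val jobs: List[Future[_]] = List(
      pool.submit(new Callable[Unit] { override def call(): Unit = () }),
      pool.submit(new Callable[Unit] { override def call(): Unit = () })
    )
    // Scala List -> java.util.List at the boundary, mirroring
    // `dirJobs.toList.asJava`; the callback receives the exception
    // raised by each failing Future#get.
    val allOk = JLogManager.waitForAllToComplete(
      jobs.asJava,
      (e: Throwable) => System.err.println(s"job failed: ${e.getCause}")
    )
    pool.shutdown()
    println(s"all jobs completed cleanly: $allOk")
  }
}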