/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.wal;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
import org.apache.hadoop.ipc.RemoteException;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;

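/**
 * Testing {@link HLog} splitting code.
 */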
@Category(LargeTests.class)
public class TestHLogSplit {

  private final static Log LOG = LogFactory.getLog(TestHLogSplit.class);

  private Configuration conf;
  private FileSystem fs;

  protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private static final Path hbaseDir = new Path("/hbase");
  private static final Path hlogDir = new Path(hbaseDir, "hlog");
  private static final Path oldLogDir = new Path(hbaseDir, "hlog.old");
  private static final Path corruptDir = new Path(hbaseDir, ".corrupt");

  private static final int NUM_WRITERS = 10;
  private static final int ENTRIES = 10;

  private HLog.Writer[] writer = new HLog.Writer[NUM_WRITERS];
  private long seq = 0;
  private static final byte[] TABLE_NAME = "t1".getBytes();
  private static final byte[] FAMILY = "f1".getBytes();
  private static final byte[] QUALIFIER = "q1".getBytes();
  private static final byte[] VALUE = "v1".getBytes();
  private static final String HLOG_FILE_PREFIX = "hlog.dat.";
  private static List<String> regions;
  private static final String HBASE_SKIP_ERRORS = "hbase.hlog.split.skip.errors";
  private static final Path tabledir = new Path(hbaseDir, Bytes.toString(TABLE_NAME));

  static enum Corruptions {
    INSERT_GARBAGE_ON_FIRST_LINE,
    INSERT_GARBAGE_IN_THE_MIDDLE,
    APPEND_GARBAGE,
    TRUNCATE,
  }

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.getConfiguration().setStrings("hbase.rootdir", hbaseDir.toString());
    TEST_UTIL.getConfiguration().setClass("hbase.regionserver.hlog.writer.impl",
        InstrumentedSequenceFileLogWriter.class, HLog.Writer.class);

    TEST_UTIL.startMiniDFSCluster(2);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniDFSCluster();
  }

  @Before
  public void setUp() throws Exception {
    flushToConsole("Cleaning up cluster for new test\n"
        + "--------------------------");
    conf = TEST_UTIL.getConfiguration();
    fs = TEST_UTIL.getDFSCluster().getFileSystem();
    FileStatus[] entries = fs.listStatus(new Path("/"));
    flushToConsole("Num entries in /:" + entries.length);
    for (FileStatus dir : entries) {
      assertTrue("Deleting " + dir.getPath(),
          fs.delete(dir.getPath(), true));
    }

    fs.mkdirs(hlogDir);
    seq = 0;
    regions = new ArrayList<String>();
    Collections.addAll(regions, "bbb", "ccc");
    InstrumentedSequenceFileLogWriter.activateFailure = false;
  }

  @After
  public void tearDown() throws Exception {
  }
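  /**
   * Verifies that {@link HLogSplitter#getRegionSplitEditsPath} places recovered
   * edits for an entry against .META. under the first META region's directory.
   */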
  @Test
  public void testRecoveredEditsPathForMeta() throws IOException {
    FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
    byte[] encoded = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
    Path tdir = new Path(hbaseDir, Bytes.toString(HConstants.META_TABLE_NAME));
    Path regiondir = new Path(tdir,
        HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
    fs.mkdirs(regiondir);
    long now = System.currentTimeMillis();
    HLog.Entry entry =
        new HLog.Entry(new HLogKey(encoded,
            HConstants.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID),
            new WALEdit());
    Path p = HLogSplitter.getRegionSplitEditsPath(fs, entry, hbaseDir, true);
    String parentOfParent = p.getParent().getParent().getName();
    assertEquals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName(), parentOfParent);
  }

  @Test(expected = OrphanHLogAfterSplitException.class)
  public void testSplitFailsIfNewHLogGetsCreatedAfterSplitStarted()
      throws IOException {
    AtomicBoolean stop = new AtomicBoolean(false);

    assertFalse("Previous test should clean up table dir",
        fs.exists(new Path("/hbase/t1")));

    generateHLogs(-1);

    try {
      (new ZombieNewLogWriterRegionServer(stop)).start();
      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, hlogDir, oldLogDir, fs);
      logSplitter.splitLog();
    } finally {
      stop.set(true);
    }
  }

  @Test
  public void testSplitPreservesEdits() throws IOException {
    final String REGION = "region__1";
    regions.clear();
    regions.add(REGION);

    generateHLogs(1, 10, -1);
    fs.initialize(fs.getUri(), conf);
    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    Path originalLog = (fs.listStatus(oldLogDir))[0].getPath();
    Path splitLog = getLogForRegion(hbaseDir, TABLE_NAME, REGION);

    assertTrue("edits differ after split", logsAreEqual(originalLog, splitLog));
  }
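  /**
   * Empty log files injected before and after the real logs should be skipped;
   * every edit from the real logs must still be split out to each region.
   */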
  @Test
  public void testEmptyLogFiles() throws IOException {
    injectEmptyFile(".empty", true);
    generateHLogs(Integer.MAX_VALUE);
    injectEmptyFile("empty", true);

    fs.initialize(fs.getUri(), conf);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
    }
  }

  @Test
  public void testEmptyOpenLogFiles() throws IOException {
    injectEmptyFile(".empty", false);
    generateHLogs(Integer.MAX_VALUE);
    injectEmptyFile("empty", false);

    fs.initialize(fs.getUri(), conf);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
    }
  }

  @Test
  public void testOpenZeroLengthReportedFileButWithDataGetsSplit() throws IOException {
    // generate logs but leave hlog.dat.5 open
    generateHLogs(5);

    fs.initialize(fs.getUri(), conf);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
    }
  }
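  /**
   * With hbase.hlog.split.skip.errors set, garbage appended after the last valid
   * entry should not cost any edits.
   */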
  @Test
  public void testTrailingGarbageCorruptionFileSkipErrorsPasses() throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, true);
    generateHLogs(Integer.MAX_VALUE);
    corruptHLog(new Path(hlogDir, HLOG_FILE_PREFIX + "5"),
        Corruptions.APPEND_GARBAGE, true, fs);
    fs.initialize(fs.getUri(), conf);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();
    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
    }
  }

  @Test
  public void testFirstLineCorruptionLogFileSkipErrorsPasses() throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, true);
    generateHLogs(Integer.MAX_VALUE);
    corruptHLog(new Path(hlogDir, HLOG_FILE_PREFIX + "5"),
        Corruptions.INSERT_GARBAGE_ON_FIRST_LINE, true, fs);
    fs.initialize(fs.getUri(), conf);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();
    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      assertEquals((NUM_WRITERS - 1) * ENTRIES, countHLog(logfile, fs, conf));
    }
  }
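  /**
   * Garbage injected into the middle of a log: with skip.errors set, the splitter
   * should still recover everything up to the corrupted area.
   */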
  @Test
  public void testMiddleGarbageCorruptionSkipErrorsReadsHalfOfFile() throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, true);
    generateHLogs(Integer.MAX_VALUE);
    corruptHLog(new Path(hlogDir, HLOG_FILE_PREFIX + "5"),
        Corruptions.INSERT_GARBAGE_IN_THE_MIDDLE, false, fs);
    fs.initialize(fs.getUri(), conf);
    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);

      // all entries from the uncorrupted logs are recoverable, plus at least
      // the first half of the corrupted one
      int goodEntries = (NUM_WRITERS - 1) * ENTRIES;
      int firstHalfEntries = (int) Math.ceil(ENTRIES / 2.0) - 1;
      assertTrue("The file up to the corrupted area hasn't been parsed",
          goodEntries + firstHalfEntries <= countHLog(logfile, fs, conf));
    }
  }

  @Test
  public void testCorruptedFileGetsArchivedIfSkipErrors() throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, true);
    Class<?> backupClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
        Reader.class);
    InstrumentedSequenceFileLogWriter.activateFailure = false;
    HLog.resetLogReaderClass();

    try {
      Path c1 = new Path(hlogDir, HLOG_FILE_PREFIX + "0");
      conf.setClass("hbase.regionserver.hlog.reader.impl",
          FaultySequenceFileLogReader.class, HLog.Reader.class);
      for (FaultySequenceFileLogReader.FailureType failureType : FaultySequenceFileLogReader.FailureType.values()) {
        conf.set("faultysequencefilelogreader.failuretype", failureType.name());
        generateHLogs(1, ENTRIES, -1);
        fs.initialize(fs.getUri(), conf);
        HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
            hbaseDir, hlogDir, oldLogDir, fs);
        logSplitter.splitLog();
        FileStatus[] archivedLogs = fs.listStatus(corruptDir);
        assertEquals("expected a different file", c1.getName(),
            archivedLogs[0].getPath().getName());
        assertEquals(1, archivedLogs.length);
        fs.delete(new Path(oldLogDir, HLOG_FILE_PREFIX + "0"), false);
      }
    } finally {
      conf.setClass("hbase.regionserver.hlog.reader.impl", backupClass,
          Reader.class);
      HLog.resetLogReaderClass();
    }
  }

  @Test(expected = IOException.class)
  public void testTrailingGarbageCorruptionLogFileSkipErrorsFalseThrows()
      throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, false);
    Class<?> backupClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
        Reader.class);
    InstrumentedSequenceFileLogWriter.activateFailure = false;
    HLog.resetLogReaderClass();

    try {
      conf.setClass("hbase.regionserver.hlog.reader.impl",
          FaultySequenceFileLogReader.class, HLog.Reader.class);
      conf.set("faultysequencefilelogreader.failuretype",
          FaultySequenceFileLogReader.FailureType.BEGINNING.name());
      generateHLogs(Integer.MAX_VALUE);
      fs.initialize(fs.getUri(), conf);
      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, hlogDir, oldLogDir, fs);
      logSplitter.splitLog();
    } finally {
      conf.setClass("hbase.regionserver.hlog.reader.impl", backupClass,
          Reader.class);
      HLog.resetLogReaderClass();
    }
  }

  @Test
  public void testCorruptedLogFilesSkipErrorsFalseDoesNotTouchLogs()
      throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, false);
    Class<?> backupClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
        Reader.class);
    InstrumentedSequenceFileLogWriter.activateFailure = false;
    HLog.resetLogReaderClass();

    try {
      conf.setClass("hbase.regionserver.hlog.reader.impl",
          FaultySequenceFileLogReader.class, HLog.Reader.class);
      conf.set("faultysequencefilelogreader.failuretype",
          FaultySequenceFileLogReader.FailureType.BEGINNING.name());
      generateHLogs(-1);
      fs.initialize(fs.getUri(), conf);
      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, hlogDir, oldLogDir, fs);
      try {
        logSplitter.splitLog();
      } catch (IOException e) {
        assertEquals(
            "if skip.errors is false all files should remain in place",
            NUM_WRITERS, fs.listStatus(hlogDir).length);
      }
    } finally {
      conf.setClass("hbase.regionserver.hlog.reader.impl", backupClass,
          Reader.class);
      HLog.resetLogReaderClass();
    }
  }

  @Test
  public void testEOFisIgnored() throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, false);

    final String REGION = "region__1";
    regions.clear();
    regions.add(REGION);

    int entryCount = 10;
    Path c1 = new Path(hlogDir, HLOG_FILE_PREFIX + "0");
    generateHLogs(1, entryCount, -1);
    corruptHLog(c1, Corruptions.TRUNCATE, true, fs);

    fs.initialize(fs.getUri(), conf);
    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    Path originalLog = (fs.listStatus(oldLogDir))[0].getPath();
    Path splitLog = getLogForRegion(hbaseDir, TABLE_NAME, REGION);

    int actualCount = 0;
    HLog.Reader in = HLog.getReader(fs, splitLog, conf);
    HLog.Entry entry;
    while ((entry = in.next()) != null) ++actualCount;
    assertEquals(entryCount - 1, actualCount);

    // should not have stored the EOF files as corrupt
    FileStatus[] archivedLogs = fs.listStatus(corruptDir);
    assertEquals(0, archivedLogs.length);
  }

  @Test
  public void testLogsGetArchivedAfterSplit() throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, false);

    generateHLogs(-1);

    fs.initialize(fs.getUri(), conf);
    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    FileStatus[] archivedLogs = fs.listStatus(oldLogDir);

    assertEquals("wrong number of files in the archive log", NUM_WRITERS, archivedLogs.length);
  }

  @Test
  public void testSplit() throws IOException {
    generateHLogs(-1);
    fs.initialize(fs.getUri(), conf);
    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
    }
  }

  @Test
  public void testLogDirectoryShouldBeDeletedAfterSuccessfulSplit()
      throws IOException {
    generateHLogs(-1);
    fs.initialize(fs.getUri(), conf);
    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();
    FileStatus[] statuses = null;
    try {
      statuses = fs.listStatus(hlogDir);
      if (statuses != null) {
        Assert.fail("Files left in log dir: " +
            Joiner.on(",").join(FileUtil.stat2Paths(statuses)));
      }
    } catch (FileNotFoundException e) {
      // hadoop 0.21 throws FNFE whereas hadoop 0.20 returns null
    }
  }
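  /**
   * A "zombie" regionserver drops a brand-new log into the split directory after
   * the split has started; the splitter must abort without archiving any logs.
   */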
  @Test
  public void testSplitWillNotTouchLogsIfNewHLogGetsCreatedAfterSplitStarted()
      throws IOException {
    AtomicBoolean stop = new AtomicBoolean(false);
    generateHLogs(-1);
    fs.initialize(fs.getUri(), conf);
    Thread zombie = new ZombieNewLogWriterRegionServer(stop);

    try {
      zombie.start();
      try {
        HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
            hbaseDir, hlogDir, oldLogDir, fs);
        logSplitter.splitLog();
      } catch (IOException ex) {
        // expected: the zombie wrote a new log after the split started
      }
      int logFilesNumber = fs.listStatus(hlogDir).length;

      assertEquals("Log files should not be archived if there's an extra file after split",
          NUM_WRITERS + 1, logFilesNumber);
    } finally {
      stop.set(true);
    }
  }

  @Test(expected = IOException.class)
  public void testSplitWillFailIfWritingToRegionFails() throws Exception {
    // leave the fifth log open so we can append the "trap" entry below
    generateHLogs(4);

    fs.initialize(fs.getUri(), conf);

    String region = "break";
    Path regiondir = new Path(tabledir, region);
    fs.mkdirs(regiondir);

    InstrumentedSequenceFileLogWriter.activateFailure = false;
    appendEntry(writer[4], TABLE_NAME, Bytes.toBytes(region),
        ("r" + 999).getBytes(), FAMILY, QUALIFIER, VALUE, 0);
    writer[4].close();

    try {
      InstrumentedSequenceFileLogWriter.activateFailure = true;
      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, hlogDir, oldLogDir, fs);
      logSplitter.splitLog();
    } catch (IOException e) {
      assertEquals("This exception is instrumented and should only be thrown for testing",
          e.getMessage());
      throw e;
    } finally {
      InstrumentedSequenceFileLogWriter.activateFailure = false;
    }
  }
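  /**
   * Splits the same set of logs twice (100 regions) and verifies that both passes
   * produce identical recovered-edits output.
   */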
  @Test
  public void testSplittingLargeNumberOfRegionsConsistency() throws IOException {
    regions.clear();
    for (int i = 0; i < 100; i++) {
      regions.add("region__" + i);
    }

    generateHLogs(1, 100, -1);
    fs.initialize(fs.getUri(), conf);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();
    fs.rename(oldLogDir, hlogDir);
    Path firstSplitPath = new Path(hbaseDir, Bytes.toString(TABLE_NAME) + ".first");
    Path splitPath = new Path(hbaseDir, Bytes.toString(TABLE_NAME));
    fs.rename(splitPath, firstSplitPath);

    fs.initialize(fs.getUri(), conf);
    logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    assertEquals(0, compareHLogSplitDirs(firstSplitPath, splitPath));
  }

  @Test
  public void testSplitDeletedRegion() throws IOException {
    regions.clear();
    String region = "region_that_splits";
    regions.add(region);

    generateHLogs(1);

    fs.initialize(fs.getUri(), conf);

    Path regiondir = new Path(tabledir, region);
    fs.delete(regiondir, true);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    assertFalse(fs.exists(regiondir));
  }

  @Test
  public void testIOEOnOutputThread() throws Exception {
    conf.setBoolean(HBASE_SKIP_ERRORS, false);

    generateHLogs(-1);

    fs.initialize(fs.getUri(), conf);

    // Set up a splitter whose writers throw an exception on every append
    HLogSplitter logSplitter = new HLogSplitter(
        conf, hbaseDir, hlogDir, oldLogDir, fs, null) {
      protected HLog.Writer createWriter(FileSystem fs, Path logfile, Configuration conf)
          throws IOException {
        HLog.Writer mockWriter = Mockito.mock(HLog.Writer.class);
        Mockito.doThrow(new IOException("Injected")).when(mockWriter)
            .append(Mockito.<HLog.Entry>any());
        return mockWriter;
      }
    };
    try {
      logSplitter.splitLog();
      fail("Didn't throw!");
    } catch (IOException ioe) {
      assertTrue(ioe.toString().contains("Injected"));
    }
  }

  @Test
  public void testMovedHLogDuringRecovery() throws Exception {
    generateHLogs(-1);

    fs.initialize(fs.getUri(), conf);

    // This partial mock will throw LEE for every file, simulating
    // files that were moved
    FileSystem spiedFs = Mockito.spy(fs);
    // The "File does not exist" part is very important,
    // that's how it comes out of HDFS
    Mockito.doThrow(new LeaseExpiredException("Injected: File does not exist")).
        when(spiedFs).append(Mockito.<Path>any());

    HLogSplitter logSplitter = new HLogSplitter(
        conf, hbaseDir, hlogDir, oldLogDir, spiedFs, null);

    try {
      logSplitter.splitLog();
      assertEquals(NUM_WRITERS, fs.listStatus(oldLogDir).length);
      assertFalse(fs.exists(hlogDir));
    } catch (IOException e) {
      fail("There shouldn't be any exception but: " + e.toString());
    }
  }
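  /**
   * Sanity check of the split codepath under concurrency: many fake edits with a
   * large buffer and writers running at full speed.
   */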
  @Test
  public void testThreading() throws Exception {
    doTestThreading(20000, 128 * 1024 * 1024, 0);
  }
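  /**
   * Same as above, but with slow writers and a tiny buffer so the reader blocks
   * on the writer threads.
   */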
  @Test
  public void testThreadingSlowWriterSmallBuffer() throws Exception {
    doTestThreading(200, 1024, 50);
  }
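  /**
   * Generates a stream of fake edits and splits it using mocked readers and
   * writers, so no real log files are involved.
   *
   * @param numFakeEdits   number of fake edits to push through the pipeline
   * @param bufferSize     size of the in-memory buffer between reader and writers
   * @param writerSlowness milliseconds each mocked append sleeps before returning
   */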
  private void doTestThreading(final int numFakeEdits,
      final int bufferSize,
      final int writerSlowness) throws Exception {

    Configuration localConf = new Configuration(conf);
    localConf.setInt("hbase.regionserver.hlog.splitlog.buffersize", bufferSize);

    // Create a fake log file; the mocked reader below supplies the entries
    FSDataOutputStream out = fs.create(new Path(hlogDir, HLOG_FILE_PREFIX + ".fake"));
    out.close();

    final List<String> regions = ImmutableList.of("r0", "r1", "r2", "r3", "r4");
    makeRegionDirs(fs, regions);

    HLogSplitter logSplitter = new HLogSplitter(
        localConf, hbaseDir, hlogDir, oldLogDir, fs, null) {

      protected HLog.Writer createWriter(FileSystem fs, Path logfile, Configuration conf)
          throws IOException {
        HLog.Writer mockWriter = Mockito.mock(HLog.Writer.class);
        Mockito.doAnswer(new Answer<Void>() {
          int expectedIndex = 0;

          @Override
          public Void answer(InvocationOnMock invocation) {
            if (writerSlowness > 0) {
              try {
                Thread.sleep(writerSlowness);
              } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
              }
            }
            HLog.Entry entry = (Entry) invocation.getArguments()[0];
            WALEdit edit = entry.getEdit();
            List<KeyValue> keyValues = edit.getKeyValues();
            assertEquals(1, keyValues.size());
            KeyValue kv = keyValues.get(0);

            // check that the edits for this region arrive in order
            assertEquals(expectedIndex, Bytes.toInt(kv.getRow()));
            expectedIndex++;
            return null;
          }
        }).when(mockWriter).append(Mockito.<HLog.Entry>any());
        return mockWriter;
      }

      protected Reader getReader(FileSystem fs, Path curLogFile, Configuration conf)
          throws IOException {
        Reader mockReader = Mockito.mock(Reader.class);
        Mockito.doAnswer(new Answer<HLog.Entry>() {
          int index = 0;

          @Override
          public HLog.Entry answer(InvocationOnMock invocation) throws Throwable {
            if (index >= numFakeEdits) return null;

            // fan the edits out over the regions round-robin
            int regionIdx = index % regions.size();
            byte region[] = new byte[] {(byte)'r', (byte) (0x30 + regionIdx)};

            HLog.Entry ret = createTestEntry(TABLE_NAME, region,
                Bytes.toBytes((int)(index / regions.size())),
                FAMILY, QUALIFIER, VALUE, index);
            index++;
            return ret;
          }
        }).when(mockReader).next();
        return mockReader;
      }
    };

    logSplitter.splitLog();

    // verify the number of written edits per region
    Map<byte[], Long> outputCounts = logSplitter.getOutputCounts();
    for (Map.Entry<byte[], Long> entry : outputCounts.entrySet()) {
      LOG.info("Got " + entry.getValue() + " output edits for region " +
          Bytes.toString(entry.getKey()));
      assertEquals(numFakeEdits / regions.size(), (long) entry.getValue());
    }
    assertEquals(regions.size(), outputCounts.size());
  }
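  /**
   * Simulates a regionserver whose log directory gets renamed for splitting while
   * it is still alive: the subsequent rollWriter() is expected to fail rather
   * than recreate the directory.
   */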
  @Test
  @Ignore("Need HADOOP-6886, HADOOP-6840, & HDFS-617 for this. HDFS 0.20.205.1+ should have this")
  public void testLogRollAfterSplitStart() throws IOException {
    // set the flush interval to a large number so it doesn't interfere with us
    final String F_INTERVAL = "hbase.regionserver.optionallogflushinterval";
    long oldFlushInterval = conf.getLong(F_INTERVAL, 1000);
    conf.setLong(F_INTERVAL, 1000 * 1000 * 100);
    HLog log = null;
    Path thisTestsDir = new Path(hbaseDir, "testLogRollAfterSplitStart");

    try {
      // put some entries in an HLog
      byte[] tableName = Bytes.toBytes(this.getClass().getName());
      HRegionInfo regioninfo = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      log = new HLog(fs, thisTestsDir, oldLogDir, conf);
      final int total = 20;
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName, tableName));
        HTableDescriptor htd = new HTableDescriptor(tableName);
        htd.addFamily(new HColumnDescriptor("column"));
        log.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd);
      }
      // send the data to the datanodes and close the current writer
      log.sync();
      log.cleanupCurrentWriter(log.getFilenum());

      // rename the directory so a rogue RS doesn't create more HLogs
      Path rsSplitDir = new Path(thisTestsDir.getParent(),
          thisTestsDir.getName() + "-splitting");
      fs.rename(thisTestsDir, rsSplitDir);
      LOG.debug("Renamed region directory: " + rsSplitDir);

      // process the old log files
      HLogSplitter splitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, rsSplitDir, oldLogDir, fs);
      splitter.splitLog();

      // now try to roll the HLog and verify failure
      try {
        log.rollWriter();
        Assert.fail("rollWriter() did not throw any exception.");
      } catch (IOException ioe) {
        if (ioe.getCause().getMessage().contains("FileNotFound")) {
          LOG.info("Got the expected exception: ", ioe.getCause());
        } else {
          Assert.fail("Unexpected exception: " + ioe);
        }
      }
    } finally {
      conf.setLong(F_INTERVAL, oldFlushInterval);
      if (log != null) {
        log.close();
      }
      if (fs.exists(thisTestsDir)) {
        fs.delete(thisTestsDir, true);
      }
    }
  }
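  /**
   * Simulates a dying regionserver that keeps appending to and syncing the last
   * log file while the split is running, until HDFS fences it off with a
   * RemoteException.
   */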
  class ZombieLastLogWriterRegionServer extends Thread {
    AtomicLong editsCount;
    AtomicBoolean stop;
    Path log;
    HLog.Writer lastLogWriter;

    public ZombieLastLogWriterRegionServer(HLog.Writer writer, AtomicLong counter, AtomicBoolean stop) {
      this.stop = stop;
      this.editsCount = counter;
      this.lastLogWriter = writer;
    }

    @Override
    public void run() {
      if (stop.get()) {
        return;
      }
      flushToConsole("starting");
      while (true) {
        try {
          String region = "juliet";

          fs.mkdirs(new Path(new Path(hbaseDir, region), region));
          appendEntry(lastLogWriter, TABLE_NAME, region.getBytes(),
              ("r" + editsCount).getBytes(), FAMILY, QUALIFIER, VALUE, 0);
          lastLogWriter.sync();
          editsCount.incrementAndGet();
          try {
            Thread.sleep(1);
          } catch (InterruptedException e) {
            // ignore
          }
        } catch (IOException ex) {
          if (ex instanceof RemoteException) {
            flushToConsole("Juliet: got RemoteException " +
                ex.getMessage() + " while writing " + (editsCount.get() + 1));
            break;
          } else {
            fail("Failed to write " + editsCount.get());
          }
        }
      }
    }
  }
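  /**
   * Simulates a regionserver that creates a brand-new log in the log directory
   * once it observes (via the appearance of recovered.edits) that a split of its
   * logs has started.
   */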
  class ZombieNewLogWriterRegionServer extends Thread {
    AtomicBoolean stop;

    public ZombieNewLogWriterRegionServer(AtomicBoolean stop) {
      super("ZombieNewLogWriterRegionServer");
      this.stop = stop;
    }

    @Override
    public void run() {
      if (stop.get()) {
        return;
      }
      Path tableDir = new Path(hbaseDir, new String(TABLE_NAME));
      Path regionDir = new Path(tableDir, regions.get(0));
      Path recoveredEdits = new Path(regionDir, HLogSplitter.RECOVERED_EDITS);
      String region = "juliet";
      Path julietLog = new Path(hlogDir, HLOG_FILE_PREFIX + ".juliet");
      try {
        // wait until the split has actually started before writing the new log
        while (!fs.exists(recoveredEdits) && !stop.get()) {
          flushToConsole("Juliet: split not started, sleeping a bit...");
          Threads.sleep(10);
        }

        fs.mkdirs(new Path(tableDir, region));
        HLog.Writer writer = HLog.createWriter(fs, julietLog, conf);
        appendEntry(writer, "juliet".getBytes(), ("juliet").getBytes(),
            ("r").getBytes(), FAMILY, QUALIFIER, VALUE, 0);
        writer.close();
        flushToConsole("Juliet file creator: created file " + julietLog);
      } catch (IOException e1) {
        fail("Failed to create file " + julietLog);
      }
    }
  }

  private CancelableProgressable reporter = new CancelableProgressable() {
    int count = 0;

    @Override
    public boolean progress() {
      count++;
      LOG.debug("progress = " + count);
      return true;
    }
  };

  @Test
  public void testSplitLogFileWithOneRegion() throws IOException {
    LOG.info("testSplitLogFileWithOneRegion");
    final String REGION = "region__1";
    regions.clear();
    regions.add(REGION);

    generateHLogs(1, 10, -1);
    FileStatus logfile = fs.listStatus(hlogDir)[0];
    fs.initialize(fs.getUri(), conf);
    HLogSplitter.splitLogFile(hbaseDir, logfile, fs, conf, reporter, null);
    HLogSplitter.finishSplitLogFile(hbaseDir, oldLogDir, logfile.getPath()
        .toString(), conf);

    Path originalLog = (fs.listStatus(oldLogDir))[0].getPath();
    Path splitLog = getLogForRegion(hbaseDir, TABLE_NAME, REGION);

    assertTrue(logsAreEqual(originalLog, splitLog));
  }

  @Test
  public void testSplitLogFileDeletedRegionDir() throws IOException {
    LOG.info("testSplitLogFileDeletedRegionDir");
    final String REGION = "region__1";
    regions.clear();
    regions.add(REGION);

    generateHLogs(1, 10, -1);
    FileStatus logfile = fs.listStatus(hlogDir)[0];
    fs.initialize(fs.getUri(), conf);

    Path regiondir = new Path(tabledir, REGION);
    LOG.info("Region directory is " + regiondir);
    fs.delete(regiondir, true);

    HLogSplitter.splitLogFile(hbaseDir, logfile, fs, conf, reporter, null);
    HLogSplitter.finishSplitLogFile(hbaseDir, oldLogDir, logfile.getPath()
        .toString(), conf);

    assertFalse(fs.exists(regiondir));
  }

  @Test
  public void testSplitLogFileEmpty() throws IOException {
    LOG.info("testSplitLogFileEmpty");
    injectEmptyFile(".empty", true);
    FileStatus logfile = fs.listStatus(hlogDir)[0];

    fs.initialize(fs.getUri(), conf);

    HLogSplitter.splitLogFile(hbaseDir, logfile, fs, conf, reporter, null);
    HLogSplitter.finishSplitLogFile(hbaseDir, oldLogDir, logfile.getPath()
        .toString(), conf);
    Path tdir = HTableDescriptor.getTableDir(hbaseDir, TABLE_NAME);
    assertFalse(fs.exists(tdir));

    assertEquals(0, countHLog(fs.listStatus(oldLogDir)[0].getPath(), fs, conf));
  }

  @Test
  public void testSplitLogFileMultipleRegions() throws IOException {
    LOG.info("testSplitLogFileMultipleRegions");
    generateHLogs(1, 10, -1);
    FileStatus logfile = fs.listStatus(hlogDir)[0];
    fs.initialize(fs.getUri(), conf);

    HLogSplitter.splitLogFile(hbaseDir, logfile, fs, conf, reporter, null);
    HLogSplitter.finishSplitLogFile(hbaseDir, oldLogDir, logfile.getPath()
        .toString(), conf);
    for (String region : regions) {
      Path recovered = getLogForRegion(hbaseDir, TABLE_NAME, region);
      assertEquals(10, countHLog(recovered, fs, conf));
    }
  }

  @Test
  public void testSplitLogFileFirstLineCorruptionLog() throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, true);
    generateHLogs(1, 10, -1);
    FileStatus logfile = fs.listStatus(hlogDir)[0];

    corruptHLog(logfile.getPath(),
        Corruptions.INSERT_GARBAGE_ON_FIRST_LINE, true, fs);

    fs.initialize(fs.getUri(), conf);
    HLogSplitter.splitLogFile(hbaseDir, logfile, fs, conf, reporter, null);
    HLogSplitter.finishSplitLogFile(hbaseDir, oldLogDir, logfile.getPath()
        .toString(), conf);

    final Path corruptDir = new Path(conf.get(HConstants.HBASE_DIR), conf.get(
        "hbase.regionserver.hlog.splitlog.corrupt.dir", ".corrupt"));
    assertEquals(1, fs.listStatus(corruptDir).length);
  }
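  /**
   * Races log splitting against a region's replayRecoveredEditsIfAny(): the
   * recovered-edits files are deleted out from under the splitter, which should
   * neither throw nor leave logs marked as corrupt.
   */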
  @Test
  public void testConcurrentSplitLogAndReplayRecoverEdit() throws IOException {
    LOG.info("testConcurrentSplitLogAndReplayRecoverEdit");
    // generate hlogs for our destination region
    String regionName = "r0";
    final Path regiondir = new Path(tabledir, regionName);
    regions = new ArrayList<String>();
    regions.add(regionName);
    generateHLogs(-1);

    HLogSplitter logSplitter = new HLogSplitter(
        conf, hbaseDir, hlogDir, oldLogDir, fs, null) {
      protected HLog.Writer createWriter(FileSystem fs, Path logfile, Configuration conf)
          throws IOException {
        HLog.Writer writer = HLog.createWriter(fs, logfile, conf);
        // After creating the writer, simulate a region's
        // replayRecoveredEditsIfAny(), which gets the split edit files of this
        // region and deletes them.
        NavigableSet<Path> files = HLog.getSplitEditFilesSorted(this.fs, regiondir);
        if (files != null && !files.isEmpty()) {
          for (Path file : files) {
            if (!this.fs.delete(file, false)) {
              LOG.error("Failed delete of " + file);
            } else {
              LOG.debug("Deleted recovered.edits file=" + file);
            }
          }
        }
        return writer;
      }
    };
    try {
      logSplitter.splitLog();
    } catch (IOException e) {
      LOG.info(e);
      Assert.fail("Throws IOException when splitting "
          + "log, it is most likely because writing file does not "
          + "exist which is caused by concurrent replayRecoveredEditsIfAny()");
    }
    if (fs.exists(corruptDir)) {
      if (fs.listStatus(corruptDir).length > 0) {
        Assert.fail("There are some corrupt logs, "
            + "it is most likely caused by concurrent replayRecoveredEditsIfAny()");
      }
    }
  }

  private void flushToConsole(String s) {
    System.out.println(s);
    System.out.flush();
  }

  private void generateHLogs(int leaveOpen) throws IOException {
    generateHLogs(NUM_WRITERS, ENTRIES, leaveOpen);
  }

  private void makeRegionDirs(FileSystem fs, List<String> regions) throws IOException {
    for (String region : regions) {
      flushToConsole("Creating dir for region " + region);
      fs.mkdirs(new Path(tabledir, region));
    }
  }

  private void generateHLogs(int writers, int entries, int leaveOpen) throws IOException {
    makeRegionDirs(fs, regions);
    fs.mkdirs(hlogDir);
    for (int i = 0; i < writers; i++) {
      writer[i] = HLog.createWriter(fs, new Path(hlogDir, HLOG_FILE_PREFIX + i), conf);
      for (int j = 0; j < entries; j++) {
        int prefix = 0;
        for (String region : regions) {
          String row_key = region + prefix++ + i + j;
          appendEntry(writer[i], TABLE_NAME, region.getBytes(),
              row_key.getBytes(), FAMILY, QUALIFIER, VALUE, seq);
        }
      }
      if (i != leaveOpen) {
        writer[i].close();
        flushToConsole("Closing writer " + i);
      }
    }
  }

  private Path getLogForRegion(Path rootdir, byte[] table, String region)
      throws IOException {
    Path tdir = HTableDescriptor.getTableDir(rootdir, table);
    Path editsdir = HLog.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir,
        Bytes.toString(region.getBytes())));
    FileStatus[] files = this.fs.listStatus(editsdir);
    assertEquals(1, files.length);
    return files[0].getPath();
  }

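  /**
   * Rewrites the given log with one of the {@link Corruptions} applied; when
   * {@code close} is false the file is only flushed, mimicking a log that is
   * still open for writes.
   */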
  private void corruptHLog(Path path, Corruptions corruption, boolean close,
      FileSystem fs) throws IOException {

    FSDataOutputStream out;
    int fileSize = (int) fs.listStatus(path)[0].getLen();

    FSDataInputStream in = fs.open(path);
    byte[] corrupted_bytes = new byte[fileSize];
    in.readFully(0, corrupted_bytes, 0, fileSize);
    in.close();

    switch (corruption) {
      case APPEND_GARBAGE:
        fs.delete(path, false);
        out = fs.create(path);
        out.write(corrupted_bytes);
        out.write("-----".getBytes());
        closeOrFlush(close, out);
        break;

      case INSERT_GARBAGE_ON_FIRST_LINE:
        fs.delete(path, false);
        out = fs.create(path);
        out.write(0);
        out.write(corrupted_bytes);
        closeOrFlush(close, out);
        break;

      case INSERT_GARBAGE_IN_THE_MIDDLE:
        fs.delete(path, false);
        out = fs.create(path);
        int middle = corrupted_bytes.length / 2;
        out.write(corrupted_bytes, 0, middle);
        out.write(0);
        out.write(corrupted_bytes, middle, corrupted_bytes.length - middle);
        closeOrFlush(close, out);
        break;

      case TRUNCATE:
        fs.delete(path, false);
        out = fs.create(path);
        out.write(corrupted_bytes, 0, fileSize - 32);
        closeOrFlush(close, out);
        break;
    }
  }

  private void closeOrFlush(boolean close, FSDataOutputStream out)
      throws IOException {
    if (close) {
      out.close();
    } else {
      Method syncMethod = null;
      try {
        syncMethod = out.getClass().getMethod("hflush", new Class<?>[]{});
      } catch (NoSuchMethodException e) {
        try {
          syncMethod = out.getClass().getMethod("sync", new Class<?>[]{});
        } catch (NoSuchMethodException ex) {
          throw new IOException("This version of Hadoop supports " +
              "neither Syncable.sync() nor Syncable.hflush().");
        }
      }
      try {
        syncMethod.invoke(out, new Object[]{});
      } catch (Exception e) {
        throw new IOException(e);
      }
    }
  }

  @SuppressWarnings("unused")
  private void dumpHLog(Path log, FileSystem fs, Configuration conf) throws IOException {
    HLog.Entry entry;
    HLog.Reader in = HLog.getReader(fs, log, conf);
    while ((entry = in.next()) != null) {
      System.out.println(entry);
    }
  }

  private int countHLog(Path log, FileSystem fs, Configuration conf) throws IOException {
    int count = 0;
    HLog.Reader in = HLog.getReader(fs, log, conf);
    while (in.next() != null) {
      count++;
    }
    return count;
  }

  public long appendEntry(HLog.Writer writer, byte[] table, byte[] region,
      byte[] row, byte[] family, byte[] qualifier,
      byte[] value, long seq)
      throws IOException {
    writer.append(createTestEntry(table, region, row, family, qualifier, value, seq));
    writer.sync();
    return seq;
  }

  private HLog.Entry createTestEntry(
      byte[] table, byte[] region,
      byte[] row, byte[] family, byte[] qualifier,
      byte[] value, long seq) {
    long time = System.nanoTime();
    WALEdit edit = new WALEdit();
    seq++;
    edit.add(new KeyValue(row, family, qualifier, time, KeyValue.Type.Put, value));
    return new HLog.Entry(new HLogKey(region, table, seq, time,
        HConstants.DEFAULT_CLUSTER_ID), edit);
  }

  private void injectEmptyFile(String suffix, boolean closeFile)
      throws IOException {
    HLog.Writer writer = HLog.createWriter(
        fs, new Path(hlogDir, HLOG_FILE_PREFIX + suffix), conf);
    if (closeFile) writer.close();
  }

  @SuppressWarnings("unused")
  private void listLogs(FileSystem fs, Path dir) throws IOException {
    for (FileStatus file : fs.listStatus(dir)) {
      System.out.println(file.getPath());
    }
  }

  private int compareHLogSplitDirs(Path p1, Path p2) throws IOException {
    FileStatus[] f1 = fs.listStatus(p1);
    FileStatus[] f2 = fs.listStatus(p2);
    assertNotNull("Path " + p1 + " doesn't exist", f1);
    assertNotNull("Path " + p2 + " doesn't exist", f2);

    System.out.println("Files in " + p1 + ": " +
        Joiner.on(",").join(FileUtil.stat2Paths(f1)));
    System.out.println("Files in " + p2 + ": " +
        Joiner.on(",").join(FileUtil.stat2Paths(f2)));
    assertEquals(f1.length, f2.length);

    for (int i = 0; i < f1.length; i++) {
      // each region has a recovered.edits directory; presume a single split
      // edit file in each
      Path rd1 = HLog.getRegionDirRecoveredEditsDir(f1[i].getPath());
      FileStatus[] rd1fs = fs.listStatus(rd1);
      assertEquals(1, rd1fs.length);
      Path rd2 = HLog.getRegionDirRecoveredEditsDir(f2[i].getPath());
      FileStatus[] rd2fs = fs.listStatus(rd2);
      assertEquals(1, rd2fs.length);
      if (!logsAreEqual(rd1fs[0].getPath(), rd2fs[0].getPath())) {
        return -1;
      }
    }
    return 0;
  }

  private boolean logsAreEqual(Path p1, Path p2) throws IOException {
    HLog.Reader in1, in2;
    in1 = HLog.getReader(fs, p1, conf);
    in2 = HLog.getReader(fs, p2, conf);
    HLog.Entry entry1;
    HLog.Entry entry2;
    while ((entry1 = in1.next()) != null) {
      entry2 = in2.next();
      if ((entry1.getKey().compareTo(entry2.getKey()) != 0) ||
          (!entry1.getEdit().toString().equals(entry2.getEdit().toString()))) {
        return false;
      }
    }
    return true;
  }

  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
      new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}