/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Assert;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(SmallTests.class)
public class TestDefaultCompactSelection extends TestCompactionPolicy {
  private static final Log LOG = LogFactory.getLog(TestDefaultCompactSelection.class);

  @Test
  public void testCompactionRatio() throws IOException {
    /**
     * NOTE: these tests describe the behavior of the current compaction
     * algorithm; they exist to ensure that refactoring does not silently
     * alter it.
     */
    long tooBig = maxSize + 1;

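    // A sketch of the rule these cases exercise (assuming the compaction ratio
    // of 1.0 that the expected results imply): walking from the oldest/largest
    // file, a file is skipped while
    //   fileSize > max(minCompactSize, ratio * sum(sizes of the newer files in the window)).
    // E.g. for (100, 50, 23, 12, 12): 100 > 97 and 50 > 47 are skipped,
    // but 23 <= 12 + 12 = 24, so (23, 12, 12) is selected.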
    // default case: apply the configured ratio to file sizes
    compactEquals(sfCreate(100, 50, 23, 12, 12), 23, 12, 12);
    // fewer files than the min compact threshold remain = don't compact
    compactEquals(sfCreate(100, 50, 25, 12, 12) /* empty */);
    // files greater than the max compact size = skip those
    compactEquals(sfCreate(tooBig, tooBig, 700, 700, 700), 700, 700, 700);
    // big files skipped and the remainder below the min file threshold = don't compact
    compactEquals(sfCreate(tooBig, tooBig, 700, 700) /* empty */);
    // small files = don't care about ratio
    compactEquals(sfCreate(7, 1, 1), 7, 1, 1);

    // don't exceed the max files-per-compaction threshold
    // note: file selection starts with the largest file and works toward the smallest.
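    // (The expected results imply hbase.hstore.compaction.max = 5 in this
    // test's configuration; excess files are trimmed from the largest end.)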
    compactEquals(sfCreate(7, 6, 5, 4, 3, 2, 1), 5, 4, 3, 2, 1);

    compactEquals(sfCreate(50, 10, 10, 10, 10), 10, 10, 10, 10);

    compactEquals(sfCreate(10, 10, 10, 10, 50), 10, 10, 10, 10);

    compactEquals(sfCreate(251, 253, 251, maxSize - 1), 251, 253, 251);

    compactEquals(sfCreate(maxSize - 1, maxSize - 1, maxSize - 1) /* empty */);

    // Always try to compact something to get below the blocking store file count
    this.conf.setLong("hbase.hstore.compaction.min.size", 1);
    store.storeEngine.getCompactionPolicy().setConf(conf);
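    // Ten candidates reach the blocking store file count these tests imply
    // (10), so the store "may be stuck": the ratio check rejects every file
    // (each is bigger than the sum of the smaller ones), and the policy falls
    // back to the minimum number of smallest files, here (4, 2, 1).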
    compactEquals(sfCreate(512, 256, 128, 64, 32, 16, 8, 4, 2, 1), 4, 2, 1);
    this.conf.setLong("hbase.hstore.compaction.min.size", minSize);
    store.storeEngine.getCompactionPolicy().setConf(conf);

    /* MAJOR COMPACTION */
    // if a major compaction has been forced, then compact everything
    compactEquals(sfCreate(50, 25, 12, 12), true, 50, 25, 12, 12);
    // also choose files < threshold on major compaction
    compactEquals(sfCreate(12, 12), true, 12, 12);
    // even if one of those files is too big
    compactEquals(sfCreate(tooBig, 12, 12), true, tooBig, 12, 12);
    // don't exceed the max files-per-compaction threshold, even with a major compaction
    store.forceMajor = true;
    compactEquals(sfCreate(7, 6, 5, 4, 3, 2, 1), 5, 4, 3, 2, 1);
    store.forceMajor = false;
    // if we exceed maxCompactSize, downgrade to a minor compaction;
    // otherwise files >> maxCompactSize create a 'snowball effect':
    // the last file in every compaction is the aggregate of all previous compactions
    compactEquals(sfCreate(100, 50, 23, 12, 12), true, 23, 12, 12);
    conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0);
    store.storeEngine.getCompactionPolicy().setConf(conf);
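    // With a 1 ms major compaction period and zero jitter, every selection now
    // looks overdue for an age-based major compaction; the finally block
    // restores the previous values (24 h period, 20% jitter).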
    try {
      // trigger an aged major compaction
      compactEquals(sfCreate(50, 25, 12, 12), 50, 25, 12, 12);
      // make sure that exceeding maxCompactSize also downgrades an aged major to a minor
      compactEquals(sfCreate(100, 50, 23, 12, 12), 23, 12, 12);
    } finally {
      conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000 * 60 * 60 * 24);
      conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.20F);
    }

    /* REFERENCES == file is from a region that was split */
    // treat store files that have references like a major compaction
    compactEquals(sfCreate(true, 100, 50, 25, 12, 12), 100, 50, 25, 12, 12);
    // reference files shouldn't obey the max compact size threshold
    compactEquals(sfCreate(true, tooBig, 12, 12), tooBig, 12, 12);
    // reference files should still obey the max files-per-compaction limit to avoid OOM
    compactEquals(sfCreate(true, 7, 6, 5, 4, 3, 2, 1), 7, 6, 5, 4, 3);

    // empty case
    compactEquals(new ArrayList<StoreFile>() /* empty */);
    // empty case (because all files are too big)
    compactEquals(sfCreate(tooBig, tooBig) /* empty */);
  }

  @Test
  public void testOffPeakCompactionRatio() throws IOException {
    /*
     * NOTE: these tests describe the behavior of the current compaction
     * algorithm; they exist to ensure that refactoring does not silently
     * alter it.
     */
    // set an off-peak compaction threshold
    this.conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F);
    store.storeEngine.getCompactionPolicy().setConf(this.conf);
    // Test with the off-peak flag set, then without it.
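    // With the off-peak ratio of 5.0: 999 > 5 * (50 + 12 + 12 + 1) = 375 is
    // still skipped, but 50 <= 5 * (12 + 12 + 1) = 125 now qualifies. At the
    // peak ratio (1.0, as the expectations imply), 50 > 12 + 12 + 1 = 25 is
    // skipped too, leaving (12, 12, 1).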
    compactEquals(sfCreate(999, 50, 12, 12, 1), false, true, 50, 12, 12, 1);
    compactEquals(sfCreate(999, 50, 12, 12, 1), 12, 12, 1);
  }

  @Test
  public void testStuckStoreCompaction() throws IOException {
    // Select the smallest compaction if the store is stuck.
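    // The expectations imply a blocking store file count of 10 in this test's
    // configuration: with ten candidates the store may be stuck, so the policy
    // picks the smallest eligible window rather than the usual ratio-based
    // selection; with nine candidates below, it does not.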
    compactEquals(sfCreate(99, 99, 99, 99, 99, 99, 30, 30, 30, 30), 30, 30, 30);
    // If not stuck, the standard policy applies.
    compactEquals(sfCreate(99, 99, 99, 99, 99, 30, 30, 30, 30), 99, 30, 30, 30, 30);

    // Even when stuck, sufficiently small files are still added to the compaction.
    compactEquals(sfCreate(99, 99, 99, 99, 99, 99, 30, 30, 30, 15), 30, 30, 30, 15);
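    // The next two cases are consistent with a roughly 5% significance
    // threshold: 30 + 26 + 26 = 82 vs. 29 + 25 + 25 = 79 (79 * 1.05 > 82, not
    // significant, keep the earlier window), while 27 * 3 = 81 vs.
    // 20 * 3 = 60 (60 * 1.05 < 81, significant, take the later window).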
    // Prefer the earlier compaction window to a later one if the benefit is not significant.
    compactEquals(sfCreate(99, 99, 99, 99, 30, 26, 26, 29, 25, 25), 30, 26, 26);
    // Prefer the later compaction window if the benefit is significant.
    compactEquals(sfCreate(99, 99, 99, 99, 27, 27, 27, 20, 20, 20), 20, 20, 20);
  }

  @Test
  public void testCompactionEmptyHFile() throws IOException {
    // Set a TTL of 600 seconds on the store
    ScanInfo oldScanInfo = store.getScanInfo();
    ScanInfo newScanInfo = new ScanInfo(oldScanInfo.getFamily(),
        oldScanInfo.getMinVersions(), oldScanInfo.getMaxVersions(), 600,
        oldScanInfo.getKeepDeletedCells(), oldScanInfo.getTimeToPurgeDeletes(),
        oldScanInfo.getComparator());
    store.setScanInfo(newScanInfo);
    // Do not compact an empty store file
    List<StoreFile> candidates = sfCreate(0);
    for (StoreFile file : candidates) {
      if (file instanceof MockStoreFile) {
        MockStoreFile mockFile = (MockStoreFile) file;
        mockFile.setTimeRangeTracker(new TimeRangeTracker(-1, -1));
        mockFile.setEntries(0);
      }
    }
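    // A file past its TTL would normally be selected for cleanup, but an empty
    // file (zero entries, uninitialized time range) is excluded here,
    // presumably because compacting it would only produce another empty file
    // to re-select on the next pass.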
    // Test the default compaction policy
    CompactionRequest result = ((RatioBasedCompactionPolicy) store.storeEngine
        .getCompactionPolicy()).selectCompaction(candidates,
        new ArrayList<StoreFile>(), false, false, false);
    Assert.assertEquals(0, result.getFiles().size());
    store.setScanInfo(oldScanInfo);
  }
}
192 }