/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.MetaScanner;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Triple;

/**
 * A janitor for the catalog tables.  Periodically scans the <code>hbase:meta</code>
 * catalog table looking for unused regions to garbage collect.
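 *
 * <p>A minimal usage sketch only, not part of this class's contract; it assumes a
 * {@code Server} and a {@link MasterServices} instance are already at hand, as they
 * are on the active master:
 * <pre>{@code
 *   // The scan period is read from "hbase.catalogjanitor.interval" (default 300000 ms).
 *   CatalogJanitor janitor = new CatalogJanitor(server, services);
 *   // Disabling waits for any in-flight scan to finish before returning.
 *   boolean wasEnabled = janitor.setEnabled(false);
 *   janitor.setEnabled(wasEnabled);
 * }</pre>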
 */
@InterfaceAudience.Private
public class CatalogJanitor extends ScheduledChore {
  private static final Log LOG = LogFactory.getLog(CatalogJanitor.class.getName());
  private final Server server;
  private final MasterServices services;
  private AtomicBoolean enabled = new AtomicBoolean(true);
  private AtomicBoolean alreadyRunning = new AtomicBoolean(false);
  private final Connection connection;

  CatalogJanitor(final Server server, final MasterServices services) {
    super("CatalogJanitor-" + server.getServerName().toShortString(), server, server
        .getConfiguration().getInt("hbase.catalogjanitor.interval", 300000));
    this.server = server;
    this.services = services;
    this.connection = server.getConnection();
  }

  @Override
  protected boolean initialChore() {
    try {
      if (this.enabled.get()) scan();
    } catch (IOException e) {
      LOG.warn("Failed initial scan of catalog table", e);
      return false;
    }
    return true;
  }

  /**
   * Enables or disables this janitor.  When disabling, waits for any scan that is
   * already in progress to finish before returning.
   * @param enabled the new enabled state
   * @return the previous enabled state
   */
  public boolean setEnabled(final boolean enabled) {
    boolean alreadyEnabled = this.enabled.getAndSet(enabled);
    // If disabling is requested on an already enabled chore, we could have an active
    // scan still going on; callers might not be aware of that and take further action
    // thinking that no more work will come from this chore.  In this case, the right
    // action is to wait for the active scan to complete before exiting this function.
    if (!enabled && alreadyEnabled) {
      while (alreadyRunning.get()) {
        Threads.sleepWithoutInterrupt(100);
      }
    }
    return alreadyEnabled;
  }

  boolean getEnabled() {
    return this.enabled.get();
  }

  @Override
  protected void chore() {
    try {
      if (this.enabled.get()) {
        scan();
      } else {
        LOG.warn("CatalogJanitor disabled! Not running scan.");
      }
    } catch (IOException e) {
      LOG.warn("Failed scan of catalog table", e);
    }
  }

  /**
   * Scans hbase:meta and returns the number of scanned rows, a map of merged
   * regions, and an ordered map of split parents.
   * @return triple of scanned rows, map of merged regions and map of split
   *         parent regioninfos
   * @throws IOException
   */
  Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>> getMergedRegionsAndSplitParents()
      throws IOException {
    return getMergedRegionsAndSplitParents(null);
  }

  /**
   * Scans hbase:meta and returns the number of scanned rows, a map of merged
   * regions, and an ordered map of split parents.  If the given table name is
   * null, merged regions and split parents of all tables are returned;
   * otherwise only those of the specified table.
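   *
   * <p>A minimal sketch of consuming the returned triple; {@code tableName} is assumed
   * to name an existing table:
   * <pre>{@code
   *   Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>> t =
   *       getMergedRegionsAndSplitParents(tableName);
   *   int rowsScanned = t.getFirst();                   // rows read from hbase:meta
   *   Map<HRegionInfo, Result> merged = t.getSecond();  // merged regions
   *   Map<HRegionInfo, Result> parents = t.getThird();  // split parents, parents first
   * }</pre>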
   * @param tableName null represents all tables
   * @return triple of scanned rows, map of merged regions, and map of split
   *         parent regioninfos
   * @throws IOException
   */
  Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>> getMergedRegionsAndSplitParents(
      final TableName tableName) throws IOException {
    final boolean isTableSpecified = (tableName != null);
    // TODO: Only works with single hbase:meta region currently.  Fix.
    final AtomicInteger count = new AtomicInteger(0);
    // Keep a map of found split parents.  These are candidates for cleanup.
    // Use a comparator that has split parents come before their daughters.
    final Map<HRegionInfo, Result> splitParents =
      new TreeMap<HRegionInfo, Result>(new SplitParentFirstComparator());
    final Map<HRegionInfo, Result> mergedRegions = new TreeMap<HRegionInfo, Result>();
    // This visitor collects split parents and counts rows in the hbase:meta table.

    MetaScannerVisitor visitor = new MetaScanner.MetaScannerVisitorBase() {
      @Override
      public boolean processRow(Result r) throws IOException {
        if (r == null || r.isEmpty()) return true;
        count.incrementAndGet();
        HRegionInfo info = HRegionInfo.getHRegionInfo(r);
        if (info == null) return true; // Keep scanning
        if (isTableSpecified
            && info.getTable().compareTo(tableName) > 0) {
          // Another table, stop scanning
          return false;
        }
        if (info.isSplitParent()) splitParents.put(info, r);
        if (r.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER) != null) {
          mergedRegions.put(info, r);
        }
        // Returning true means "keep scanning"
        return true;
      }
    };

    // Run full scan of hbase:meta catalog table passing in our custom visitor with
    // the start row
    MetaScanner.metaScan(this.connection, visitor, tableName);

    return new Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>(
        count.get(), mergedRegions, splitParents);
  }

  /**
   * If the merged region no longer holds references to the regions it was merged from
   * (regionA and regionB), archive those regions on HDFS and delete the merge
   * references from hbase:meta.
   * @param mergedRegion the merged region
   * @param regionA merge parent region A
   * @param regionB merge parent region B
   * @return true if we deleted the merge references from the merged region's row in
   *         hbase:meta and archived regionA and regionB on the file system
   * @throws IOException
   */
  boolean cleanMergeRegion(final HRegionInfo mergedRegion,
      final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
    Path rootdir = this.services.getMasterFileSystem().getRootDir();
    Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
    HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
    HRegionFileSystem regionFs = null;
    try {
      regionFs = HRegionFileSystem.openRegionFromFileSystem(
          this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
    } catch (IOException e) {
      LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
    }
    if (regionFs == null || !regionFs.hasReferences(htd)) {
      LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
          + regionB.getRegionNameAsString()
          + " from fs because merged region no longer holds references");
      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
      MetaTableAccessor.deleteMergeQualifiers(server.getConnection(),
        mergedRegion);
      return true;
    }
    return false;
  }

  /**
   * Run a janitorial scan of the catalog <code>hbase:meta</code> table looking for
   * garbage to collect.
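   *
   * <p>Sketch of triggering a pass by hand, assuming {@code janitor} is a constructed
   * instance (the chore framework normally calls this on its own schedule):
   * <pre>{@code
   *   int cleaned = janitor.scan();  // unreferenced merged regions plus split parents gc'd
   * }</pre>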
   * @return number of cleaned regions
   * @throws IOException
   */
  int scan() throws IOException {
    try {
      if (!alreadyRunning.compareAndSet(false, true)) {
        return 0;
      }
      Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>> scanTriple =
        getMergedRegionsAndSplitParents();
      int count = scanTriple.getFirst();
      // Clean merged regions first.
      int mergeCleaned = 0;
      Map<HRegionInfo, Result> mergedRegions = scanTriple.getSecond();
      for (Map.Entry<HRegionInfo, Result> e : mergedRegions.entrySet()) {
        HRegionInfo regionA = HRegionInfo.getHRegionInfo(e.getValue(),
            HConstants.MERGEA_QUALIFIER);
        HRegionInfo regionB = HRegionInfo.getHRegionInfo(e.getValue(),
            HConstants.MERGEB_QUALIFIER);
        if (regionA == null || regionB == null) {
          LOG.warn("Unexpected references regionA="
              + (regionA == null ? "null" : regionA.getRegionNameAsString())
              + ",regionB="
              + (regionB == null ? "null" : regionB.getRegionNameAsString())
              + " in merged region " + e.getKey().getRegionNameAsString());
        } else {
          if (cleanMergeRegion(e.getKey(), regionA, regionB)) {
            mergeCleaned++;
          }
        }
      }
      // Then clean split parents.
      Map<HRegionInfo, Result> splitParents = scanTriple.getThird();

      // Now work on our list of found parents. See if any we can clean up.
      int splitCleaned = 0;
      // regions whose parents are still around
      HashSet<String> parentNotCleaned = new HashSet<String>();
      for (Map.Entry<HRegionInfo, Result> e : splitParents.entrySet()) {
        if (!parentNotCleaned.contains(e.getKey().getEncodedName()) &&
            cleanParent(e.getKey(), e.getValue())) {
          splitCleaned++;
        } else {
          // We could not clean the parent, so its daughters should not be cleaned either (HBASE-6160)
          PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(e.getValue());
          parentNotCleaned.add(daughters.getFirst().getEncodedName());
          parentNotCleaned.add(daughters.getSecond().getEncodedName());
        }
      }
      if ((mergeCleaned + splitCleaned) != 0) {
        LOG.info("Scanned " + count + " catalog row(s), gc'd " + mergeCleaned
            + " unreferenced merged region(s) and " + splitCleaned
            + " unreferenced parent region(s)");
      } else if (LOG.isTraceEnabled()) {
        LOG.trace("Scanned " + count + " catalog row(s), gc'd " + mergeCleaned
            + " unreferenced merged region(s) and " + splitCleaned
            + " unreferenced parent region(s)");
      }
      return mergeCleaned + splitCleaned;
    } finally {
      alreadyRunning.set(false);
    }
  }

  /**
   * Compare HRegionInfos in a way that has split parents sort BEFORE their
   * daughters.
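   *
   * <p>A small illustration of the ordering; the table name and keys are made up:
   * <pre>{@code
   *   TableName t = TableName.valueOf("t");
   *   HRegionInfo parent = new HRegionInfo(t, Bytes.toBytes("a"), Bytes.toBytes("c"));
   *   HRegionInfo daughterA = new HRegionInfo(t, Bytes.toBytes("a"), Bytes.toBytes("b"));
   *   // Same start key; the parent's larger end key makes it sort first.
   *   assert new SplitParentFirstComparator().compare(parent, daughterA) < 0;
   * }</pre>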
   */
  static class SplitParentFirstComparator implements Comparator<HRegionInfo> {
    Comparator<byte[]> rowEndKeyComparator = new Bytes.RowEndKeyComparator();
    @Override
    public int compare(HRegionInfo left, HRegionInfo right) {
      // This comparator differs from the one in HRegionInfo in that it sorts
      // parents before daughters.
      if (left == null) return -1;
      if (right == null) return 1;
      // Same table name.
      int result = left.getTable().compareTo(right.getTable());
      if (result != 0) return result;
      // Compare start keys.
      result = Bytes.compareTo(left.getStartKey(), right.getStartKey());
      if (result != 0) return result;
      // Compare end keys, but flip the operands so the parent comes first.
      result = rowEndKeyComparator.compare(right.getEndKey(), left.getEndKey());

      return result;
    }
  }

  /**
   * If the daughters no longer hold references to the parent, delete the parent.
   * @param parent HRegionInfo of the split, offlined parent
   * @param rowContent Content of the <code>parent</code> row in
   * <code>metaRegionName</code>
   * @return True if we removed <code>parent</code> from the meta table and from
   * the filesystem.
   * @throws IOException
   */
  boolean cleanParent(final HRegionInfo parent, Result rowContent)
  throws IOException {
    boolean result = false;
    // Check whether it is a merged region whose merge references have not yet
    // been cleaned.  It is not necessary to check MERGEB_QUALIFIER because the
    // two qualifiers are inserted/deleted together.
    if (rowContent.getValue(HConstants.CATALOG_FAMILY,
        HConstants.MERGEA_QUALIFIER) != null) {
      // Wait for the merge-region cleanup to run first.
      return result;
    }
    // Run checks on each daughter split.
    PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(rowContent);
    Pair<Boolean, Boolean> a = checkDaughterInFs(parent, daughters.getFirst());
    Pair<Boolean, Boolean> b = checkDaughterInFs(parent, daughters.getSecond());
    if (hasNoReferences(a) && hasNoReferences(b)) {
      LOG.debug("Deleting region " + parent.getRegionNameAsString() +
        " because daughter splits no longer hold references");
      FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
      if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + parent);
      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent);
      MetaTableAccessor.deleteRegion(this.connection, parent);
      result = true;
    }
    return result;
  }

  /**
   * @param p A pair where the first boolean says whether or not the daughter
   * region directory exists in the filesystem and the second boolean says
   * whether the daughter has references to the parent.
   * @return True if the passed <code>p</code> signifies no references.
   */
  private boolean hasNoReferences(final Pair<Boolean, Boolean> p) {
    return !p.getFirst() || !p.getSecond();
  }

  /**
   * Checks if a daughter region -- either splitA or splitB -- still holds
   * references to the parent.
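   *
   * <p>Sketch of interpreting the returned pair; {@code parent} and {@code daughter}
   * are assumed to come from a split parent's hbase:meta row:
   * <pre>{@code
   *   Pair<Boolean, Boolean> p = checkDaughterInFs(parent, daughter);
   *   boolean daughterDirExists = p.getFirst();
   *   boolean referencesParent = p.getSecond();
   *   // The parent is only cleanable when the daughter dir is gone or holds no references.
   * }</pre>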
   * @param parent Parent region
   * @param daughter Daughter region
   * @return A pair where the first boolean says whether or not the daughter
   * region directory exists in the filesystem and the second boolean says
   * whether the daughter has references to the parent.
   * @throws IOException
   */
  Pair<Boolean, Boolean> checkDaughterInFs(final HRegionInfo parent, final HRegionInfo daughter)
  throws IOException {
    if (daughter == null) {
      return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
    }

    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
    Path rootdir = this.services.getMasterFileSystem().getRootDir();
    Path tabledir = FSUtils.getTableDir(rootdir, daughter.getTable());

    Path daughterRegionDir = new Path(tabledir, daughter.getEncodedName());

    HRegionFileSystem regionFs = null;

    try {
      if (!FSUtils.isExists(fs, daughterRegionDir)) {
        return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
      }
    } catch (IOException ioe) {
      LOG.warn("Error trying to determine if daughter region exists, " +
               "assuming exists and has references", ioe);
      return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.TRUE);
    }

    try {
      regionFs = HRegionFileSystem.openRegionFromFileSystem(
          this.services.getConfiguration(), fs, tabledir, daughter, true);
    } catch (IOException e) {
      LOG.warn("Error trying to determine referenced files from : " + daughter.getEncodedName()
          + ", to: " + parent.getEncodedName() + " assuming has references", e);
      return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.TRUE);
    }

    boolean references = false;
    HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTable());
    for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
      if ((references = regionFs.hasReferences(family.getNameAsString()))) {
        break;
      }
    }
    return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.valueOf(references));
  }

  private HTableDescriptor getTableDescriptor(final TableName tableName)
      throws FileNotFoundException, IOException {
    return this.services.getTableDescriptors().get(tableName);
  }

  /**
   * Checks if the specified region has merge qualifiers and, if so, tries to
   * clean them.
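   *
   * <p>Illustrative call only; {@code janitor} and {@code mergedRegionInfo} are assumed
   * names:
   * <pre>{@code
   *   if (janitor.cleanMergeQualifier(mergedRegionInfo)) {
   *     // Either there were no merge qualifiers, or they have now been cleaned up.
   *   }
   * }</pre>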
   * @param region the region to check
   * @return true if the specified region no longer has merge qualifiers
   * @throws IOException
   */
  public boolean cleanMergeQualifier(final HRegionInfo region)
      throws IOException {
    // Get the merge regions if this is a merged region that still has its merge
    // qualifiers.
    Pair<HRegionInfo, HRegionInfo> mergeRegions = MetaTableAccessor
        .getRegionsFromMergeQualifier(this.services.getConnection(),
          region.getRegionName());
    if (mergeRegions == null
        || (mergeRegions.getFirst() == null && mergeRegions.getSecond() == null)) {
      // It doesn't have merge qualifiers; no need to clean.
      return true;
    }
    // This shouldn't happen; the two qualifiers are inserted/deleted together.
    if (mergeRegions.getFirst() == null || mergeRegions.getSecond() == null) {
      LOG.error("Merged region " + region.getRegionNameAsString()
          + " has only one merge qualifier in META.");
      return false;
    }
    return cleanMergeRegion(region, mergeRegions.getFirst(),
        mergeRegions.getSecond());
  }
}