/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Vector;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;

import com.google.common.base.Throwables;
import com.google.common.collect.Iterators;
import com.google.common.primitives.Ints;

import edu.umd.cs.findbugs.annotations.CheckForNull;

/**
 * Utility methods for interacting with the underlying file system.
 */
@InterfaceAudience.Private
public abstract class FSUtils {
  private static final Log LOG = LogFactory.getLog(FSUtils.class);

  /** Full access permissions (starting point for a umask) */
  public static final String FULL_RWX_PERMISSIONS = "777";
  private static final String THREAD_POOLSIZE = "hbase.client.localityCheck.threadPoolSize";
  private static final int DEFAULT_THREAD_POOLSIZE = 2;

  /** Set to true on Windows platforms */
  public static final boolean WINDOWS = System.getProperty("os.name").startsWith("Windows");

  protected FSUtils() {
    super();
  }

  /**
   * Sets the storage policy for the given path according to the policy configured under
   * <code>policyKey</code>. If the path is a directory, the policy applies to all files
   * subsequently created in it. The change takes place at the HDFS level and persists across
   * restarts. If the underlying file system or Hadoop version does not support the requested
   * policy (or storage policies at all), a message is logged and the call is a no-op.
   *
   * @param fs we only do anything if this is an instance of DistributedFileSystem
   * @param conf used to look up the storage policy under the given key; not modified
   * @param path the Path whose storage policy is to be set
   * @param policyKey config key to look the policy up with, e.g. HConstants.WAL_STORAGE_POLICY
   * @param defaultPolicy the policy that means "leave it to HDFS", usually NONE
   */
  public static void setStoragePolicy(final FileSystem fs, final Configuration conf,
      final Path path, final String policyKey, final String defaultPolicy) {
    String storagePolicy = conf.get(policyKey, defaultPolicy).toUpperCase();
    if (storagePolicy.equals(defaultPolicy)) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("default policy of " + defaultPolicy + " requested, exiting early.");
      }
      return;
    }
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem)fs;
      // Use reflection to look up setStoragePolicy so we still compile and run against
      // Hadoop versions that predate HDFS-6584.
      Class<? extends DistributedFileSystem> dfsClass = dfs.getClass();
      Method m = null;
      try {
        m = dfsClass.getDeclaredMethod("setStoragePolicy",
            new Class<?>[] { Path.class, String.class });
        m.setAccessible(true);
      } catch (NoSuchMethodException e) {
        LOG.info("FileSystem doesn't support setStoragePolicy; HDFS-6584 not available");
      } catch (SecurityException e) {
        LOG.info("Doesn't have access to setStoragePolicy on "
            + "FileSystems; HDFS-6584 not available", e);
        m = null;
      }
      if (m != null) {
        try {
          m.invoke(dfs, path, storagePolicy);
          LOG.info("set " + storagePolicy + " for " + path);
        } catch (Exception e) {
          // Log a rejected (invalid) policy differently from a general invocation failure.
          boolean probablyBadPolicy = false;
          if (e instanceof InvocationTargetException) {
            final Throwable exception = e.getCause();
            if (exception instanceof RemoteException &&
                HadoopIllegalArgumentException.class.getName().equals(
                  ((RemoteException)exception).getClassName())) {
              LOG.warn("Given storage policy, '" + storagePolicy + "', was rejected and probably " +
                "isn't a valid policy for the version of Hadoop you're running. I.e. if you're " +
                "trying to use SSD related policies then you're likely missing HDFS-7228. For " +
                "more information see the 'ArchivalStorage' docs for your Hadoop release.");
              LOG.debug("More information about the invalid storage policy.", exception);
              probablyBadPolicy = true;
            }
          }
          if (!probablyBadPolicy) {
            // Storage policies are a best-effort optimization, so log the failure and
            // carry on rather than propagating it.
            LOG.warn("Unable to set " + storagePolicy + " for " + path, e);
          }
        }
      }
    } else {
      LOG.info("FileSystem isn't an instance of DistributedFileSystem; presuming it doesn't " +
          "support setStoragePolicy.");
    }
  }
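
  // Usage sketch (the config key and path below are illustrative, not fixed API): read the
  // desired policy from the configuration and apply it; on a Hadoop version without HDFS-6584
  // the call above degrades to a logged no-op.
  //
  //   Configuration conf = new Configuration();
  //   FileSystem fs = FileSystem.get(conf);
  //   FSUtils.setStoragePolicy(fs, conf, new Path("/hbase/WALs"),
  //       "hbase.wal.storage.policy", "NONE");   // "NONE" delegates placement to HDFS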

  /**
   * Compare the path component of two Paths, ignoring the scheme; i.e. for
   * hdfs://a/b/c and /a/b/c, the '/a/b/c' parts are compared.
   * @param rootPath the root to test against
   * @param path the path to check
   * @return true if <code>path</code> starts with <code>rootPath</code>
   */
  public static boolean isStartingWithPath(final Path rootPath, final String path) {
    String uriRootPath = rootPath.toUri().getPath();
    String tailUriPath = (new Path(path)).toUri().getPath();
    return tailUriPath.startsWith(uriRootPath);
  }

  /**
   * Compare path components of two Paths, ignoring scheme and authority; i.e.
   * hdfs://a/b/c matches /a/b/c.
   * @param pathToSearch the path to search within
   * @param pathTail the tail to look for
   * @return true if the tail of <code>pathToSearch</code> matches <code>pathTail</code>
   */
  public static boolean isMatchingTail(final Path pathToSearch, String pathTail) {
    return isMatchingTail(pathToSearch, new Path(pathTail));
  }

  /**
   * Compare path components of two Paths, ignoring scheme and authority. The paths must have
   * the same depth; the comparison walks both paths from leaf to root, name by name.
   * @param pathToSearch the path to search within
   * @param pathTail the tail to look for
   * @return true if the tail of <code>pathToSearch</code> matches <code>pathTail</code>
   */
  public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
    if (pathToSearch.depth() != pathTail.depth()) return false;
    Path tailPath = pathTail;
    String tailName;
    Path toSearch = pathToSearch;
    String toSearchName;
    boolean result = false;
    do {
      tailName = tailPath.getName();
      if (tailName == null || tailName.length() <= 0) {
        result = true;
        break;
      }
      toSearchName = toSearch.getName();
      if (toSearchName == null || toSearchName.length() <= 0) break;
      // Move up a parent on each path for the next round of comparison.
      tailPath = tailPath.getParent();
      toSearch = toSearch.getParent();
    } while(tailName.equals(toSearchName));
    return result;
  }
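
  // Worked example (values are illustrative): tail matching ignores scheme and authority but
  // requires equal depth, so the first check passes and the second fails.
  //
  //   FSUtils.isMatchingTail(new Path("hdfs://nn:8020/hbase/data"), new Path("/hbase/data"));
  //       // -> true: same depth, names match leaf-to-root
  //   FSUtils.isMatchingTail(new Path("hdfs://nn:8020/hbase/data"), new Path("data"));
  //       // -> false: depths differ (2 vs 1)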

  /**
   * Instantiate the scheme-specific FSUtils implementation for the given FileSystem,
   * looked up under "hbase.fsutil.&lt;scheme&gt;.impl" and defaulting to FSHDFSUtils.
   */
  public static FSUtils getInstance(FileSystem fs, Configuration conf) {
    String scheme = fs.getUri().getScheme();
    if (scheme == null) {
      LOG.warn("Could not find scheme for uri " +
          fs.getUri() + ", default to hdfs");
      scheme = "hdfs";
    }
    Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
        scheme + ".impl", FSHDFSUtils.class);
    FSUtils fsUtils = (FSUtils)ReflectionUtils.newInstance(fsUtilsClass, conf);
    return fsUtils;
  }

  /**
   * Delete the directory if it exists.
   * @param fs filesystem object
   * @param dir directory to delete
   * @return true if deleted, otherwise false
   * @throws IOException e
   */
  public static boolean deleteDirectory(final FileSystem fs, final Path dir)
  throws IOException {
    return fs.exists(dir) && fs.delete(dir, true);
  }

  /**
   * Delete the region directory if it exists.
   * @param conf configuration used to resolve the root dir and filesystem
   * @param hri the region whose directory to delete
   * @return true if deleted, otherwise false
   * @throws IOException e
   */
  public static boolean deleteRegionDir(final Configuration conf, final HRegionInfo hri)
  throws IOException {
    Path rootDir = getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    return deleteDirectory(fs,
      new Path(getTableDir(rootDir, hri.getTable()), hri.getEncodedName()));
  }

  /**
   * Return the number of bytes that large input files should be optimally
   * split into to minimize i/o time.
   *
   * Uses reflection to search for getDefaultBlockSize(Path f); if the method doesn't
   * exist, falls back to the path-based default of the FileSystem.
   *
   * @param fs filesystem object
   * @param path path within the filesystem
   * @return the default block size for the path's filesystem
   * @throws IOException e
   */
  public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
    Method m = null;
    Class<? extends FileSystem> cls = fs.getClass();
    try {
      m = cls.getMethod("getDefaultBlockSize", new Class<?>[] { Path.class });
    } catch (NoSuchMethodException e) {
      LOG.info("FileSystem doesn't support getDefaultBlockSize");
    } catch (SecurityException e) {
      LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
      m = null;
    }
    if (m == null) {
      return fs.getDefaultBlockSize(path);
    } else {
      try {
        Object ret = m.invoke(fs, path);
        return ((Long)ret).longValue();
      } catch (Exception e) {
        throw new IOException(e);
      }
    }
  }

  /**
   * Get the default replication.
   *
   * Uses reflection to search for getDefaultReplication(Path f); if the method doesn't
   * exist, falls back to the path-based default of the FileSystem.
   *
   * @param fs filesystem object
   * @param path path within the filesystem
   * @return default replication for the path's filesystem
   * @throws IOException e
   */
  public static short getDefaultReplication(final FileSystem fs, final Path path) throws IOException {
    Method m = null;
    Class<? extends FileSystem> cls = fs.getClass();
    try {
      m = cls.getMethod("getDefaultReplication", new Class<?>[] { Path.class });
    } catch (NoSuchMethodException e) {
      LOG.info("FileSystem doesn't support getDefaultReplication");
    } catch (SecurityException e) {
      LOG.info("Doesn't have access to getDefaultReplication on FileSystems", e);
      m = null;
    }
    if (m == null) {
      return fs.getDefaultReplication(path);
    } else {
      try {
        Object ret = m.invoke(fs, path);
        return ((Number)ret).shortValue();
      } catch (Exception e) {
        throw new IOException(e);
      }
    }
  }

  /**
   * Returns the default buffer size to use during writes.
   *
   * The size of the buffer should probably be a multiple of hardware
   * page size (4096 on Intel x86), and it determines how much data is
   * buffered during read and write operations.
   *
   * @param fs filesystem object
   * @return default buffer size to use during writes
   */
  public static int getDefaultBufferSize(final FileSystem fs) {
    return fs.getConf().getInt("io.file.buffer.size", 4096);
  }

  /**
   * Create the specified file on the filesystem. By default, this will:
   * <ol>
   * <li>overwrite the file if it exists</li>
   * <li>apply the umask in the configuration (if it is enabled)</li>
   * <li>use the fs configured buffer size (or 4096 if not set)</li>
   * <li>use the default replication</li>
   * <li>use the default block size</li>
   * <li>not track progress</li>
   * </ol>
   *
   * @param fs {@link FileSystem} on which to write the file
   * @param path {@link Path} to the file to write
   * @param perm initial permissions
   * @param favoredNodes favored data nodes for the new block replicas
   * @return output stream to the created file
   * @throws IOException if the file cannot be created
   */
  public static FSDataOutputStream create(FileSystem fs, Path path,
      FsPermission perm, InetSocketAddress[] favoredNodes) throws IOException {
    if (fs instanceof HFileSystem) {
      FileSystem backingFs = ((HFileSystem)fs).getBackingFs();
      if (backingFs instanceof DistributedFileSystem) {
        // Try the favoredNodes version of create() via reflection so we stay
        // compatible with DFS clients that don't have it.
        try {
          return (FSDataOutputStream) (DistributedFileSystem.class
              .getDeclaredMethod("create", Path.class, FsPermission.class,
                  boolean.class, int.class, short.class, long.class,
                  Progressable.class, InetSocketAddress[].class)
              .invoke(backingFs, path, perm, true,
                  getDefaultBufferSize(backingFs),
                  getDefaultReplication(backingFs, path),
                  getDefaultBlockSize(backingFs, path),
                  null, favoredNodes));
        } catch (InvocationTargetException ite) {
          // Function was properly called, but threw its own exception.
          throw new IOException(ite.getCause());
        } catch (NoSuchMethodException e) {
          LOG.debug("DFS Client does not support most favored nodes create; using default create");
          if (LOG.isTraceEnabled()) LOG.trace("Ignoring; use default create", e);
        } catch (IllegalArgumentException e) {
          LOG.debug("Ignoring (most likely Reflection related exception) " + e);
        } catch (SecurityException e) {
          LOG.debug("Ignoring (most likely Reflection related exception) " + e);
        } catch (IllegalAccessException e) {
          LOG.debug("Ignoring (most likely Reflection related exception) " + e);
        }
      }
    }
    return create(fs, path, perm, true);
  }
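
  // Usage sketch (paths and addresses are illustrative): request block replica placement on
  // specific datanodes when the DFS client supports it; otherwise the call above silently
  // falls back to the plain create() overload below.
  //
  //   InetSocketAddress[] favored = new InetSocketAddress[] {
  //       new InetSocketAddress("datanode1.example.com", 50010) };
  //   FSDataOutputStream out = FSUtils.create(fs, new Path("/hbase/tmp/f"),
  //       new FsPermission("644"), favored);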

  /**
   * Create the specified file on the filesystem. By default, this will:
   * <ol>
   * <li>apply the umask in the configuration (if it is enabled)</li>
   * <li>use the fs configured buffer size (or 4096 if not set)</li>
   * <li>use the default replication</li>
   * <li>use the default block size</li>
   * <li>not track progress</li>
   * </ol>
   *
   * @param fs {@link FileSystem} on which to write the file
   * @param path {@link Path} to the file to write
   * @param perm permissions
   * @param overwrite whether or not an existing file should be overwritten
   * @return output stream to the created file
   * @throws IOException if the file cannot be created
   */
  public static FSDataOutputStream create(FileSystem fs, Path path,
      FsPermission perm, boolean overwrite) throws IOException {
    if (LOG.isTraceEnabled()) {
      LOG.trace("Creating file=" + path + " with permission=" + perm + ", overwrite=" + overwrite);
    }
    return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
        getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
  }

  /**
   * Get the file permissions specified in the configuration, if they are
   * enabled.
   *
   * @param fs filesystem that the file will be created on
   * @param conf configuration to read for determining if permissions are
   *          enabled and which to use
   * @param permissionConfKey property key in the configuration to use when
   *          finding the permission
   * @return the permission to use when creating a new file on the fs. If
   *         special permissions are not specified in the configuration, then
   *         the default permissions on the fs will be returned.
   */
  public static FsPermission getFilePermissions(final FileSystem fs,
      final Configuration conf, final String permissionConfKey) {
    boolean enablePermissions = conf.getBoolean(
        HConstants.ENABLE_DATA_FILE_UMASK, false);

    if (enablePermissions) {
      try {
        FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
        // make sure that we have a mask, if not, go default
        String mask = conf.get(permissionConfKey);
        if (mask == null)
          return FsPermission.getFileDefault();
        // apply the configured umask to the full permissions
        FsPermission umask = new FsPermission(mask);
        return perm.applyUMask(umask);
      } catch (IllegalArgumentException e) {
        LOG.warn(
            "Incorrect umask attempted to be created: "
                + conf.get(permissionConfKey)
                + ", using default file permissions.", e);
        return FsPermission.getFileDefault();
      }
    }
    return FsPermission.getFileDefault();
  }
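
  // Worked example (the umask key name is illustrative): with the umask feature enabled and a
  // umask of "077", the result is 777 & ~077 = 700, i.e. owner-only access.
  //
  //   conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
  //   conf.set("hbase.data.umask", "077");
  //   FsPermission p = FSUtils.getFilePermissions(fs, conf, "hbase.data.umask");
  //   // p.toString() -> "rwx------"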

  /**
   * Checks to see if the specified file system is available.
   *
   * @param fs filesystem
   * @throws IOException e
   */
  public static void checkFileSystemAvailable(final FileSystem fs)
  throws IOException {
    if (!(fs instanceof DistributedFileSystem)) {
      return;
    }
    IOException exception = null;
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    try {
      if (dfs.exists(new Path("/"))) {
        return;
      }
    } catch (IOException e) {
      exception = RemoteExceptionHandler.checkIOException(e);
    }
    try {
      fs.close();
    } catch (Exception e) {
      LOG.error("file system close failed: ", e);
    }
    IOException io = new IOException("File system is not available");
    io.initCause(exception);
    throw io;
  }

  /**
   * Check whether dfs is in safemode. Uses reflection because the two-argument
   * setSafeMode(SafeModeAction, boolean isChecked) is not available on all
   * supported Hadoop versions; falls back to the one-argument form.
   *
   * @param dfs the filesystem to check
   * @return whether we're in safe mode
   * @throws IOException
   */
  private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
    boolean inSafeMode = false;
    try {
      Method m = DistributedFileSystem.class.getMethod("setSafeMode", new Class<?> []{
          org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.class, boolean.class});
      inSafeMode = (Boolean) m.invoke(dfs,
        org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET, true);
    } catch (Exception e) {
      if (e instanceof IOException) throw (IOException) e;

      // Fall back to the single-argument check of whether dfs is in safemode.
      inSafeMode = dfs.setSafeMode(
        org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET);
    }
    return inSafeMode;
  }

  /**
   * Check whether dfs is in safemode.
   * @param conf configuration used to look up the filesystem
   * @throws IOException if dfs is in safemode
   */
  public static void checkDfsSafeMode(final Configuration conf)
  throws IOException {
    boolean isInSafeMode = false;
    FileSystem fs = FileSystem.get(conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem)fs;
      isInSafeMode = isInSafeMode(dfs);
    }
    if (isInSafeMode) {
      throw new IOException("File system is in safemode, it can't be written now");
    }
  }

  /**
   * Verifies the current version of the file system.
   *
   * @param fs filesystem object
   * @param rootdir root hbase directory
   * @return null if no version file exists, version string otherwise
   * @throws IOException e
   * @throws DeserializationException if the version file content cannot be parsed
   */
  public static String getVersion(FileSystem fs, Path rootdir)
  throws IOException, DeserializationException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    FileStatus[] status = null;
    try {
      // hadoop 2.0 throws FNFE if directory does not exist.
      // hadoop 1.0 returns null if directory does not exist.
      status = fs.listStatus(versionFile);
    } catch (FileNotFoundException fnfe) {
      return null;
    }
    if (status == null || status.length == 0) return null;
    String version = null;
    byte [] content = new byte [(int)status[0].getLen()];
    FSDataInputStream s = fs.open(versionFile);
    try {
      IOUtils.readFully(s, content, 0, content.length);
      if (ProtobufUtil.isPBMagicPrefix(content)) {
        version = parseVersionFrom(content);
      } else {
        // Presume it pre-pb format.
        InputStream is = new ByteArrayInputStream(content);
        DataInputStream dis = new DataInputStream(is);
        try {
          version = dis.readUTF();
        } finally {
          dis.close();
        }
      }
    } catch (EOFException eof) {
      LOG.warn("Version file was empty, odd, will try to set it.");
    } finally {
      s.close();
    }
    return version;
  }

  /**
   * Parse the content of the ${HBASE_ROOTDIR}/hbase.version file.
   * @param bytes The byte content of the hbase.version file
   * @return The version found in the file as a String
   * @throws DeserializationException
   */
  static String parseVersionFrom(final byte [] bytes)
  throws DeserializationException {
    ProtobufUtil.expectPBMagicPrefix(bytes);
    int pblen = ProtobufUtil.lengthOfPBMagic();
    FSProtos.HBaseVersionFileContent.Builder builder =
      FSProtos.HBaseVersionFileContent.newBuilder();
    try {
      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
      return builder.getVersion();
    } catch (IOException e) {
      // Convert to a DeserializationException to keep the contract of this method.
      throw new DeserializationException(e);
    }
  }

  /**
   * Create the content to write into the ${HBASE_ROOTDIR}/hbase.version file.
   * @param version Version to persist
   * @return Serialized protobuf with <code>version</code> content and a bit of pb magic for a prefix
   */
  static byte [] toVersionByteArray(final String version) {
    FSProtos.HBaseVersionFileContent.Builder builder =
      FSProtos.HBaseVersionFileContent.newBuilder();
    return ProtobufUtil.prependPBMagic(builder.setVersion(version).build().toByteArray());
  }

  /**
   * Verifies the current version of the file system.
   *
   * @param fs file system
   * @param rootdir root directory of HBase installation
   * @param message if true, issues a message on System.out
   * @throws IOException e
   * @throws DeserializationException
   */
  public static void checkVersion(FileSystem fs, Path rootdir, boolean message)
  throws IOException, DeserializationException {
    checkVersion(fs, rootdir, message, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
  }

  /**
   * Verifies the current version of the file system.
   *
   * @param fs file system
   * @param rootdir root directory of HBase installation
   * @param message if true, issues a message on System.out
   * @param wait wait interval
   * @param retries number of times to retry
   * @throws IOException e
   * @throws DeserializationException
   */
  public static void checkVersion(FileSystem fs, Path rootdir,
      boolean message, int wait, int retries)
  throws IOException, DeserializationException {
    String version = getVersion(fs, rootdir);
    if (version == null) {
      if (!metaRegionExists(fs, rootdir)) {
        // rootDir is empty (no version file and no meta region):
        // this is a new install, just create a new version file.
        setVersion(fs, rootdir, wait, retries);
        return;
      }
    } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) return;

    // The version is out of date; the layout requires migration.
    // Output on stdout so the user sees it in the terminal.
    String msg = "HBase file layout needs to be upgraded."
      + " You have version " + version
      + " and I want version " + HConstants.FILE_SYSTEM_VERSION
      + ". Consult http://hbase.apache.org/book.html for further information about upgrading HBase."
      + " Is your hbase.rootdir valid? If so, you may need to run "
      + "'hbase hbck -fixVersionFile'.";
    if (message) {
      System.out.println("WARNING! " + msg);
    }
    throw new FileSystemVersionException(msg);
  }

  /**
   * Sets the version of the file system.
   *
   * @param fs filesystem object
   * @param rootdir hbase root
   * @throws IOException e
   */
  public static void setVersion(FileSystem fs, Path rootdir)
  throws IOException {
    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
      HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
  }

  /**
   * Sets the version of the file system.
   *
   * @param fs filesystem object
   * @param rootdir hbase root
   * @param wait time to wait between retries
   * @param retries number of times to retry before failing
   * @throws IOException e
   */
  public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries)
  throws IOException {
    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries);
  }

  /**
   * Sets the version of the file system.
   *
   * @param fs filesystem object
   * @param rootdir hbase root directory
   * @param version version to set
   * @param wait time to wait between retries
   * @param retries number of times to retry before throwing an IOException
   * @throws IOException e
   */
  public static void setVersion(FileSystem fs, Path rootdir, String version,
      int wait, int retries) throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    Path tempVersionFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR +
      HConstants.VERSION_FILE_NAME);
    while (true) {
      try {
        // Write the version to a temporary file first.
        FSDataOutputStream s = fs.create(tempVersionFile);
        try {
          s.write(toVersionByteArray(version));
          s.close();
          s = null;
          // Move the temp version file to its normal location. Returns false
          // if the rename failed. Throw an IOE in that case.
          if (!fs.rename(tempVersionFile, versionFile)) {
            throw new IOException("Unable to move temp version file to " + versionFile);
          }
        } finally {
          // Attempt to close the stream on the way out if it is still open.
          try {
            if (s != null) s.close();
          } catch (IOException ignore) { }
        }
        LOG.info("Created version file at " + rootdir.toString() + " with version=" + version);
        return;
      } catch (IOException e) {
        if (retries > 0) {
          LOG.debug("Unable to create version file at " + rootdir.toString() + ", retrying", e);
          fs.delete(versionFile, false);
          try {
            if (wait > 0) {
              Thread.sleep(wait);
            }
          } catch (InterruptedException ie) {
            throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
          }
          retries--;
        } else {
          throw e;
        }
      }
    }
  }
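
  // Bootstrap sketch (wait/retry values are illustrative): a fresh deploy writes the version
  // file once, after which checkVersion() is a cheap no-op on every subsequent start.
  //
  //   Path rootdir = FSUtils.getRootDir(conf);
  //   FileSystem fs = rootdir.getFileSystem(conf);
  //   FSUtils.setVersion(fs, rootdir, 10 * 1000, 3);   // wait 10s between up to 3 attempts
  //   FSUtils.checkVersion(fs, rootdir, true);         // prints to stdout on mismatch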

  /**
   * Checks that a cluster ID file exists in the HBase root directory.
   * @param fs the root directory FileSystem
   * @param rootdir the HBase root directory in HDFS
   * @param wait how long to wait between retries
   * @return <code>true</code> if the file exists, otherwise <code>false</code>
   * @throws IOException if checking the FileSystem fails
   */
  public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
      int wait) throws IOException {
    while (true) {
      try {
        Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
        return fs.exists(filePath);
      } catch (IOException ioe) {
        if (wait > 0) {
          LOG.warn("Unable to check cluster ID file in " + rootdir.toString() +
              ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe));
          try {
            Thread.sleep(wait);
          } catch (InterruptedException e) {
            throw (InterruptedIOException)new InterruptedIOException().initCause(e);
          }
        } else {
          throw ioe;
        }
      }
    }
  }

  /**
   * Returns the value of the unique cluster ID stored for this HBase instance.
   * @param fs the root directory FileSystem
   * @param rootdir the path to the HBase root directory
   * @return the unique cluster identifier, or null if the file does not exist
   * @throws IOException if reading the cluster ID file fails
   */
  public static ClusterId getClusterId(FileSystem fs, Path rootdir)
  throws IOException {
    Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
    ClusterId clusterId = null;
    FileStatus status = fs.exists(idPath)? fs.getFileStatus(idPath): null;
    if (status != null) {
      int len = Ints.checkedCast(status.getLen());
      byte [] content = new byte[len];
      FSDataInputStream in = fs.open(idPath);
      try {
        in.readFully(content);
      } catch (EOFException eof) {
        LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
      } finally{
        in.close();
      }
      try {
        clusterId = ClusterId.parseFrom(content);
      } catch (DeserializationException e) {
        throw new IOException("content=" + Bytes.toString(content), e);
      }
      // If not pb'd, make it so.
      if (!ProtobufUtil.isPBMagicPrefix(content)) {
        String cid = null;
        in = fs.open(idPath);
        try {
          cid = in.readUTF();
          clusterId = new ClusterId(cid);
        } catch (EOFException eof) {
          LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
        } finally {
          in.close();
        }
        rewriteAsPb(fs, rootdir, idPath, clusterId);
      }
      return clusterId;
    } else {
      LOG.warn("Cluster ID file does not exist at " + idPath.toString());
    }
    return clusterId;
  }

  /**
   * Rewrite the cluster ID file as protobuf.
   * @param cid cluster ID content to write
   * @throws IOException if the rewrite fails
   */
  private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
      final ClusterId cid)
  throws IOException {
    // Rewrite the file as pb. Move aside the old one first, write the new
    // one, then delete the moved-aside file.
    Path movedAsideName = new Path(p + "." + System.currentTimeMillis());
    if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p);
    setClusterId(fs, rootdir, cid, 100);
    if (!fs.delete(movedAsideName, false)) {
      throw new IOException("Failed delete of " + movedAsideName);
    }
    LOG.debug("Rewrote the hbase.id file as pb");
  }

  /**
   * Writes a new unique identifier for this cluster to the "hbase.id" file
   * in the HBase root directory.
   * @param fs the root directory FileSystem
   * @param rootdir the path to the HBase root directory
   * @param clusterId the unique identifier to store
   * @param wait how long (in milliseconds) to wait between retries
   * @throws IOException if writing to the FileSystem fails and no wait value is left
   */
  public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId,
      int wait) throws IOException {
    while (true) {
      try {
        Path idFile = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
        Path tempIdFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY +
          Path.SEPARATOR + HConstants.CLUSTER_ID_FILE_NAME);
        // Write the id file to a temporary location first.
        FSDataOutputStream s = fs.create(tempIdFile);
        try {
          s.write(clusterId.toByteArray());
          s.close();
          s = null;
          // Move the temporary file to its normal location. Throw an IOE if
          // the rename failed.
          if (!fs.rename(tempIdFile, idFile)) {
            throw new IOException("Unable to move temp cluster ID file to " + idFile);
          }
        } finally {
          // Attempt to close the stream if still open on the way out.
          try {
            if (s != null) s.close();
          } catch (IOException ignore) { }
        }
        if (LOG.isDebugEnabled()) {
          LOG.debug("Created cluster ID file at " + idFile.toString() + " with ID: " + clusterId);
        }
        return;
      } catch (IOException ioe) {
        if (wait > 0) {
          LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
              ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
          try {
            Thread.sleep(wait);
          } catch (InterruptedException e) {
            throw (InterruptedIOException)new InterruptedIOException().initCause(e);
          }
        } else {
          throw ioe;
        }
      }
    }
  }

  /**
   * Verifies that the root directory path is a valid URI with a scheme.
   *
   * @param root root directory path
   * @return the passed <code>root</code> argument
   * @throws IOException if not a valid URI with a scheme
   */
  public static Path validateRootPath(Path root) throws IOException {
    try {
      URI rootURI = new URI(root.toString());
      String scheme = rootURI.getScheme();
      if (scheme == null) {
        throw new IOException("Root directory does not have a scheme");
      }
      return root;
    } catch (URISyntaxException e) {
      IOException io = new IOException("Root directory path is not a valid " +
        "URI -- check your " + HConstants.HBASE_DIR + " configuration");
      io.initCause(e);
      throw io;
    }
  }

  /**
   * Checks for the presence of the root path (using the provided conf object) in the given
   * path. If it exists, this method removes it and returns the remaining relative path.
   * @param path the path to strip the root from
   * @param conf configuration used to resolve the root dir
   * @return String representation of the remaining relative path
   * @throws IOException
   */
  public static String removeRootPath(Path path, final Configuration conf) throws IOException {
    Path root = FSUtils.getRootDir(conf);
    String pathStr = path.toString();
    // check that the path is absolute... it has the root path in it.
    if (!pathStr.startsWith(root.toString())) return pathStr;
    // strip the root path plus the trailing "/".
    return pathStr.substring(root.toString().length() + 1);
  }

  /**
   * If DFS, check safe mode and if so, wait until we clear it.
   * @param conf configuration
   * @param wait sleep interval between retries
   * @throws IOException e
   */
  public static void waitOnSafeMode(final Configuration conf,
    final long wait)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) return;
    DistributedFileSystem dfs = (DistributedFileSystem)fs;
    // Make sure dfs is not in safe mode.
    while (isInSafeMode(dfs)) {
      LOG.info("Waiting for dfs to exit safe mode...");
      try {
        Thread.sleep(wait);
      } catch (InterruptedException e) {
        throw (InterruptedIOException)new InterruptedIOException().initCause(e);
      }
    }
  }

  /**
   * Return the 'path' component of a Path. In Hadoop, Path is a URI. This
   * method returns the 'path' component of a Path's URI: e.g. if a Path is
   * <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>,
   * this method returns <code>/hbase_trunk/TestTable/compaction.dir</code>.
   * This method is useful if you want to print out a Path without qualifying the
   * Filesystem instance.
   * @param p Filesystem Path whose 'path' component we are to return
   * @return path portion of the Filesystem Path
   */
  public static String getPath(Path p) {
    return p.toUri().getPath();
  }

  /**
   * @param c configuration
   * @return {@link Path} to the hbase root directory, i.e. {@link HConstants#HBASE_DIR} from
   * the configuration as a qualified Path
   * @throws IOException e
   */
  public static Path getRootDir(final Configuration c) throws IOException {
    Path p = new Path(c.get(HConstants.HBASE_DIR));
    FileSystem fs = p.getFileSystem(c);
    return p.makeQualified(fs);
  }

  public static void setRootDir(final Configuration c, final Path root) throws IOException {
    c.set(HConstants.HBASE_DIR, root.toString());
  }

  public static void setFsDefault(final Configuration c, final Path root) throws IOException {
    c.set("fs.defaultFS", root.toString());
  }

  /**
   * Checks if the meta region exists.
   *
   * @param fs file system
   * @param rootdir root directory of HBase installation
   * @return true if it exists
   * @throws IOException e
   */
  @SuppressWarnings("deprecation")
  public static boolean metaRegionExists(FileSystem fs, Path rootdir)
  throws IOException {
    Path metaRegionDir =
      HRegion.getRegionDir(rootdir, HRegionInfo.FIRST_META_REGIONINFO);
    return fs.exists(metaRegionDir);
  }

  /**
   * Compute the HDFS blocks distribution of a given file, or a portion of the file.
   * @param fs file system
   * @param status file status of the file
   * @param start start position of the portion
   * @param length length of the portion
   * @return the HDFS blocks distribution
   */
  static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
    final FileSystem fs, FileStatus status, long start, long length)
    throws IOException {
    HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
    BlockLocation [] blockLocations =
      fs.getFileBlockLocations(status, start, length);
    for(BlockLocation bl : blockLocations) {
      String [] hosts = bl.getHosts();
      long len = bl.getLength();
      blocksDistribution.addHostsAndBlockWeight(hosts, len);
    }

    return blocksDistribution;
  }

  /**
   * Runs through the hbase rootdir and checks that all stores have only
   * one file in them -- that is, they've been major compacted. Looks
   * at root and meta tables too.
   * @param fs filesystem
   * @param hbaseRootDir hbase root directory
   * @return true if this hbase install is major compacted
   * @throws IOException e
   */
  public static boolean isMajorCompacted(final FileSystem fs,
      final Path hbaseRootDir)
  throws IOException {
    List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
    PathFilter regionFilter = new RegionDirFilter(fs);
    PathFilter familyFilter = new FamilyDirFilter(fs);
    for (Path d : tableDirs) {
      FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        // Else it's a region name. Now look in the region for families.
        FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          // Now in family, make sure there is only one file.
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            LOG.debug(family.toString() + " has " + familyStatus.length +
                " files.");
            return false;
          }
        }
      }
    }
    return true;
  }

  /**
   * Returns the total overall fragmentation percentage. Includes hbase:meta and
   * -ROOT- as well.
   *
   * @param master the master defining the HBase root and file system
   * @return the overall fragmentation percentage, or -1 in case of error
   * @throws IOException when scanning the directory fails
   */
  public static int getTotalTableFragmentation(final HMaster master)
  throws IOException {
    Map<String, Integer> map = getTableFragmentation(master);
    return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
  }

  /**
   * Runs through the HBase rootdir and checks how many stores for each table
   * have more than one file in them. Checks -ROOT- and hbase:meta too. The total
   * percentage across all tables is stored under the special key "-TOTAL-".
   *
   * @param master the master defining the HBase root and file system
   * @return a map of each table to its fragmentation percentage
   * @throws IOException when scanning the directory fails
   */
  public static Map<String, Integer> getTableFragmentation(
    final HMaster master)
  throws IOException {
    Path path = getRootDir(master.getConfiguration());
    // since HMaster.getFileSystem() is package private
    FileSystem fs = path.getFileSystem(master.getConfiguration());
    return getTableFragmentation(fs, path);
  }

  /**
   * Runs through the HBase rootdir and checks how many stores for each table
   * have more than one file in them. Checks -ROOT- and hbase:meta too. The total
   * percentage across all tables is stored under the special key "-TOTAL-".
   *
   * @param fs the file system to use
   * @param hbaseRootDir the root directory to scan
   * @return a map of each table to its fragmentation percentage
   * @throws IOException when scanning the directory fails
   */
  public static Map<String, Integer> getTableFragmentation(
    final FileSystem fs, final Path hbaseRootDir)
  throws IOException {
    Map<String, Integer> frags = new HashMap<String, Integer>();
    int cfCountTotal = 0;
    int cfFragTotal = 0;
    PathFilter regionFilter = new RegionDirFilter(fs);
    PathFilter familyFilter = new FamilyDirFilter(fs);
    List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
    for (Path d : tableDirs) {
      int cfCount = 0;
      int cfFrag = 0;
      FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        // else it's a region name, now look in the region for families
        FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
        for (FileStatus familyDir : familyDirs) {
          cfCount++;
          cfCountTotal++;
          Path family = familyDir.getPath();
          // now in family; a store with more than one file counts as fragmented
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            cfFrag++;
            cfFragTotal++;
          }
        }
      }
      // compute percentage per table and store in result map
      frags.put(FSUtils.getTableName(d).getNameAsString(),
        cfCount == 0? 0: Math.round((float) cfFrag / cfCount * 100));
    }
    // set overall percentage for all tables
    frags.put("-TOTAL-",
      cfCountTotal == 0? 0: Math.round((float) cfFragTotal / cfCountTotal * 100));
    return frags;
  }
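
  // Worked example of the fragmentation math above: a table with 4 column family directories,
  // 3 of which hold more than one store file, reports round(3f / 4 * 100) = 75. A table whose
  // stores have all been major compacted down to a single file reports 0.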

  /**
   * Returns the {@link org.apache.hadoop.fs.Path} object representing the table directory
   * under path rootdir.
   *
   * @param rootdir qualified path of HBase root directory
   * @param tableName name of table
   * @return {@link org.apache.hadoop.fs.Path} for table
   */
  public static Path getTableDir(Path rootdir, final TableName tableName) {
    return new Path(getNamespaceDir(rootdir, tableName.getNamespaceAsString()),
        tableName.getQualifierAsString());
  }

  /**
   * Returns the {@link org.apache.hadoop.hbase.TableName} object representing
   * the table corresponding to the given table directory.
   *
   * @param tablePath path of table
   * @return {@link org.apache.hadoop.hbase.TableName} for the table
   */
  public static TableName getTableName(Path tablePath) {
    return TableName.valueOf(tablePath.getParent().getName(), tablePath.getName());
  }
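
  // Layout sketch (the rootdir value is illustrative): table dirs live under
  // ${hbase.rootdir}/data/<namespace>/<qualifier>, and getTableDir/getTableName invert
  // each other.
  //
  //   Path dir = FSUtils.getTableDir(new Path("/hbase"), TableName.valueOf("ns1", "t1"));
  //   // dir -> /hbase/data/ns1/t1
  //   TableName tn = FSUtils.getTableName(dir);
  //   // tn  -> ns1:t1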

  /**
   * Returns the {@link org.apache.hadoop.fs.Path} object representing
   * the namespace directory under path rootdir.
   *
   * @param rootdir qualified path of HBase root directory
   * @param namespace namespace name
   * @return {@link org.apache.hadoop.fs.Path} for the namespace
   */
  public static Path getNamespaceDir(Path rootdir, final String namespace) {
    return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
        new Path(namespace)));
  }

  /**
   * A {@link FileStatusFilter} that accepts only regular files.
   */
  static class FileFilter extends AbstractFileStatusFilter {
    private final FileSystem fs;

    public FileFilter(final FileSystem fs) {
      this.fs = fs;
    }

    @Override
    protected boolean accept(Path p, @CheckForNull Boolean isDir) {
      try {
        return isFile(fs, isDir, p);
      } catch (IOException e) {
        LOG.warn("Unable to verify if path=" + p + " is a regular file", e);
        return false;
      }
    }
  }

  /**
   * Directory filter that doesn't include any of the directories in the specified blacklist.
   */
  public static class BlackListDirFilter extends AbstractFileStatusFilter {
    private final FileSystem fs;
    private List<String> blacklist;

    /**
     * Create a filter on the given filesystem with the specified blacklist
     * @param fs filesystem to filter
     * @param directoryNameBlackList list of the names of the directories to filter. If
     *          <tt>null</tt>, all directories are returned
     */
    @SuppressWarnings("unchecked")
    public BlackListDirFilter(final FileSystem fs, final List<String> directoryNameBlackList) {
      this.fs = fs;
      blacklist =
        (List<String>) (directoryNameBlackList == null ? Collections.emptyList()
          : directoryNameBlackList);
    }

    @Override
    protected boolean accept(Path p, @CheckForNull Boolean isDir) {
      if (!isValidName(p.getName())) {
        return false;
      }

      try {
        return isDirectory(fs, isDir, p);
      } catch (IOException e) {
        LOG.warn("An error occurred while verifying if [" + p.toString()
            + "] is a valid directory. Returning 'not valid' and continuing.", e);
        return false;
      }
    }

    protected boolean isValidName(final String name) {
      return !blacklist.contains(name);
    }
  }

  /**
   * A {@link FileStatusFilter} that only allows directories.
   */
  public static class DirFilter extends BlackListDirFilter {

    public DirFilter(FileSystem fs) {
      super(fs, null);
    }
  }

  /**
   * A filter that returns usertable directories. To get all directories use the
   * {@link BlackListDirFilter} with a <tt>null</tt> blacklist.
   */
  public static class UserTableDirFilter extends BlackListDirFilter {
    public UserTableDirFilter(FileSystem fs) {
      super(fs, HConstants.HBASE_NON_TABLE_DIRS);
    }

    @Override
    protected boolean isValidName(final String name) {
      if (!super.isValidName(name))
        return false;

      try {
        TableName.isLegalTableQualifierName(Bytes.toBytes(name));
      } catch (IllegalArgumentException e) {
        LOG.info("INVALID NAME " + name);
        return false;
      }
      return true;
    }
  }

  /**
   * Heuristic to determine whether it is safe or not to open a file for append.
   * Looks both for dfs.support.append and uses reflection to search
   * for SequenceFile.Writer.syncFs() or FSDataOutputStream.hflush().
   * @param conf configuration to check
   * @return true if append is supported
   */
  public static boolean isAppendSupported(final Configuration conf) {
    boolean append = conf.getBoolean("dfs.support.append", false);
    if (append) {
      try {
        // TODO: The implementation that comes back when we do a createWriter
        // may not be using SequenceFile so the below is not a definitive test.
        // Will do for now (hdfs-200).
        SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
        append = true;
      } catch (SecurityException e) {
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    if (!append) {
      // Look for the 0.21, 0.22, new-style append evidence.
      try {
        FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
        append = true;
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    return append;
  }

  /**
   * @param conf configuration
   * @return true if the filesystem's scheme is 'hdfs'
   * @throws IOException e
   */
  public static boolean isHDFS(final Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    String scheme = fs.getUri().getScheme();
    return scheme.equalsIgnoreCase("hdfs");
  }

  /**
   * Recover the lease on a file that may have been left open by another process.
   * @param fs FileSystem handle
   * @param p Path of the file to recover the lease on
   * @param conf Configuration handle
   * @param reporter progress reporter, may be null
   * @throws IOException e
   */
  public abstract void recoverFileLease(final FileSystem fs, final Path p,
      Configuration conf, CancelableProgressable reporter) throws IOException;

  public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
  throws IOException {
    List<Path> tableDirs = new LinkedList<Path>();

    for(FileStatus status :
        fs.globStatus(new Path(rootdir,
            new Path(HConstants.BASE_NAMESPACE_DIR, "*")))) {
      tableDirs.addAll(FSUtils.getLocalTableDirs(fs, status.getPath()));
    }
    return tableDirs;
  }

  /**
   * @param fs filesystem object
   * @param rootdir path to a namespace directory
   * @return all the table directories under <code>rootdir</code>, ignoring non-table
   * folders such as .logs, .oldlogs and .corrupt
   * @throws IOException e
   */
  public static List<Path> getLocalTableDirs(final FileSystem fs, final Path rootdir)
  throws IOException {
    // presumes any directory under hbase.rootdir is a table
    FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs));
    List<Path> tabledirs = new ArrayList<Path>(dirs.length);
    for (FileStatus dir: dirs) {
      tabledirs.add(dir.getPath());
    }
    return tabledirs;
  }

  /**
   * Checks if the given path contains the 'recovered.edits' directory.
   * @param path must not be null
   * @return true if the path has 'recovered.edits' in it
   */
  public static boolean isRecoveredEdits(Path path) {
    return path.toString().contains(HConstants.RECOVERED_EDITS_DIR);
  }

  /**
   * Filter for all dirs that look like region directories.
   */
  public static class RegionDirFilter extends AbstractFileStatusFilter {
    // Region dir names are hex-encoded md5, so all lower-case hex characters.
    final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
    final FileSystem fs;

    public RegionDirFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    protected boolean accept(Path p, @CheckForNull Boolean isDir) {
      if (!regionDirPattern.matcher(p.getName()).matches()) {
        return false;
      }

      try {
        return isDirectory(fs, isDir, p);
      } catch (IOException ioe) {
        // Maybe the file was moved or the fs was disconnected.
        LOG.warn("Skipping file " + p +" due to IOException", ioe);
        return false;
      }
    }
  }

  /**
   * Given a particular table dir, return all the region dirs inside it, excluding files such as
   * .tableinfo.
   * @param fs a file system for the Path
   * @param tableDir path to a specific table directory &lt;hbase.rootdir&gt;/&lt;tabledir&gt;
   * @return list of paths to valid region directories in the table dir
   * @throws IOException
   */
  public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException {
    // assumes we are in a table dir.
    List<FileStatus> rds = listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs));
    if (rds == null) {
      return new ArrayList<Path>();
    }
    List<Path> regionDirs = new ArrayList<Path>(rds.size());
    for (FileStatus rdfs: rds) {
      Path rdPath = rdfs.getPath();
      regionDirs.add(rdPath);
    }
    return regionDirs;
  }

  /**
   * Filter for all dirs that are legal column family names. This is generally used for colfam
   * dirs &lt;hbase.rootdir&gt;/&lt;tabledir&gt;/&lt;regiondir&gt;/&lt;colfamdir&gt;.
   */
  public static class FamilyDirFilter extends AbstractFileStatusFilter {
    final FileSystem fs;

    public FamilyDirFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    protected boolean accept(Path p, @CheckForNull Boolean isDir) {
      try {
        // throws IAE if invalid
        HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(p.getName()));
      } catch (IllegalArgumentException iae) {
        // path name is an invalid family name and thus is excluded.
        return false;
      }

      try {
        return isDirectory(fs, isDir, p);
      } catch (IOException ioe) {
        // Maybe the file was moved or the fs was disconnected.
        LOG.warn("Skipping file " + p +" due to IOException", ioe);
        return false;
      }
    }
  }

  /**
   * Given a particular region dir, return all the family dirs inside it.
   *
   * @param fs a file system for the Path
   * @param regionDir path to a specific region directory
   * @return list of paths to valid family directories in the region dir
   * @throws IOException
   */
  public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
    // assumes we are in a region dir.
    FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
    List<Path> familyDirs = new ArrayList<Path>(fds.length);
    for (FileStatus fdfs: fds) {
      Path fdPath = fdfs.getPath();
      familyDirs.add(fdPath);
    }
    return familyDirs;
  }

  public static List<Path> getReferenceFilePaths(final FileSystem fs, final Path familyDir) throws IOException {
    List<FileStatus> fds = listStatusWithStatusFilter(fs, familyDir, new ReferenceFileFilter(fs));
    if (fds == null) {
      return new ArrayList<Path>();
    }
    List<Path> referenceFiles = new ArrayList<Path>(fds.size());
    for (FileStatus fdfs: fds) {
      Path fdPath = fdfs.getPath();
      referenceFiles.add(fdPath);
    }
    return referenceFiles;
  }

  /**
   * Filter for HFiles that excludes reference files.
   */
  public static class HFileFilter extends AbstractFileStatusFilter {
    final FileSystem fs;

    public HFileFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    protected boolean accept(Path p, @CheckForNull Boolean isDir) {
      if (!StoreFileInfo.isHFile(p)) {
        return false;
      }

      try {
        return isFile(fs, isDir, p);
      } catch (IOException ioe) {
        // Maybe the file was moved or the fs was disconnected.
        LOG.warn("Skipping file " + p +" due to IOException", ioe);
        return false;
      }
    }
  }

  /**
   * Filter for store files that are references to other store files.
   */
  public static class ReferenceFileFilter extends AbstractFileStatusFilter {

    private final FileSystem fs;

    public ReferenceFileFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    protected boolean accept(Path p, @CheckForNull Boolean isDir) {
      if (!StoreFileInfo.isReference(p)) {
        return false;
      }

      try {
        // only files can be references.
        return isFile(fs, isDir, p);
      } catch (IOException ioe) {
        // Maybe the file was moved or the fs was disconnected.
        LOG.warn("Skipping file " + p +" due to IOException", ioe);
        return false;
      }
    }
  }

  /**
   * @param conf configuration
   * @return the filesystem of the hbase rootdir
   * @throws IOException from underlying FileSystem
   */
  public static FileSystem getCurrentFileSystem(Configuration conf)
  throws IOException {
    return getRootDir(conf).getFileSystem(conf);
  }

  /**
   * Runs through the HBase rootdir/tablename and creates a reverse lookup map for
   * table StoreFile names to the full Path.
   * <br>
   * Example...<br>
   * Key = 3944417774205889744  <br>
   * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
   *
   * @param map map to add values to. If null, this method will create and populate one to return
   * @param fs the file system to use
   * @param hbaseRootDir the root directory to scan
   * @param tableName name of the table to scan
   * @return Map keyed by StoreFile name with a value of the full Path
   * @throws IOException when scanning the directory fails
   * @throws InterruptedException
   */
  public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
  final FileSystem fs, final Path hbaseRootDir, TableName tableName)
  throws IOException, InterruptedException {
    return getTableStoreFilePathMap(map, fs, hbaseRootDir, tableName, null, null, null);
  }

  /**
   * Runs through the HBase rootdir/tablename and creates a reverse lookup map for
   * table StoreFile names to the full Path. Note that because this method can be called
   * on a 'live' HBase system, it skips files that no longer exist by the time it traverses
   * them; similarly, consumers of the result need to account for entries that may have
   * disappeared by the time this call completes.
   * <br>
   * Example...<br>
   * Key = 3944417774205889744  <br>
   * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
   *
   * @param resultMap map to add values to. If null, this method will create and populate one to return
   * @param fs the file system to use
   * @param hbaseRootDir the root directory to scan
   * @param tableName name of the table to scan
   * @param sfFilter optional path filter to apply to store files
   * @param executor optional executor service to parallelize this operation
   * @param errors ErrorReporter instance or null
   * @return Map keyed by StoreFile name with a value of the full Path
   * @throws IOException when scanning the directory fails
   * @throws InterruptedException
   */
  public static Map<String, Path> getTableStoreFilePathMap(
      Map<String, Path> resultMap,
      final FileSystem fs, final Path hbaseRootDir, TableName tableName, final PathFilter sfFilter,
      ExecutorService executor, final ErrorReporter errors) throws IOException, InterruptedException {

    final Map<String, Path> finalResultMap =
        resultMap == null ? new ConcurrentHashMap<String, Path>(128, 0.75f, 32) : resultMap;

    // only include the directory paths to tables
    Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
    // Inside a table, there are compaction.dir directories to skip. Otherwise, everything
    // else should be regions.
    final FamilyDirFilter familyFilter = new FamilyDirFilter(fs);
    final Vector<Exception> exceptions = new Vector<Exception>();

    try {
      List<FileStatus> regionDirs = FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs));
      if (regionDirs == null) {
        return finalResultMap;
      }

      final List<Future<?>> futures = new ArrayList<Future<?>>(regionDirs.size());

      for (FileStatus regionDir : regionDirs) {
        if (null != errors) {
          errors.progress();
        }
        final Path dd = regionDir.getPath();

        if (!exceptions.isEmpty()) {
          break;
        }

        Runnable getRegionStoreFileMapCall = new Runnable() {
          @Override
          public void run() {
            try {
              HashMap<String,Path> regionStoreFileMap = new HashMap<String, Path>();
              List<FileStatus> familyDirs = FSUtils.listStatusWithStatusFilter(fs, dd, familyFilter);
              if (familyDirs == null) {
                if (!fs.exists(dd)) {
                  LOG.warn("Skipping region because it no longer exists: " + dd);
                } else {
                  LOG.warn("Skipping region because it has no family dirs: " + dd);
                }
                return;
              }
              for (FileStatus familyDir : familyDirs) {
                if (null != errors) {
                  errors.progress();
                }
                Path family = familyDir.getPath();
                if (family.getName().equals(HConstants.RECOVERED_EDITS_DIR)) {
                  continue;
                }
                // now in family, iterate over the StoreFiles and
                // put them in the map
                FileStatus[] familyStatus = fs.listStatus(family);
                for (FileStatus sfStatus : familyStatus) {
                  if (null != errors) {
                    errors.progress();
                  }
                  Path sf = sfStatus.getPath();
                  if (sfFilter == null || sfFilter.accept(sf)) {
                    regionStoreFileMap.put( sf.getName(), sf);
                  }
                }
              }
              finalResultMap.putAll(regionStoreFileMap);
            } catch (Exception e) {
              LOG.error("Could not get region store file map for region: " + dd, e);
              exceptions.add(e);
            }
          }
        };

        // If an executor is available, submit async tasks to run concurrently;
        // otherwise do serial sync execution.
        if (executor != null) {
          Future<?> future = executor.submit(getRegionStoreFileMapCall);
          futures.add(future);
        } else {
          FutureTask<?> future = new FutureTask<Object>(getRegionStoreFileMapCall, null);
          future.run();
          futures.add(future);
        }
      }

      // Ensure all pending tasks are complete (or that we run into an exception).
      for (Future<?> f : futures) {
        if (!exceptions.isEmpty()) {
          break;
        }
        try {
          f.get();
        } catch (ExecutionException e) {
          LOG.error("Unexpected exec exception! Should've been caught already. (Bug?)", e);
          // Shouldn't happen; we already logged/caught any exceptions in the Runnable.
        }
      }
    } catch (IOException e) {
      LOG.error("Cannot execute getTableStoreFilePathMap for " + tableName, e);
      exceptions.add(e);
    } finally {
      if (!exceptions.isEmpty()) {
        // Just throw the first exception as an indication something bad happened.
        // No need to propagate all of them; they were all logged above anyway.
        Throwables.propagateIfInstanceOf(exceptions.firstElement(), IOException.class);
        throw Throwables.propagate(exceptions.firstElement());
      }
    }

    return finalResultMap;
  }
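
  // Usage sketch (pool size and table name are illustrative): scan one table's store files in
  // parallel. Passing null for the map, filter, and error reporter uses the defaults described
  // in the javadoc above.
  //
  //   ExecutorService pool = java.util.concurrent.Executors.newFixedThreadPool(8);
  //   try {
  //     Map<String, Path> sfMap = FSUtils.getTableStoreFilePathMap(
  //         null, fs, rootDir, TableName.valueOf("t1"), null, pool, null);
  //   } finally {
  //     pool.shutdown();
  //   }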

  public static int getRegionReferenceFileCount(final FileSystem fs, final Path p) {
    int result = 0;
    try {
      for (Path familyDir:getFamilyDirs(fs, p)){
        result += getReferenceFilePaths(fs, familyDir).size();
      }
    } catch (IOException e) {
      LOG.warn("Error counting reference files", e);
    }
    return result;
  }

  /**
   * Runs through the HBase rootdir and creates a reverse lookup map for
   * table StoreFile names to the full Path.
   * <br>
   * Example...<br>
   * Key = 3944417774205889744  <br>
   * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
   *
   * @param fs the file system to use
   * @param hbaseRootDir the root directory to scan
   * @return Map keyed by StoreFile name with a value of the full Path
   * @throws IOException when scanning the directory fails
   * @throws InterruptedException
   */
  public static Map<String, Path> getTableStoreFilePathMap(
    final FileSystem fs, final Path hbaseRootDir)
  throws IOException, InterruptedException {
    return getTableStoreFilePathMap(fs, hbaseRootDir, null, null, null);
  }

  /**
   * Runs through the HBase rootdir and creates a reverse lookup map for
   * table StoreFile names to the full Path.
   * <br>
   * Example...<br>
   * Key = 3944417774205889744  <br>
   * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
   *
   * @param fs the file system to use
   * @param hbaseRootDir the root directory to scan
   * @param sfFilter optional path filter to apply to store files
   * @param executor optional executor service to parallelize this operation
   * @param errors ErrorReporter instance or null
   * @return Map keyed by StoreFile name with a value of the full Path
   * @throws IOException when scanning the directory fails
   * @throws InterruptedException
   */
  public static Map<String, Path> getTableStoreFilePathMap(
    final FileSystem fs, final Path hbaseRootDir, PathFilter sfFilter,
    ExecutorService executor, ErrorReporter errors)
  throws IOException, InterruptedException {
    ConcurrentHashMap<String, Path> map = new ConcurrentHashMap<String, Path>(1024, 0.75f, 32);

    // if this method looks similar to 'getTableFragmentation' that is because
    // it was borrowed from it.

    // only include the directory paths to tables
    for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
      getTableStoreFilePathMap(map, fs, hbaseRootDir,
          FSUtils.getTableName(tableDir), sfFilter, executor, errors);
    }
    return map;
  }

  /**
   * Filters FileStatuses in an array and returns a list.
   *
   * @param input   an array of FileStatuses
   * @param filter  a required filter to filter the array
   * @return        a list of FileStatuses
   */
  public static List<FileStatus> filterFileStatuses(FileStatus[] input,
      FileStatusFilter filter) {
    if (input == null) return null;
    return filterFileStatuses(Iterators.forArray(input), filter);
  }

  /**
   * Filters FileStatuses in an iterator and returns a list.
   *
   * @param input   an iterator of FileStatuses
   * @param filter  a required filter to filter the iterator
   * @return        a list of FileStatuses
   */
  public static List<FileStatus> filterFileStatuses(Iterator<FileStatus> input,
      FileStatusFilter filter) {
    if (input == null) return null;
    ArrayList<FileStatus> results = new ArrayList<FileStatus>();
    while (input.hasNext()) {
      FileStatus f = input.next();
      if (filter.accept(f)) {
        results.add(f);
      }
    }
    return results;
  }

  /**
   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal.
   * This accommodates differences between hadoop versions, where hadoop 1
   * does not throw a FileNotFoundException and returns an empty FileStatus[],
   * while Hadoop 2 will throw FileNotFoundException.
   *
   * @param fs file system
   * @param dir directory
   * @param filter file status filter
   * @return null if dir is empty or doesn't exist, otherwise FileStatus list
   */
  public static List<FileStatus> listStatusWithStatusFilter(final FileSystem fs,
      final Path dir, final FileStatusFilter filter) throws IOException {
    FileStatus [] status = null;
    try {
      status = fs.listStatus(dir);
    } catch (FileNotFoundException fnfe) {
      // if directory doesn't exist, return null
      if (LOG.isTraceEnabled()) {
        LOG.trace(dir + " doesn't exist");
      }
    }

    if (status == null || status.length < 1) {
      return null;
    }

    if (filter == null) {
      return Arrays.asList(status);
    } else {
      List<FileStatus> status2 = filterFileStatuses(status, filter);
      if (status2 == null || status2.isEmpty()) {
        return null;
      } else {
        return status2;
      }
    }
  }
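
  // Usage sketch: list only region directories under a table dir, treating a missing or empty
  // directory as "nothing to do" rather than an error.
  //
  //   List<FileStatus> regions =
  //       FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs));
  //   if (regions == null) {
  //     return;   // directory absent or nothing matched
  //   }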

  /**
   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal.
   * This accommodates differences between hadoop versions, where hadoop 1
   * does not throw a FileNotFoundException and returns an empty FileStatus[],
   * while Hadoop 2 will throw FileNotFoundException.
   *
   * Where possible, prefer {@link #listStatusWithStatusFilter(FileSystem, Path,
   * FileStatusFilter)} instead.
   *
   * @param fs file system
   * @param dir directory
   * @param filter path filter
   * @return null if dir is empty or doesn't exist, otherwise FileStatus array
   */
  public static FileStatus [] listStatus(final FileSystem fs,
      final Path dir, final PathFilter filter) throws IOException {
    FileStatus [] status = null;
    try {
      status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
    } catch (FileNotFoundException fnfe) {
      // if directory doesn't exist, return null
      if (LOG.isTraceEnabled()) {
        LOG.trace(dir + " doesn't exist");
      }
    }
    if (status == null || status.length < 1) return null;
    return status;
  }

  /**
   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal.
   * This accommodates differences between hadoop versions.
   *
   * @param fs file system
   * @param dir directory
   * @return null if dir is empty or doesn't exist, otherwise FileStatus array
   */
  public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
    return listStatus(fs, dir, null);
  }

  /**
   * Calls fs.delete() and returns the value returned by the fs.delete().
   *
   * @param fs must not be null
   * @param path must not be null
   * @param recursive delete tree rooted at path
   * @return the value returned by the fs.delete()
   * @throws IOException from underlying FileSystem
   */
  public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
      throws IOException {
    return fs.delete(path, recursive);
  }

  /**
   * Calls fs.exists(). Checks if the specified path exists.
   *
   * @param fs must not be null
   * @param path must not be null
   * @return the value returned by fs.exists()
   * @throws IOException from underlying FileSystem
   */
  public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
    return fs.exists(path);
  }

  /**
   * Throw an exception if an action is not permitted by a user on a file.
   *
   * @param ugi
   *          the user
   * @param file
   *          the file
   * @param action
   *          the action
   */
  public static void checkAccess(UserGroupInformation ugi, FileStatus file,
      FsAction action) throws AccessDeniedException {
    if (ugi.getShortUserName().equals(file.getOwner())) {
      if (file.getPermission().getUserAction().implies(action)) {
        return;
      }
    } else if (contains(ugi.getGroupNames(), file.getGroup())) {
      if (file.getPermission().getGroupAction().implies(action)) {
        return;
      }
    } else if (file.getPermission().getOtherAction().implies(action)) {
      return;
    }
    throw new AccessDeniedException("Permission denied:" + " action=" + action
        + " path=" + file.getPath() + " user=" + ugi.getShortUserName());
  }

  private static boolean contains(String[] groups, String user) {
    for (String group : groups) {
      if (group.equals(user)) {
        return true;
      }
    }
    return false;
  }

  /**
   * Log the current state of the filesystem from a certain root directory.
   * @param fs filesystem to investigate
   * @param root root file/directory to start logging from
   * @param LOG log to output information
   * @throws IOException if an unexpected exception occurs
   */
  public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
      throws IOException {
    LOG.debug("Current file system:");
    logFSTree(LOG, fs, root, "|-");
  }

  /**
   * Recursive helper to log the state of the FS.
   *
   * @see #logFileSystemState(FileSystem, Path, Log)
   */
  private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
      throws IOException {
    FileStatus[] files = FSUtils.listStatus(fs, root, null);
    if (files == null) return;

    for (FileStatus file : files) {
      if (file.isDirectory()) {
        LOG.debug(prefix + file.getPath().getName() + "/");
        logFSTree(LOG, fs, file.getPath(), prefix + "---");
      } else {
        LOG.debug(prefix + file.getPath().getName());
      }
    }
  }

  public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest)
      throws IOException {
    // set the modify time for TimeToLive Cleaner
    fs.setTimes(src, EnvironmentEdgeManager.currentTime(), -1);
    return fs.rename(src, dest);
  }

  /**
   * Scans the root path of the file system to get the degree of locality for each
   * region on each of the servers having at least one block of that region.
   *
   * @param conf
   *          the configuration to use
   * @return the mapping from region encoded name to a map of server names to
   *           locality fraction
   * @throws IOException
   *           in case of file system errors or interrupts
   */
  public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
      final Configuration conf) throws IOException {
    return getRegionDegreeLocalityMappingFromFS(
        conf, null,
        conf.getInt(THREAD_POOLSIZE, DEFAULT_THREAD_POOLSIZE));

  }

  /**
   * Scans the root path of the file system to get the degree of locality for each
   * region on each of the servers having at least one block of that region.
   *
   * @param conf
   *          the configuration to use
   * @param desiredTable
   *          the table you wish to scan locality for
   * @param threadPoolSize
   *          the thread pool size to use
   * @return the mapping from region encoded name to a map of server names to
   *           locality fraction
   * @throws IOException
   *           in case of file system errors or interrupts
   */
  public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
      final Configuration conf, final String desiredTable, int threadPoolSize)
      throws IOException {
    Map<String, Map<String, Float>> regionDegreeLocalityMapping =
        new ConcurrentHashMap<String, Map<String, Float>>();
    getRegionLocalityMappingFromFS(conf, desiredTable, threadPoolSize, null,
        regionDegreeLocalityMapping);
    return regionDegreeLocalityMapping;
  }

  /**
   * Scans the root path of the file system to get either the mapping between the
   * region name and its best locality region server, or the degree of locality of
   * each region on each of the servers having at least one block of that region.
   * The output map parameters are both optional.
   *
   * @param conf
   *          the configuration to use
   * @param desiredTable
   *          the table you wish to scan locality for
   * @param threadPoolSize
   *          the thread pool size to use
   * @param regionToBestLocalityRSMapping
   *          the map into which to put the best locality mapping, or null
   * @param regionDegreeLocalityMapping
   *          the map into which to put the locality degree mapping, or null;
   *          must be a thread-safe implementation
   * @throws IOException
   *           in case of file system errors or interrupts
   */
  private static void getRegionLocalityMappingFromFS(
      final Configuration conf, final String desiredTable,
      int threadPoolSize,
      Map<String, String> regionToBestLocalityRSMapping,
      Map<String, Map<String, Float>> regionDegreeLocalityMapping)
      throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path rootPath = FSUtils.getRootDir(conf);
    long startTime = EnvironmentEdgeManager.currentTime();
    Path queryPath;
    // NOTE: the body below is a reconstruction -- the original was elided here. It sketches
    // the standard approach: glob the region directories, then scan each region with
    // FSRegionScanner on a bounded thread pool.
    if (null == desiredTable) {
      queryPath =
          new Path(new Path(rootPath, HConstants.BASE_NAMESPACE_DIR).toString() + "/*/*/*/");
    } else {
      queryPath = new Path(
          FSUtils.getTableDir(rootPath, TableName.valueOf(desiredTable)).toString() + "/*/");
    }

    FileStatus[] statusList = fs.globStatus(queryPath);
    if (null == statusList) {
      return;
    }

    // Lower the number of threads in case we have very few expected regions.
    threadPoolSize = Math.min(threadPoolSize, statusList.length);

    // Run the region scans in multiple threads.
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(threadPoolSize,
        threadPoolSize, 60, TimeUnit.SECONDS,
        new ArrayBlockingQueue<Runnable>(statusList.length));
    try {
      // Ignore all file status items that are not directories of interest.
      for (FileStatus regionStatus : statusList) {
        if (null == regionStatus || !regionStatus.isDirectory()) {
          continue;
        }
        tpe.execute(new FSRegionScanner(fs, regionStatus.getPath(),
            regionToBestLocalityRSMapping, regionDegreeLocalityMapping));
      }
    } finally {
      tpe.shutdown();
      int threadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY,
          60 * 1000);
      try {
        // Wait until the pool has terminated, either naturally or because a
        // scan thread threw.
        while (!tpe.awaitTermination(threadWakeFrequency,
            TimeUnit.MILLISECONDS)) {
          // Print a rough progress estimate without introducing an AtomicInteger.
          LOG.info("Locality checking is underway: { Scanned Regions : "
              + tpe.getCompletedTaskCount() + "/"
              + tpe.getTaskCount() + " }");
        }
      } catch (InterruptedException e) {
        throw (InterruptedIOException)new InterruptedIOException().initCause(e);
      }
    }

    LOG.info("Finished computing region location mapping. Elapsed time: " +
        (EnvironmentEdgeManager.currentTime() - startTime) + " ms");
  }
  /**
   * Do our short circuit read setup.
   * Checks the buffer size to use and whether to do checksumming in hbase or hdfs.
   * @param conf configuration to check and amend
   */
  public static void setupShortCircuitRead(final Configuration conf) {
    // Check that the user has not set the "dfs.client.read.shortcircuit.skip.checksum" property.
    boolean shortCircuitSkipChecksum =
      conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
    boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
    if (shortCircuitSkipChecksum) {
      LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " +
        "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " +
        "it, see https://issues.apache.org/jira/browse/HBASE-6868." : ""));
      assert !shortCircuitSkipChecksum; // this will fail if assertions are on
    }
    checkShortCircuitReadBufferSize(conf);
  }

  /**
   * Check if the short circuit read buffer size is set and, if not, set it to the hbase value.
   * @param conf configuration to check and amend
   */
  public static void checkShortCircuitReadBufferSize(final Configuration conf) {
    final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
    final int notSet = -1;
    // DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY is only defined in hadoop 2
    final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
    int size = conf.getInt(dfsKey, notSet);
    // If a size is set, respect it.
    if (size != notSet) return;
    // But the short circuit buffer size is normally not set. Put in place the hbase wanted size.
    int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
    conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
  }
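
  // Config sketch (values are illustrative): with nothing set, the effective short-circuit
  // buffer becomes 2 * HConstants.DEFAULT_BLOCKSIZE = 2 * 65536 = 131072 bytes; an explicit
  // dfs.client.read.shortcircuit.buffer.size always wins over the hbase-prefixed override.
  //
  //   conf.setInt("hbase.dfs.client.read.shortcircuit.buffer.size", 64 * 1024);
  //   FSUtils.checkShortCircuitReadBufferSize(conf);
  //   // conf.getInt("dfs.client.read.shortcircuit.buffer.size", -1) -> 65536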
}