View Javadoc

1   /*
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.backup;
20  
21  
22  import java.io.IOException;
23  import java.util.ArrayList;
24  import java.util.List;
25  
26  import org.apache.commons.logging.Log;
27  import org.apache.commons.logging.LogFactory;
28  import org.apache.hadoop.conf.Configuration;
29  import org.apache.hadoop.fs.FileSystem;
30  import org.apache.hadoop.fs.LocatedFileStatus;
31  import org.apache.hadoop.fs.Path;
32  import org.apache.hadoop.fs.RemoteIterator;
33  import org.apache.hadoop.hbase.HBaseConfiguration;
34  import org.apache.hadoop.hbase.HBaseTestingUtility;
35  import org.apache.hadoop.hbase.HColumnDescriptor;
36  import org.apache.hadoop.hbase.HConstants;
37  import org.apache.hadoop.hbase.HTableDescriptor;
38  import org.apache.hadoop.hbase.NamespaceDescriptor;
39  import org.apache.hadoop.hbase.TableName;
40  import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
41  import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
42  import org.apache.hadoop.hbase.client.Admin;
43  import org.apache.hadoop.hbase.client.BackupAdmin;
44  import org.apache.hadoop.hbase.client.Connection;
45  import org.apache.hadoop.hbase.client.ConnectionFactory;
46  import org.apache.hadoop.hbase.client.HBaseAdmin;
47  import org.apache.hadoop.hbase.client.HTable;
48  import org.apache.hadoop.hbase.client.Put;
49  import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
50  import org.apache.hadoop.hbase.util.Bytes;
51  import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
52  import org.junit.AfterClass;
53  import org.junit.BeforeClass;
54  
55  
/**
 * Base class for integration-level backup tests; do not add tests here.
 * Tests that do not require bringing machines up and down belong in
 * TestBackupSmallTests. All other tests should have their own classes
 * and extend this one.
 */
61  public class TestBackupBase {
62  
63    private static final Log LOG = LogFactory.getLog(TestBackupBase.class);
64  
65    protected static Configuration conf1;
66    protected static Configuration conf2;
67  
68    protected static HBaseTestingUtility TEST_UTIL;
69    protected static HBaseTestingUtility TEST_UTIL2;
70    protected static TableName table1 = TableName.valueOf("table1");
71    protected static TableName table2 = TableName.valueOf("table2");
72    protected static TableName table3 = TableName.valueOf("table3");
73    protected static TableName table4 = TableName.valueOf("table4");
74  
75    protected static TableName table1_restore = TableName.valueOf("ns1:table1_restore");
76    protected static TableName table2_restore = TableName.valueOf("ns2:table2_restore");
77    protected static TableName table3_restore = TableName.valueOf("ns3:table3_restore");
78    protected static TableName table4_restore = TableName.valueOf("ns4:table4_restore");
79  
80    protected static final int NB_ROWS_IN_BATCH = 999;
81    protected static final byte[] qualName = Bytes.toBytes("q1");
82    protected static final byte[] famName = Bytes.toBytes("f");
83  
84    protected static String BACKUP_ROOT_DIR = "/backupUT";
85    protected static String BACKUP_REMOTE_ROOT_DIR = "/backupUT";
86  
87    protected static final String BACKUP_ZNODE = "/backup/hbase";
88    protected static final String BACKUP_SUCCEED_NODE = "complete";
89    protected static final String BACKUP_FAILED_NODE = "failed";
90  
91    /**
92     * @throws java.lang.Exception
93     */
94    @BeforeClass
95    public static void setUpBeforeClass() throws Exception {
96      TEST_UTIL = new HBaseTestingUtility();
97      conf1 = TEST_UTIL.getConfiguration();
98      conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
99      // Set MultiWAL (with 2 default WAL files per RS)
100     //conf1.set(WAL_PROVIDER, "multiwal");
101     TEST_UTIL.startMiniZKCluster();
102     MiniZooKeeperCluster miniZK = TEST_UTIL.getZkCluster();
103 
104     conf2 = HBaseConfiguration.create(conf1);
105     conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
106     TEST_UTIL2 = new HBaseTestingUtility(conf2);
107     TEST_UTIL2.setZkCluster(miniZK);
108     TEST_UTIL.startMiniCluster();
109     TEST_UTIL2.startMiniCluster();
110     conf1 = TEST_UTIL.getConfiguration();
111 
112     TEST_UTIL.startMiniMapReduceCluster();
113     BACKUP_ROOT_DIR = TEST_UTIL.getConfiguration().get("fs.defaultFS") + "/backupUT";
114     LOG.info("ROOTDIR " + BACKUP_ROOT_DIR);
115     BACKUP_REMOTE_ROOT_DIR = TEST_UTIL2.getConfiguration().get("fs.defaultFS") + "/backupUT";
116     LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR);
117     waitForSystemTable();
118     createTables();
119   }
120   
121   public static void waitForSystemTable() throws Exception
122   {
123     try(Admin admin = TEST_UTIL.getHBaseAdmin();) {
124       while (!admin.tableExists(BackupSystemTable.getTableName()) 
125           || !admin.isTableAvailable(BackupSystemTable.getTableName())) {
126         Thread.sleep(1000);
127       }      
128     }
129     LOG.debug("backup table exists and available");
130 
131   }
132 
133   /**
134    * @throws java.lang.Exception
135    */
136   @AfterClass
137   public static void tearDownAfterClass() throws Exception {
138     SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getHBaseAdmin());
139     SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
140     TEST_UTIL2.shutdownMiniCluster();
141     TEST_UTIL.shutdownMiniCluster();
142     TEST_UTIL.shutdownMiniMapReduceCluster();
143   }
144 
145   protected String backupTables(BackupType type, List<TableName> tables, String path)
146       throws IOException {
147     Connection conn = null;
148     HBaseAdmin admin = null;
149     BackupAdmin badmin = null;
150     String backupId;
151     try {
152       conn = ConnectionFactory.createConnection(conf1);
153       admin = (HBaseAdmin) conn.getAdmin();
154       BackupRequest request = new BackupRequest();
155       request.setBackupType(type).setTableList(tables).setTargetRootDir(path);
156       badmin = admin.getBackupAdmin();
157       backupId = badmin.backupTables(request);
158     } finally {
159       if(badmin != null){
160         badmin.close();
161       }
162       if (admin != null) {
163         admin.close();
164       }
165       if (conn != null) {
166         conn.close();
167       }
168     }
169     return backupId;
170   }
171 
172   protected String fullTableBackup(List<TableName> tables) throws IOException {
173     return backupTables(BackupType.FULL, tables, BACKUP_ROOT_DIR);
174   }
175 
176   protected String incrementalTableBackup(List<TableName> tables) throws IOException {
177     return backupTables(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
178   }
179   
180   protected static void loadTable(HTable table) throws Exception {
181 
182     Put p; // 100 + 1 row to t1_syncup
183     for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
184       p = new Put(Bytes.toBytes("row" + i));
185       p.addColumn(famName, qualName, Bytes.toBytes("val" + i));
186       table.put(p);
187     }
188   }
189 
190   protected static void createTables() throws Exception {
191 
192     long tid = System.currentTimeMillis();
193     table1 = TableName.valueOf("ns1:test-" + tid);
194     HBaseAdmin ha = TEST_UTIL.getHBaseAdmin();
195     
196     // Create namespaces
197     NamespaceDescriptor desc1 = NamespaceDescriptor.create("ns1").build();
198     NamespaceDescriptor desc2 = NamespaceDescriptor.create("ns2").build();
199     NamespaceDescriptor desc3 = NamespaceDescriptor.create("ns3").build();
200     NamespaceDescriptor desc4 = NamespaceDescriptor.create("ns4").build();
201     
202     ha.createNamespace(desc1);
203     ha.createNamespace(desc2);
204     ha.createNamespace(desc3);
205     ha.createNamespace(desc4);
206 
207     
208     HTableDescriptor desc = new HTableDescriptor(table1);
209     HColumnDescriptor fam = new HColumnDescriptor(famName);
210     desc.addFamily(fam);
211     ha.createTable(desc);
212     Connection conn = ConnectionFactory.createConnection(conf1);
213     HTable table = (HTable) conn.getTable(table1);
214     loadTable(table);
215     table.close();
216     table2 = TableName.valueOf("ns2:test-" + tid + 1);
217     desc = new HTableDescriptor(table2);
218     desc.addFamily(fam);
219     ha.createTable(desc);
220     table = (HTable) conn.getTable(table2);
221     loadTable(table);
222     table.close();
223     table3 = TableName.valueOf("ns3:test-" + tid + 2);
224     table = TEST_UTIL.createTable(table3, famName);
225     table.close();
226     table4 = TableName.valueOf("ns4:test-" + tid + 3);
227     table = TEST_UTIL.createTable(table4, famName);
228     table.close();
229     ha.close();
230     conn.close();
231   }
232 
233   protected boolean checkSucceeded(String backupId) throws IOException {
234     BackupInfo status = getBackupContext(backupId);
235     if (status == null) return false;
236     return status.getState() == BackupState.COMPLETE;
237   }
238 
239   protected boolean checkFailed(String backupId) throws IOException {
240     BackupInfo status = getBackupContext(backupId);
241     if (status == null) return false;
242     return status.getState() == BackupState.FAILED;
243   }
244 
245   private BackupInfo getBackupContext(String backupId) throws IOException {
246     try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
247       BackupInfo status = table.readBackupInfo(backupId);
248       return status;
249     }
250   }
251 
252 
253   protected BackupAdmin getBackupAdmin() throws IOException {
254     return TEST_UTIL.getConnection().getAdmin().getBackupAdmin();
255   }
256   
257   /**
258    * Get restore request.
259    *  
260    */
261   public  RestoreRequest createRestoreRequest(
262       String backupRootDir,
263       String backupId, boolean check, TableName[] fromTables,
264       TableName[] toTables, boolean isOverwrite) {
265     RestoreRequest request = new RestoreRequest();
266     request.setBackupRootDir(backupRootDir).setBackupId(backupId).setCheck(check).
267     setFromTables(fromTables).setToTables(toTables).setOverwrite(isOverwrite);
268     return request;
269 }
270   
271   /**
272    * Helper method
273    */
274   protected List<TableName> toList(String... args){
275     List<TableName> ret = new ArrayList<>();
276     for(int i=0; i < args.length; i++){
277       ret.add(TableName.valueOf(args[i]));
278     }
279     return ret;
280   }
281     
282   protected void dumpBackupDir() throws IOException
283   {
284     // Dump Backup Dir
285     FileSystem fs = FileSystem.get(conf1);
286     RemoteIterator<LocatedFileStatus> it = fs.listFiles( new Path(BACKUP_ROOT_DIR), true);
287     while(it.hasNext()){
288       LOG.debug("DDEBUG: "+it.next().getPath());
289     }
290 
291   }
292 }