/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 */
package org.apache.hadoop.hbase.replication;

import java.io.IOException;
import java.util.List;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.replication.regionserver.MetricsSource;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager;
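
/**
 * A replication source that does nothing at all; useful for testing
 * {@link ReplicationSourceManager} without starting a real source.
 */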
public class ReplicationSourceDummy implements ReplicationSourceInterface {

  ReplicationSourceManager manager;
  String peerClusterId;
  Path currentPath;

  @Override
  public void init(Configuration conf, FileSystem fs, ReplicationSourceManager manager,
      ReplicationQueues rq, ReplicationPeers rp, Stoppable stopper, String peerClusterId,
      UUID clusterId, ReplicationEndpoint replicationEndpoint, MetricsSource metrics)
      throws IOException {
    this.manager = manager;
    this.peerClusterId = peerClusterId;
  }

  @Override
  public void enqueueLog(Path log) {
    this.currentPath = log;
  }

  @Override
  public Path getCurrentPath() {
    return this.currentPath;
  }

  @Override
  public void startup() {
    // no-op
  }

  @Override
  public void terminate(String reason) {
    // no-op
  }

  @Override
  public void terminate(String reason, Exception e) {
    // no-op
  }

  @Override
  public String getPeerClusterZnode() {
    return peerClusterId;
  }

  @Override
  public String getPeerClusterId() {
    // A recovered queue id has the form "<peerId>-<dead server name>";
    // return only the peer id portion.
    String[] parts = peerClusterId.split("-", 2);
    return parts.length != 1 ? parts[0] : peerClusterId;
  }

  @Override
  public String getStats() {
    return "";
  }

  @Override
  public void addHFileRefs(TableName tableName, byte[] family, List<String> files)
      throws ReplicationException {
    // no-op
  }
}