1   /*
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  
20  package org.apache.hadoop.hbase.client;
21  
22  import java.io.IOException;
23  import java.util.ArrayList;
24  import java.util.Arrays;
25  import java.util.HashMap;
26  import java.util.List;
27  import java.util.Map;
28  import java.util.NavigableSet;
29  import java.util.TreeMap;
30  import java.util.TreeSet;
31  
32  import org.apache.commons.logging.Log;
33  import org.apache.commons.logging.LogFactory;
34  import org.apache.hadoop.hbase.classification.InterfaceAudience;
35  import org.apache.hadoop.hbase.classification.InterfaceStability;
36  import org.apache.hadoop.hbase.HConstants;
37  import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
38  import org.apache.hadoop.hbase.filter.Filter;
39  import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
40  import org.apache.hadoop.hbase.io.TimeRange;
41  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
42  import org.apache.hadoop.hbase.security.access.Permission;
43  import org.apache.hadoop.hbase.security.visibility.Authorizations;
44  import org.apache.hadoop.hbase.util.Bytes;
45  
46  /**
47   * Used to perform Scan operations.
48   * <p>
49   * All operations are identical to {@link Get} with the exception of
50   * instantiation.  Rather than specifying a single row, an optional startRow
51   * and stopRow may be defined.  If rows are not specified, the Scanner will
52   * iterate over all rows.
53   * <p>
54   * To scan everything for each row, instantiate a Scan object.
55   * <p>
56   * To modify scanner caching for just this scan, use {@link #setCaching(int) setCaching}.
57   * If caching is NOT set, we will use the caching value of the hosting {@link Table}.
58   * In addition to row caching, it is possible to specify a
59   * maximum result size, using {@link #setMaxResultSize(long)}. When both are used,
60   * single server requests are limited by either number of rows or maximum result size, whichever
61   * limit comes first.
62   * <p>
63   * To further define the scope of what to get when scanning, perform additional
64   * methods as outlined below.
65   * <p>
66   * To get all columns from specific families, execute {@link #addFamily(byte[]) addFamily}
67   * for each family to retrieve.
68   * <p>
69   * To get specific columns, execute {@link #addColumn(byte[], byte[]) addColumn}
70   * for each column to retrieve.
71   * <p>
72   * To only retrieve columns within a specific range of version timestamps,
73   * execute {@link #setTimeRange(long, long) setTimeRange}.
74   * <p>
75   * To only retrieve columns with a specific timestamp, execute
76   * {@link #setTimeStamp(long) setTimestamp}.
77   * <p>
78   * To limit the number of versions of each column to be returned, execute
79   * {@link #setMaxVersions(int) setMaxVersions}.
80   * <p>
81   * To limit the maximum number of values returned for each call to next(),
82   * execute {@link #setBatch(int) setBatch}.
83   * <p>
84   * To add a filter, execute {@link #setFilter(org.apache.hadoop.hbase.filter.Filter) setFilter}.
85   * <p>
86   * Expert: To explicitly disable server-side block caching for this scan,
87   * execute {@link #setCacheBlocks(boolean)}.
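 * <p>
 * For example, a minimal usage sketch (the <code>table</code> handle and the row, family and
 * qualifier names used here are illustrative, not part of this class):
 * <pre>
 * Scan scan = new Scan(Bytes.toBytes("row-0"), Bytes.toBytes("row-9"));
 * scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("qual"));
 * scan.setCaching(100);
 * try (ResultScanner scanner = table.getScanner(scan)) {
 *   for (Result result : scanner) {
 *     // process each Result client side
 *   }
 * }
 * </pre>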
88   * <p><em>Note:</em> Usage alters Scan instances. Internally, attributes are updated as the Scan
89   * runs and, if enabled, metrics accumulate in the Scan instance. Be aware of this when you
90   * clone a Scan instance or reuse a created Scan instance; it is safer to create
91   * a Scan instance per usage.
92   */
93  @InterfaceAudience.Public
94  @InterfaceStability.Stable
95  public class Scan extends Query {
96    private static final Log LOG = LogFactory.getLog(Scan.class);
97  
98    private static final String RAW_ATTR = "_raw_";
99  
100   private byte [] startRow = HConstants.EMPTY_START_ROW;
101   private byte [] stopRow  = HConstants.EMPTY_END_ROW;
102   private int maxVersions = 1;
103   private int batch = -1;
104 
105   /**
106    * Partial {@link Result}s are {@link Result}s that must be combined to form a complete {@link Result}.
107    * The {@link Result}s had to be returned in fragments (i.e. as partials) because the size of the
108    * cells in the row exceeded max result size on the server. Typically partial results will be
109    * combined client side into complete results before being delivered to the caller. However, if
110    * this flag is set, the caller is indicating that they do not mind seeing partial results (i.e.
111    * they understand that the results returned from the Scanner may only represent part of a
112    * particular row). In such a case, any attempt to combine the partials into a complete result on
113    * the client side will be skipped, and the caller will be able to see the exact results returned
114    * from the server.
115    */
116   private boolean allowPartialResults = false;
117 
118   private int storeLimit = -1;
119   private int storeOffset = 0;
120   private boolean getScan;
121 
122   /**
123    * @deprecated since 1.0.0. Use {@link #setScanMetricsEnabled(boolean)}
124    */
125   // Make private or remove.
126   @Deprecated
127   static public final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable";
128 
129   /**
130    * @deprecated Use {@link #getScanMetrics()}
131    */
132   // Make this private or remove.
133   @Deprecated
134   static public final String SCAN_ATTRIBUTES_METRICS_DATA = "scan.attributes.metrics.data";
135 
136   // If an application wants to use multiple scans over different tables each scan must
137   // define this attribute with the appropriate table name by calling
138   // scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName))
139   static public final String SCAN_ATTRIBUTES_TABLE_NAME = "scan.attributes.table.name";
140 
141   /**
142    * @deprecated without replacement
143    *             This is now a no-op, SEEKs and SKIPs are optimized automatically.
144    *             Will be removed in 2.0+
145    */
146   @Deprecated
147   public static final String HINT_LOOKAHEAD = "_look_ahead_";
148 
149   /*
150    * -1 means no caching
151    */
152   private int caching = -1;
153   private long maxResultSize = -1;
154   private boolean cacheBlocks = true;
155   private boolean reversed = false;
156   private TimeRange tr = new TimeRange();
157   private Map<byte [], NavigableSet<byte []>> familyMap =
158     new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR);
159   private Boolean loadColumnFamiliesOnDemand = null;
160 
161   /**
162    * Set it true for a small scan to get better performance.
163    *
164    * Small scans should use pread, while big scans can use seek + read.
165    *
166    * seek + read is fast but can cause two problems: (1) resource contention and (2)
167    * too much network IO.
168    *
169    * [89-fb] Using pread for non-compaction read request
170    * https://issues.apache.org/jira/browse/HBASE-7266
171    *
172    * On the other hand, if set to true, openScanner, next, and closeScanner are
173    * done in one RPC call, which means better performance for small scans.
174    * [HBASE-9488].
175    *
176    * Generally, if the scan range is within one data block (64KB), it can be
177    * considered a small scan.
178    */
179   private boolean small = false;
180 
181   /**
182    * Create a Scan operation across all rows.
183    */
184   public Scan() {}
185 
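  /**
   * Create a Scan operation starting at the specified row and applying the given filter.
   * @param startRow row to start scanner at or after
   * @param filter filter to apply on this scan
   */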
186   public Scan(byte [] startRow, Filter filter) {
187     this(startRow);
188     this.filter = filter;
189   }
190 
191   /**
192    * Create a Scan operation starting at the specified row.
193    * <p>
194    * If the specified row does not exist, the Scanner will start from the
195    * next closest row after the specified row.
196    * @param startRow row to start scanner at or after
197    */
198   public Scan(byte [] startRow) {
199     this.startRow = startRow;
200   }
201 
202   /**
203    * Create a Scan operation for the range of rows specified.
204    * @param startRow row to start scanner at or after (inclusive)
205    * @param stopRow row to stop scanner before (exclusive)
206    */
207   public Scan(byte [] startRow, byte [] stopRow) {
208     this.startRow = startRow;
209     this.stopRow = stopRow;
210     // a non-empty startRow that equals the stopRow makes this scan equivalent to a Get
211     this.getScan = isStartRowAndEqualsStopRow();
212   }
213 
214   /**
215    * Creates a new instance of this class while copying all values.
216    *
217    * @param scan  The scan instance to copy from.
218    * @throws IOException When copying the values fails.
219    */
220   public Scan(Scan scan) throws IOException {
221     startRow = scan.getStartRow();
222     stopRow  = scan.getStopRow();
223     maxVersions = scan.getMaxVersions();
224     batch = scan.getBatch();
225     storeLimit = scan.getMaxResultsPerColumnFamily();
226     storeOffset = scan.getRowOffsetPerColumnFamily();
227     caching = scan.getCaching();
228     maxResultSize = scan.getMaxResultSize();
229     cacheBlocks = scan.getCacheBlocks();
230     getScan = scan.isGetScan();
231     filter = scan.getFilter(); // clone?
232     loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
233     consistency = scan.getConsistency();
234     reversed = scan.isReversed();
235     small = scan.isSmall();
236     TimeRange ctr = scan.getTimeRange();
237     tr = new TimeRange(ctr.getMin(), ctr.getMax());
238     Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap();
239     for (Map.Entry<byte[],NavigableSet<byte[]>> entry : fams.entrySet()) {
240       byte [] fam = entry.getKey();
241       NavigableSet<byte[]> cols = entry.getValue();
242       if (cols != null && cols.size() > 0) {
243         for (byte[] col : cols) {
244           addColumn(fam, col);
245         }
246       } else {
247         addFamily(fam);
248       }
249     }
250     for (Map.Entry<String, byte[]> attr : scan.getAttributesMap().entrySet()) {
251       setAttribute(attr.getKey(), attr.getValue());
252     }
253   }
254 
255   /**
256    * Builds a scan object with the same specs as get.
257    * @param get get to model scan after
258    */
259   public Scan(Get get) {
260     this.startRow = get.getRow();
261     this.stopRow = get.getRow();
262     this.filter = get.getFilter();
263     this.cacheBlocks = get.getCacheBlocks();
264     this.maxVersions = get.getMaxVersions();
265     this.storeLimit = get.getMaxResultsPerColumnFamily();
266     this.storeOffset = get.getRowOffsetPerColumnFamily();
267     this.tr = get.getTimeRange();
268     this.familyMap = get.getFamilyMap();
269     this.getScan = true;
270     this.consistency = get.getConsistency();
271     this.setIsolationLevel(get.getIsolationLevel());
272     for (Map.Entry<String, byte[]> attr : get.getAttributesMap().entrySet()) {
273       setAttribute(attr.getKey(), attr.getValue());
274     }
275   }
276 
277   public boolean isGetScan() {
278     return this.getScan || isStartRowAndEqualsStopRow();
279   }
280 
281   private boolean isStartRowAndEqualsStopRow() {
282     return this.startRow != null && this.startRow.length > 0 &&
283         Bytes.equals(this.startRow, this.stopRow);
284   }
285   /**
286    * Get all columns from the specified family.
287    * <p>
288    * Overrides previous calls to addColumn for this family.
289    * @param family family name
290    * @return this
291    */
292   public Scan addFamily(byte [] family) {
293     familyMap.remove(family);
294     familyMap.put(family, null);
295     return this;
296   }
297 
298   /**
299    * Get the column from the specified family with the specified qualifier.
300    * <p>
301    * Overrides previous calls to addFamily for this family.
302    * @param family family name
303    * @param qualifier column qualifier
304    * @return this
305    */
306   public Scan addColumn(byte [] family, byte [] qualifier) {
307     NavigableSet<byte []> set = familyMap.get(family);
308     if(set == null) {
309       set = new TreeSet<byte []>(Bytes.BYTES_COMPARATOR);
310     }
311     if (qualifier == null) {
312       qualifier = HConstants.EMPTY_BYTE_ARRAY;
313     }
314     set.add(qualifier);
315     familyMap.put(family, set);
316     return this;
317   }
318 
319   /**
320    * Get versions of columns only within the specified timestamp range,
321    * [minStamp, maxStamp).  Note, default maximum versions to return is 1.  If
322    * your time range spans more than one version and you want all versions
323    * returned, up the number of versions beyond the default.
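   * <p>For example, a minimal sketch (the timestamp values are illustrative):</p>
   * <pre>
   * Scan scan = new Scan();
   * scan.setTimeRange(1400000000000L, 1500000000000L); // may throw IOException for an invalid range
   * scan.setMaxVersions(Integer.MAX_VALUE); // return every version within the range
   * </pre>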
324    * @param minStamp minimum timestamp value, inclusive
325    * @param maxStamp maximum timestamp value, exclusive
326    * @throws IOException if invalid time range
327    * @see #setMaxVersions()
328    * @see #setMaxVersions(int)
329    * @return this
330    */
331   public Scan setTimeRange(long minStamp, long maxStamp)
332   throws IOException {
333     tr = new TimeRange(minStamp, maxStamp);
334     return this;
335   }
336 
337   /**
338    * Get versions of columns with the specified timestamp. Note, default maximum
339    * versions to return is 1.  If your time range spans more than one version
340    * and you want all versions returned, up the number of versions beyond the
341    * default.
342    * @param timestamp version timestamp
343    * @see #setMaxVersions()
344    * @see #setMaxVersions(int)
345    * @return this
346    */
347   public Scan setTimeStamp(long timestamp)
348   throws IOException {
349     try {
350       tr = new TimeRange(timestamp, timestamp+1);
351     } catch(IOException e) {
352       // This should never happen, unless integer overflow or something extremely wrong...
353       LOG.error("TimeRange failed, likely caused by integer overflow. ", e);
354       throw e;
355     }
356     return this;
357   }
358 
359   /**
360    * Set the start row of the scan.
361    * @param startRow row to start scan on (inclusive)
362    * <p><b>Note:</b> In order to make startRow exclusive add a trailing 0 byte</p>
363    * @return this
364    */
365   public Scan setStartRow(byte [] startRow) {
366     this.startRow = startRow;
367     return this;
368   }
369 
370   /**
371    * Set the stop row.
372    * @param stopRow row to end at (exclusive)
373    * <p><b>Note:</b> In order to make stopRow inclusive add a trailing 0 byte</p>
374    * <p><b>Note:</b> When doing a filter for a rowKey <u>Prefix</u>
375    * use {@link #setRowPrefixFilter(byte[])}.
376    * The 'trailing 0' will not yield the desired result.</p>
377    * @return this
378    */
379   public Scan setStopRow(byte [] stopRow) {
380     this.stopRow = stopRow;
381     return this;
382   }
383 
384   /**
385    * <p>Set a filter (using stopRow and startRow) so the result set only contains rows where the
386    * rowKey starts with the specified prefix.</p>
387    * <p>This is a utility method that converts the desired rowPrefix into the appropriate values
388    * for the startRow and stopRow to achieve the desired result.</p>
389    * <p>This can safely be used in combination with setFilter.</p>
390    * <p><b>NOTE: Doing a {@link #setStartRow(byte[])} and/or {@link #setStopRow(byte[])}
391    * after this method will yield undefined results.</b></p>
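   * <p>For example, a minimal sketch (the <code>table</code> handle, prefix and family shown
   * here are illustrative):</p>
   * <pre>
   * Scan scan = new Scan();
   * scan.setRowPrefixFilter(Bytes.toBytes("user123|"));
   * scan.addFamily(Bytes.toBytes("info"));
   * ResultScanner scanner = table.getScanner(scan);
   * </pre>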
392    * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
393    * @return this
394    */
395   public Scan setRowPrefixFilter(byte[] rowPrefix) {
396     if (rowPrefix == null) {
397       setStartRow(HConstants.EMPTY_START_ROW);
398       setStopRow(HConstants.EMPTY_END_ROW);
399     } else {
400       this.setStartRow(rowPrefix);
401       this.setStopRow(calculateTheClosestNextRowKeyForPrefix(rowPrefix));
402     }
403     return this;
404   }
405 
406   /**
407    * <p>When scanning for a prefix the scan should stop immediately after the last row that
408    * has the specified prefix. This method calculates the closest next rowKey immediately following
409    * the given rowKeyPrefix.</p>
410    * <p><b>IMPORTANT: This converts a rowKey<u>Prefix</u> into a rowKey</b>.</p>
411    * <p>If the prefix is an 'ASCII' string put into a byte[] then this is easy because you can
412    * simply increment the last byte of the array.
413    * But if your application uses real binary rowids you may run into the scenario that your
414    * prefix is something like:</p>
415    * &nbsp;&nbsp;&nbsp;<b>{ 0x12, 0x23, 0xFF, 0xFF }</b><br/>
416    * Then this stopRow needs to be fed into the actual scan<br/>
417    * &nbsp;&nbsp;&nbsp;<b>{ 0x12, 0x24 }</b> (Notice that it is shorter now)<br/>
418    * This method calculates the correct stop row value for this use case.
419    *
420    * @param rowKeyPrefix the rowKey<u>Prefix</u>.
421    * @return the closest next rowKey immediately following the given rowKeyPrefix.
422    */
423   private byte[] calculateTheClosestNextRowKeyForPrefix(byte[] rowKeyPrefix) {
424     // Essentially we are treating it like an 'unsigned very very long' and doing +1 manually.
425     // Search for the place where the trailing 0xFFs start
426     int offset = rowKeyPrefix.length;
427     while (offset > 0) {
428       if (rowKeyPrefix[offset - 1] != (byte) 0xFF) {
429         break;
430       }
431       offset--;
432     }
433 
434     if (offset == 0) {
435       // We got an 0xFFFF... (only FFs) stopRow value which is
436       // the last possible prefix before the end of the table.
437       // So set it to stop at the 'end of the table'
438       return HConstants.EMPTY_END_ROW;
439     }
440 
441     // Copy the right length of the original
442     byte[] newStopRow = Arrays.copyOfRange(rowKeyPrefix, 0, offset);
443     // And increment the last one
444     newStopRow[newStopRow.length - 1]++;
445     return newStopRow;
446   }
447 
448   /**
449    * Get all available versions.
450    * @return this
451    */
452   public Scan setMaxVersions() {
453     this.maxVersions = Integer.MAX_VALUE;
454     return this;
455   }
456 
457   /**
458    * Get up to the specified number of versions of each column.
459    * @param maxVersions maximum versions for each column
460    * @return this
461    */
462   public Scan setMaxVersions(int maxVersions) {
463     this.maxVersions = maxVersions;
464     return this;
465   }
466 
467   /**
468    * Set the maximum number of values to return for each call to next()
469    * @param batch the maximum number of values
470    */
471   public Scan setBatch(int batch) {
472     if (this.hasFilter() && this.filter.hasFilterRow()) {
473       throw new IncompatibleFilterException(
474         "Cannot set batch on a scan using a filter" +
475         " that returns true for filter.hasFilterRow");
476     }
477     this.batch = batch;
478     return this;
479   }
480 
481   /**
482    * Set the maximum number of values to return per row per Column Family
483    * @param limit the maximum number of values returned / row / CF
484    */
485   public Scan setMaxResultsPerColumnFamily(int limit) {
486     this.storeLimit = limit;
487     return this;
488   }
489 
490   /**
491    * Set offset for the row per Column Family.
492    * @param offset is the number of kvs that will be skipped.
493    */
494   public Scan setRowOffsetPerColumnFamily(int offset) {
495     this.storeOffset = offset;
496     return this;
497   }
498 
499   /**
500    * Set the number of rows for caching that will be passed to scanners.
501    * If not set, the Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will
502    * apply.
503    * Higher caching values will enable faster scanners but will use more memory.
504    * @param caching the number of rows for caching
505    */
506   public Scan setCaching(int caching) {
507     this.caching = caching;
508     return this;
509   }
510 
511   /**
512    * @return the maximum result size in bytes. See {@link #setMaxResultSize(long)}
513    */
514   public long getMaxResultSize() {
515     return maxResultSize;
516   }
517 
518   /**
519    * Set the maximum result size. The default is -1; this means that no specific
520    * maximum result size will be set for this scan, and the global configured
521    * value will be used instead. (Defaults to unlimited).
522    *
523    * @param maxResultSize The maximum result size in bytes.
524    */
525   public Scan setMaxResultSize(long maxResultSize) {
526     this.maxResultSize = maxResultSize;
527     return this;
528   }
529 
530   @Override
531   public Scan setFilter(Filter filter) {
532     super.setFilter(filter);
533     return this;
534   }
535 
536   /**
537    * Set the familyMap.
538    * @param familyMap map of family to qualifier
539    * @return this
540    */
541   public Scan setFamilyMap(Map<byte [], NavigableSet<byte []>> familyMap) {
542     this.familyMap = familyMap;
543     return this;
544   }
545 
546   /**
547    * Get the familyMap.
548    * @return familyMap
549    */
550   public Map<byte [], NavigableSet<byte []>> getFamilyMap() {
551     return this.familyMap;
552   }
553 
554   /**
555    * @return the number of families in familyMap
556    */
557   public int numFamilies() {
558     if(hasFamilies()) {
559       return this.familyMap.size();
560     }
561     return 0;
562   }
563 
564   /**
565    * @return true if familyMap is non empty, false otherwise
566    */
567   public boolean hasFamilies() {
568     return !this.familyMap.isEmpty();
569   }
570 
571   /**
572    * @return the keys of the familyMap
573    */
574   public byte[][] getFamilies() {
575     if(hasFamilies()) {
576       return this.familyMap.keySet().toArray(new byte[0][0]);
577     }
578     return null;
579   }
580 
581   /**
582    * @return the startrow
583    */
584   public byte [] getStartRow() {
585     return this.startRow;
586   }
587 
588   /**
589    * @return the stoprow
590    */
591   public byte [] getStopRow() {
592     return this.stopRow;
593   }
594 
595   /**
596    * @return the max number of versions to fetch
597    */
598   public int getMaxVersions() {
599     return this.maxVersions;
600   }
601 
602   /**
603    * @return maximum number of values to return for a single call to next()
604    */
605   public int getBatch() {
606     return this.batch;
607   }
608 
609   /**
610    * @return maximum number of values to return per row per CF
611    */
612   public int getMaxResultsPerColumnFamily() {
613     return this.storeLimit;
614   }
615 
616   /**
617    * Method for retrieving the scan's offset per row per column
618    * family (#kvs to be skipped)
619    * @return row offset
620    */
621   public int getRowOffsetPerColumnFamily() {
622     return this.storeOffset;
623   }
624 
625   /**
626    * @return caching the number of rows fetched when calling next on a scanner
627    */
628   public int getCaching() {
629     return this.caching;
630   }
631 
632   /**
633    * @return TimeRange
634    */
635   public TimeRange getTimeRange() {
636     return this.tr;
637   }
638 
639   /**
640    * @return RowFilter
641    */
642   @Override
643   public Filter getFilter() {
644     return filter;
645   }
646 
647   /**
648    * @return true if a filter has been specified, false if not
649    */
650   public boolean hasFilter() {
651     return filter != null;
652   }
653 
654   /**
655    * Set whether blocks should be cached for this Scan.
656    * <p>
657    * This is true by default.  When true, default settings of the table and
658    * family are used (this will never override caching blocks if the block
659    * cache is disabled for that family or entirely).
660    *
661    * @param cacheBlocks if false, default settings are overridden and blocks
662    * will not be cached
663    */
664   public Scan setCacheBlocks(boolean cacheBlocks) {
665     this.cacheBlocks = cacheBlocks;
666     return this;
667   }
668 
669   /**
670    * Get whether blocks should be cached for this Scan.
671    * @return true if default caching should be used, false if blocks should not
672    * be cached
673    */
674   public boolean getCacheBlocks() {
675     return cacheBlocks;
676   }
677 
678   /**
679    * Set whether this scan is a reversed one
680    * <p>
681    * This is false by default, which means a forward (normal) scan.
682    *
683    * @param reversed if true, scan will be in backward order
684    * @return this
685    */
686   public Scan setReversed(boolean reversed) {
687     this.reversed = reversed;
688     return this;
689   }
690 
691   /**
692    * Get whether this scan is a reversed one.
693    * @return true if backward scan, false if forward(default) scan
694    */
695   public boolean isReversed() {
696     return reversed;
697   }
698 
699   /**
700    * Set whether the caller wants to see the partial results that may be returned from the
701    * server. By default this value is false and the complete results will be assembled client side
702    * before being delivered to the caller.
703    * @param allowPartialResults true to allow partial results to be returned to the caller
704    * @return this
705    */
706   public Scan setAllowPartialResults(final boolean allowPartialResults) {
707     this.allowPartialResults = allowPartialResults;
708     return this;
709   }
710 
711   /**
712    * @return true when the caller of this scan understands that the results it sees may
713    *         only represent a partial portion of a row. The entire row would be retrieved by
714    *         subsequent calls to {@link ResultScanner#next()}
715    */
716   public boolean getAllowPartialResults() {
717     return allowPartialResults;
718   }
719 
720   /**
721    * Set the value indicating whether loading CFs on demand should be allowed (cluster
722    * default is false). On-demand CF loading doesn't load column families until necessary, e.g.
723    * if you filter on one column, the other column family data will be loaded only for the rows
724    * that are included in the result, not for all rows as in the normal case.
725    * With column-specific filters, like SingleColumnValueFilter w/filterIfMissing == true,
726    * this can deliver huge perf gains when there's a cf with lots of data; however, it can
727    * also lead to some inconsistent results, as follows:
728    * - if someone does a concurrent update to both column families in question you may get a row
729    *   that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { video => "my cat" } }
730    *   someone puts rowKey 5 with { cat_videos => 0 }, { video => "my dog" }, concurrent scan
731    *   filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 1 },
732    *   { video => "my dog" } }.
733    * - if there's a concurrent split and you have more than 2 column families, some rows may be
734    *   missing some column families.
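   * <p>For example, a sketch of the column-specific-filter case described above (the family,
   * qualifier and value are illustrative):</p>
   * <pre>
   * SingleColumnValueFilter filter = new SingleColumnValueFilter(
   *     Bytes.toBytes("meta"), Bytes.toBytes("flag"),
   *     CompareFilter.CompareOp.EQUAL, Bytes.toBytes("1"));
   * filter.setFilterIfMissing(true);
   * Scan scan = new Scan();
   * scan.setFilter(filter);
   * scan.setLoadColumnFamiliesOnDemand(true); // other families are loaded only for matching rows
   * </pre>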
735    */
736   public Scan setLoadColumnFamiliesOnDemand(boolean value) {
737     this.loadColumnFamiliesOnDemand = value;
738     return this;
739   }
740 
741   /**
742    * Get the raw loadColumnFamiliesOnDemand setting; if it's not set, can be null.
743    */
744   public Boolean getLoadColumnFamiliesOnDemandValue() {
745     return this.loadColumnFamiliesOnDemand;
746   }
747 
748   /**
749    * Get the logical value indicating whether on-demand CF loading should be allowed.
750    */
751   public boolean doLoadColumnFamiliesOnDemand() {
752     return (this.loadColumnFamiliesOnDemand != null)
753       && this.loadColumnFamiliesOnDemand.booleanValue();
754   }
755 
756   /**
757    * Compile the table and column family (i.e. schema) information
758    * into a String. Useful for parsing and aggregation by debugging,
759    * logging, and administration tools.
760    * @return Map
761    */
762   @Override
763   public Map<String, Object> getFingerprint() {
764     Map<String, Object> map = new HashMap<String, Object>();
765     List<String> families = new ArrayList<String>();
766     if(this.familyMap.size() == 0) {
767       map.put("families", "ALL");
768       return map;
769     } else {
770       map.put("families", families);
771     }
772     for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
773         this.familyMap.entrySet()) {
774       families.add(Bytes.toStringBinary(entry.getKey()));
775     }
776     return map;
777   }
778 
779   /**
780    * Compile the details beyond the scope of getFingerprint (row, columns,
781    * timestamps, etc.) into a Map along with the fingerprinted information.
782    * Useful for debugging, logging, and administration tools.
783    * @param maxCols a limit on the number of columns output prior to truncation
784    * @return Map
785    */
786   @Override
787   public Map<String, Object> toMap(int maxCols) {
788     // start with the fingerprint map and build on top of it
789     Map<String, Object> map = getFingerprint();
790     // map from families to column list replaces fingerprint's list of families
791     Map<String, List<String>> familyColumns =
792       new HashMap<String, List<String>>();
793     map.put("families", familyColumns);
794     // add scalar information first
795     map.put("startRow", Bytes.toStringBinary(this.startRow));
796     map.put("stopRow", Bytes.toStringBinary(this.stopRow));
797     map.put("maxVersions", this.maxVersions);
798     map.put("batch", this.batch);
799     map.put("caching", this.caching);
800     map.put("maxResultSize", this.maxResultSize);
801     map.put("cacheBlocks", this.cacheBlocks);
802     map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand);
803     List<Long> timeRange = new ArrayList<Long>();
804     timeRange.add(this.tr.getMin());
805     timeRange.add(this.tr.getMax());
806     map.put("timeRange", timeRange);
807     int colCount = 0;
808     // iterate through affected families and list out up to maxCols columns
809     for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
810       this.familyMap.entrySet()) {
811       List<String> columns = new ArrayList<String>();
812       familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns);
813       if(entry.getValue() == null) {
814         colCount++;
815         --maxCols;
816         columns.add("ALL");
817       } else {
818         colCount += entry.getValue().size();
819         if (maxCols <= 0) {
820           continue;
821         }
822         for (byte [] column : entry.getValue()) {
823           if (--maxCols <= 0) {
824             continue;
825           }
826           columns.add(Bytes.toStringBinary(column));
827         }
828       }
829     }
830     map.put("totalColumns", colCount);
831     if (this.filter != null) {
832       map.put("filter", this.filter.toString());
833     }
834     // add the id if set
835     if (getId() != null) {
836       map.put("id", getId());
837     }
838     return map;
839   }
840 
841   /**
842    * Enable/disable "raw" mode for this scan.
843    * If "raw" is enabled the scan will return all
844    * delete marker and deleted rows that have not
845    * been collected, yet.
846    * This is mostly useful for Scan on column families
847    * that have KEEP_DELETED_ROWS enabled.
848    * It is an error to specify any column when "raw" is set.
849    * @param raw True/False to enable/disable "raw" mode.
850    */
851   public Scan setRaw(boolean raw) {
852     setAttribute(RAW_ATTR, Bytes.toBytes(raw));
853     return this;
854   }
855 
856   /**
857    * @return True if this Scan is in "raw" mode.
858    */
859   public boolean isRaw() {
860     byte[] attr = getAttribute(RAW_ATTR);
861     return attr == null ? false : Bytes.toBoolean(attr);
862   }
863 
864 
865 
866   /**
867    * Set whether this scan is a small scan.
868    * <p>
869    * Small scans should use pread, while big scans can use seek + read.
870    *
871    * seek + read is fast but can cause two problems: (1) resource contention and (2)
872    * too much network IO.
873    *
874    * [89-fb] Using pread for non-compaction read request
875    * https://issues.apache.org/jira/browse/HBASE-7266
876    *
877    * On the other hand, if set to true, openScanner, next, and closeScanner are
878    * done in one RPC call, which means better performance for small scans.
879    * [HBASE-9488].
880    *
881    * Generally, if the scan range is within one data block (64KB), it can be
882    * considered a small scan.
883    *
884    * @param small whether the scan should be treated as a small scan
885    */
886   public Scan setSmall(boolean small) {
887     this.small = small;
888     return this;
889   }
890 
891   /**
892    * Get whether this scan is a small scan
893    * @return true if small scan
894    */
895   public boolean isSmall() {
896     return small;
897   }
898 
899   @Override
900   public Scan setAttribute(String name, byte[] value) {
901     return (Scan) super.setAttribute(name, value);
902   }
903 
904   @Override
905   public Scan setId(String id) {
906     return (Scan) super.setId(id);
907   }
908 
909   @Override
910   public Scan setAuthorizations(Authorizations authorizations) {
911     return (Scan) super.setAuthorizations(authorizations);
912   }
913 
914   @Override
915   public Scan setACL(Map<String, Permission> perms) {
916     return (Scan) super.setACL(perms);
917   }
918 
919   @Override
920   public Scan setACL(String user, Permission perms) {
921     return (Scan) super.setACL(user, perms);
922   }
923 
924   @Override
925   public Scan setConsistency(Consistency consistency) {
926     return (Scan) super.setConsistency(consistency);
927   }
928 
929   @Override
930   public Scan setReplicaId(int Id) {
931     return (Scan) super.setReplicaId(Id);
932   }
933 
934   @Override
935   public Scan setIsolationLevel(IsolationLevel level) {
936     return (Scan) super.setIsolationLevel(level);
937   }
938 
939   /**
940    * Utility that creates a Scan that will do a small scan in reverse from the passed row
941    * looking for the next closest row. The returned Scan is small, reversed, and caches a
942    * single row.
943    * @param row row to start the reverse scan from
944    * @return An instance of Scan primed with the passed <code>row</code> to
945    * scan in reverse for one row only.
946    */
947   static Scan createGetClosestRowOrBeforeReverseScan(byte[] row) {
948     // Below does not work if you add in family; need to add the family qualifier that is highest
949     // possible family qualifier.  Do we have such a notion?  Would have to be magic.
950     Scan scan = new Scan(row);
951     scan.setSmall(true);
952     scan.setReversed(true);
953     scan.setCaching(1);
954     return scan;
955   }
956 
957   /**
958    * Enable collection of {@link ScanMetrics}. For advanced users.
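   * <p>A minimal usage sketch (the <code>table</code> handle is illustrative, not part of
   * this API):</p>
   * <pre>
   * Scan scan = new Scan();
   * scan.setScanMetricsEnabled(true);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result r : scanner) {
   *     // consume results; metrics accumulate in the Scan instance
   *   }
   * }
   * ScanMetrics metrics = scan.getScanMetrics();
   * </pre>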
959    * @param enabled Set to true to enable accumulating scan metrics
960    */
961   public Scan setScanMetricsEnabled(final boolean enabled) {
962     setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.valueOf(enabled)));
963     return this;
964   }
965 
966   /**
967    * @return True if collection of scan metrics is enabled. For advanced users.
968    */
969   public boolean isScanMetricsEnabled() {
970     byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
971     return attr == null ? false : Bytes.toBoolean(attr);
972   }
973 
974   /**
975    * @return Metrics on this Scan, if metrics were enabled.
976    * @see #setScanMetricsEnabled(boolean)
977    */
978   public ScanMetrics getScanMetrics() {
979     byte [] bytes = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
980     if (bytes == null) return null;
981     return ProtobufUtil.toScanMetrics(bytes);
982   }
983 }