Spark Plan 之 SQLMetric
SQLMetric
SparkPlan 包含以下基本方法:
/**
 * Metrics exposed by this operator, keyed by metric name.
 *
 * @return all [[SQLMetric]]s of this SparkPlan; empty by default.
 */
def metrics: Map[String, SQLMetric] = Map.empty[String, SQLMetric]
/**
 * Looks up a single metric by name.
 *
 * @param name the key under which the metric was registered
 * @return the [[SQLMetric]] registered under `name`; fails with a
 *         `NoSuchElementException` if no such metric exists (standard
 *         `Map.apply` behavior).
 */
def longMetric(name: String): SQLMetric = metrics.apply(name)
BytesToBytesMap
public final class BytesToBytesMap extends MemoryConsumer {
/**
 * Returns the average number of hash probes per key lookup, i.e. how many
 * slots were inspected on average before a lookup terminated.
 *
 * @return mean probes per lookup, or 0.0 when no lookups have been performed
 *         yet (the unguarded expression {@code (1.0 * numProbes) / numKeyLookups}
 *         would evaluate 0.0/0 and return {@code Double.NaN} in that case).
 */
public double getAvgHashProbesPerKey() {
  // Guard against 0/0: before the first lookup both counters are zero and
  // floating-point division would silently produce NaN for callers.
  if (numKeyLookups == 0) {
    return 0.0;
  }
  return (double) numProbes / numKeyLookups;
}
/**
 * Looks up a key, and saves the result in provided `loc`.
 *
 * This is a thread-safe version of `lookup`, could be used by multiple threads.
 *
 * Open-addressing probe: starting at slot {@code hash & mask}, slots are
 * inspected until either an empty slot is found (key absent; {@code loc} is
 * filled with {@code found=false}) or a slot whose stored full hash code and
 * key bytes both match (key present; {@code loc} is filled with
 * {@code found=true}). The {@code numKeyLookups}/{@code numProbes} counters
 * feed the hash-probe metric.
 *
 * NOTE(review): the counter updates below ({@code numKeyLookups++},
 * {@code numProbes++}) are plain unsynchronized increments, so under
 * concurrent callers the metrics are only approximate — presumably that is
 * acceptable for instrumentation; confirm against callers.
 */
public void safeLookup(Object keyBase, long keyOffset, int keyLength, Location loc, int hash) {
// The slot array must have been allocated before any lookup.
assert(longArray != null);
// Metrics: count one lookup per call.
numKeyLookups++;
// Initial slot from the low bits of the hash; masking implies the table
// capacity is a power of two.
int pos = hash & mask;
int step = 1;
while (true) {
// Metrics: count one probe per slot inspected.
numProbes++;
// Each slot occupies two longs: [pos * 2] holds the key/value address
// (0 means the slot is empty), [pos * 2 + 1] holds the full hash code.
if (longArray.get(pos * 2) == 0) {
// This is a new key.
loc.with(pos, hash, false);
return;
} else {
long stored = longArray.get(pos * 2 + 1);
// Cheap filter first: compare the stored full hash before touching key bytes.
if ((int) (stored) == hash) {
// Full hash code matches. Let's compare the keys for equality.
loc.with(pos, hash, true);
// Lengths must agree before a byte-wise comparison is meaningful.
if (loc.getKeyLength() == keyLength) {
final boolean areEqual = ByteArrayMethods.arrayEquals(
keyBase,
keyOffset,
loc.getKeyBase(),
loc.getKeyOffset(),
keyLength
);
if (areEqual) {
return;
}
}
}
}
// Collision: advance by a step that grows by one each iteration (offsets
// form triangular numbers) and wrap around the table via the mask.
pos = (pos + step) & mask;
step++;
}
}
这里,hash probe 本质上就是一次 hash lookup(哈希查找)的探测过程:每检查一个槽位,numProbes 就加一。