[MINOR] Fix typos

### What changes were proposed in this pull request?

Fix `then`/`than` typos in comments, test names, and a log message, in ReceiverSupervisorImpl and six other files.

### Does this PR introduce _any_ user-facing change?

No.

### How was this patch tested?

Not needed; the changes only touch comments, test names, and a log message.

Closes apache#39340 from smallzhongfeng/fix-typos.

Authored-by: smallzhongfeng <zhongjingxiong@didiglobal.com>
Signed-off-by: Hyukjin Kwon <gurwls223@apache.org>
smallzhongfeng authored and HyukjinKwon committed Jan 3, 2023
1 parent c0f07d3 · commit a09e9dc
Showing 7 changed files with 9 additions and 9 deletions.
core/src/main/scala/org/apache/spark/SparkContext.scala (2 additions, 2 deletions)
@@ -3173,7 +3173,7 @@ object WritableConverter {

implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = {
() => simpleWritableConverter[Array[Byte], BytesWritable] { bw =>
-// getBytes method returns array which is longer then data to be returned
+// getBytes method returns array which is longer than data to be returned
Arrays.copyOfRange(bw.getBytes, 0, bw.getLength)
}
}
@@ -3204,7 +3204,7 @@ object WritableConverter {

implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = {
simpleWritableConverter[Array[Byte], BytesWritable] { bw =>
-// getBytes method returns array which is longer then data to be returned
+// getBytes method returns array which is longer than data to be returned
Arrays.copyOfRange(bw.getBytes, 0, bw.getLength)
}
}
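
The corrected comment describes real BytesWritable behavior: getBytes exposes the whole backing buffer, which can be longer than the valid payload, so only the first getLength bytes should be copied. A minimal sketch of that behavior, assuming Hadoop's BytesWritable on the classpath (not part of this diff):

    import java.util.Arrays
    import org.apache.hadoop.io.BytesWritable

    val bw = new BytesWritable()
    bw.set(Array[Byte](1, 2, 3), 0, 3)
    bw.setCapacity(16)     // grow the backing buffer past the data
    bw.getBytes.length     // 16: the whole buffer, padding included
    bw.getLength           // 3: only this many bytes are valid
    Arrays.copyOfRange(bw.getBytes, 0, bw.getLength)  // exactly Array(1, 2, 3)
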
@@ -397,7 +397,7 @@ private[evaluation] object SquaredEuclideanSilhouette extends Silhouette {
val clustersStatsMap = SquaredEuclideanSilhouette
.computeClusterStats(dfWithSquaredNorm, predictionCol, featuresCol, weightCol)

-// Silhouette is reasonable only when the number of clusters is greater then 1
+// Silhouette is reasonable only when the number of clusters is greater than 1
assert(clustersStatsMap.size > 1, "Number of clusters must be greater than one.")

val bClustersStatsMap = dataset.sparkSession.sparkContext.broadcast(clustersStatsMap)
@@ -604,7 +604,7 @@ private[evaluation] object CosineSilhouette extends Silhouette {
val clustersStatsMap = computeClusterStats(dfWithNormalizedFeatures, featuresCol,
predictionCol, weightCol)

-// Silhouette is reasonable only when the number of clusters is greater then 1
+// Silhouette is reasonable only when the number of clusters is greater than 1
assert(clustersStatsMap.size > 1, "Number of clusters must be greater than one.")

val bClustersStatsMap = dataset.sparkSession.sparkContext.broadcast(clustersStatsMap)
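
The assertion above is what surfaces when a clustering puts every point in one cluster. A minimal usage sketch, assuming a SparkSession and a DataFrame `dataset` with a `features` vector column (names illustrative, not from this diff):

    import org.apache.spark.ml.clustering.KMeans
    import org.apache.spark.ml.evaluation.ClusteringEvaluator

    val predictions = new KMeans().setK(2).setSeed(1L)
      .fit(dataset).transform(dataset)

    val evaluator = new ClusteringEvaluator()
      .setDistanceMeasure("squaredEuclidean")  // "cosine" uses CosineSilhouette
    val silhouette = evaluator.evaluate(predictions)
    // With k = 1 every row gets the same prediction and the assert fires.
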
@@ -105,7 +105,7 @@ object Summarizer extends Logging {
* @return a builder.
* @throws IllegalArgumentException if one of the metric names is not understood.
*
-* Note: Currently, the performance of this interface is about 2x~3x slower then using the RDD
+* Note: Currently, the performance of this interface is about 2x~3x slower than using the RDD
* interface.
*/
@Since("2.3.0")
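
The DataFrame interface that the note benchmarks looks like the sketch below, assuming a DataFrame `df` with a Vector column named `features` (names illustrative):

    import org.apache.spark.ml.stat.Summarizer
    import org.apache.spark.sql.functions.col

    // One aggregation pass computing several metrics together.
    val stats = df.select(
      Summarizer.metrics("mean", "variance", "count")
        .summary(col("features")).alias("summary"))

    // Single-metric shorthand.
    val means = df.select(Summarizer.mean(col("features")))
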
@@ -203,7 +203,7 @@ private[yarn] class YarnAllocator(

// The default profile is always present so we need to initialize the datastructures keyed by
// ResourceProfile id to ensure its present if things start running before a request for
-// executors could add it. This approach is easier then going and special casing everywhere.
+// executors could add it. This approach is easier than going and special casing everywhere.
private def initDefaultProfile(): Unit = synchronized {
allocatedHostToContainersMapPerRPId(DEFAULT_RESOURCE_PROFILE_ID) =
new HashMap[String, mutable.Set[ContainerId]]()
@@ -2534,7 +2534,7 @@ class CollectionExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper
Literal.create(Seq(Float.NaN, null, 1f), ArrayType(FloatType))), true)
}

test("SPARK-36740: ArrayMin/ArrayMax/SortArray should handle NaN greater then non-NaN value") {
test("SPARK-36740: ArrayMin/ArrayMax/SortArray should handle NaN greater than non-NaN value") {
// ArrayMin
checkEvaluation(ArrayMin(
Literal.create(Seq(Double.NaN, 1d, 2d), ArrayType(DoubleType))), 1d)
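
The semantics under test, sketched in SQL (assuming a SparkSession `spark`; not from this diff): Spark orders NaN above every non-NaN double, so both extremes handle it consistently.

    spark.sql("SELECT array_min(array(CAST('NaN' AS DOUBLE), 1D, 2D))").show()
    // 1.0 -- NaN counts as the largest double, so it never wins array_min
    spark.sql("SELECT array_max(array(CAST('NaN' AS DOUBLE), 1D, 2D))").show()
    // NaN -- and for the same reason it always wins array_max
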
@@ -836,7 +836,7 @@ class HigherOrderFunctionsSuite extends SparkFunSuite with ExpressionEvalHelper
assert(!mapFilter2_1.semanticEquals(mapFilter2_3))
}

test("SPARK-36740: ArraySort should handle NaN greater then non-NaN value") {
test("SPARK-36740: ArraySort should handle NaN greater than non-NaN value") {
checkEvaluation(arraySort(
Literal.create(Seq(Double.NaN, 1d, 2d, null), ArrayType(DoubleType))),
Seq(1d, 2d, Double.NaN, null))
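
The matching sort behavior, under the same assumptions: array_sort places NaN after all non-NaN values and nulls last, which is exactly the Seq(1d, 2d, Double.NaN, null) expected above.

    spark.sql("SELECT array_sort(array(CAST('NaN' AS DOUBLE), 1D, 2D, NULL))")
      .show(false)
    // [1.0, 2.0, NaN, null]
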
@@ -215,7 +215,7 @@ private[streaming] class ReceiverSupervisorImpl(
private def nextBlockId = StreamBlockId(streamId, newBlockId.getAndIncrement)

private def cleanupOldBlocks(cleanupThreshTime: Time): Unit = {
-logDebug(s"Cleaning up blocks older then $cleanupThreshTime")
+logDebug(s"Cleaning up blocks older than $cleanupThreshTime")
receivedBlockHandler.cleanupOldBlocks(cleanupThreshTime.milliseconds)
}
}
