
Commit b85cfc4

Addressed more comments
1 parent c386186 commit b85cfc4

3 files changed: +19 -19 lines changed

docs/configuration.md

Lines changed: 1 addition & 1 deletion
@@ -1397,7 +1397,7 @@ Apart from these, the following properties are also available, and may be useful
 </tr>
 <tr>
   <td><code>spark.scheduler.minRegisteredResourcesRatio</code></td>
-  <td>2.3.0 for KUBERNETES mode; 0.8 for YARN mode; 0.0 for standalone mode and Mesos coarse-grained mode</td>
+  <td>0.8 for KUBERNETES mode; 0.8 for YARN mode; 0.0 for standalone mode and Mesos coarse-grained mode</td>
   <td>
     The minimum ratio of registered resources (registered resources / total expected resources)
     (resources are executors in yarn mode and Kubernetes mode, CPU cores in standalone mode and Mesos coarsed-grained
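Nothing about how the property is set changes here; the fix only corrects the documented default for KUBERNETES mode. As an illustrative sketch (the 0.9 value and the app name are made up for the example; `spark.scheduler.maxRegisteredResourcesWaitingTime` is the companion timeout property), the ratio can be overridden like any other Spark property:

import org.apache.spark.SparkConf

// Illustrative sketch: wait until 90% of requested executors have registered
// before task scheduling starts, but stop waiting after at most 30 seconds.
val conf = new SparkConf()
  .setAppName("min-registered-ratio-demo") // hypothetical app name
  .set("spark.scheduler.minRegisteredResourcesRatio", "0.9")
  .set("spark.scheduler.maxRegisteredResourcesWaitingTime", "30s")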

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Config.scala

Lines changed: 13 additions & 13 deletions
@@ -24,16 +24,16 @@ private[spark] object Config extends Logging {
 
   val KUBERNETES_NAMESPACE =
     ConfigBuilder("spark.kubernetes.namespace")
-      .doc("The namespace that will be used for running the driver and executor pods. When using" +
-        " spark-submit in cluster mode, this can also be passed to spark-submit via the" +
-        " --kubernetes-namespace command line argument.")
+      .doc("The namespace that will be used for running the driver and executor pods. When using " +
+        "spark-submit in cluster mode, this can also be passed to spark-submit via the " +
+        "--kubernetes-namespace command line argument.")
       .stringConf
       .createWithDefault("default")
 
   val EXECUTOR_DOCKER_IMAGE =
     ConfigBuilder("spark.kubernetes.executor.docker.image")
-      .doc("Docker image to use for the executors. Specify this using the standard Docker tag" +
-        " format.")
+      .doc("Docker image to use for the executors. Specify this using the standard Docker tag " +
+        "format.")
       .stringConf
       .createOptional
 
@@ -56,10 +56,10 @@ private[spark] object Config extends Logging {
 
   val KUBERNETES_SERVICE_ACCOUNT_NAME =
     ConfigBuilder(s"$APISERVER_AUTH_DRIVER_CONF_PREFIX.serviceAccountName")
-      .doc("Service account that is used when running the driver pod. The driver pod uses" +
-        " this service account when requesting executor pods from the API server. If specific" +
-        " credentials are given for the driver pod to use, the driver will favor" +
-        " using those credentials instead.")
+      .doc("Service account that is used when running the driver pod. The driver pod uses " +
+        "this service account when requesting executor pods from the API server. If specific " +
+        "credentials are given for the driver pod to use, the driver will favor " +
+        "using those credentials instead.")
       .stringConf
       .createOptional
 
@@ -68,9 +68,9 @@ private[spark] object Config extends Logging {
   // based on the executor memory.
   val KUBERNETES_EXECUTOR_MEMORY_OVERHEAD =
     ConfigBuilder("spark.kubernetes.executor.memoryOverhead")
-      .doc("The amount of off-heap memory (in megabytes) to be allocated per executor. This" +
-        " is memory that accounts for things like VM overheads, interned strings, other native" +
-        " overheads, etc. This tends to grow with the executor size. (typically 6-10%).")
+      .doc("The amount of off-heap memory (in megabytes) to be allocated per executor. This " +
+        "is memory that accounts for things like VM overheads, interned strings, other native " +
+        "overheads, etc. This tends to grow with the executor size. (typically 6-10%).")
       .bytesConf(ByteUnit.MiB)
       .createOptional
 
@@ -117,7 +117,7 @@ private[spark] object Config extends Logging {
       .intConf
       .checkValue(value => value > 0, "Maximum attempts of checks of executor lost reason " +
        "must be a positive integer")
-      .createWithDefault(5)
+      .createWithDefault(10)
 
   val KUBERNETES_NODE_SELECTOR_PREFIX = "spark.kubernetes.node.selector."
 }
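Worth noting: every `.doc(...)` edit in this file is behavior-preserving. The separating space simply moves from the start of each continuation fragment to the end of the preceding one, so the concatenated text is identical. A minimal standalone sketch, with fragments copied from the diff above:

object ConcatStyleDemo extends App {
  // Leading-space style (before this commit) vs. trailing-space style (after):
  // both concatenate to exactly the same doc string.
  val leading = "Docker image to use for the executors. Specify this using the standard Docker tag" +
    " format."
  val trailing = "Docker image to use for the executors. Specify this using the standard Docker tag " +
    "format."
  assert(leading == trailing)
}

The only functional change in the file is the default for the executor-lost-reason check count, which goes from 5 to 10 attempts.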

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackend.scala

Lines changed: 5 additions & 5 deletions
@@ -344,9 +344,9 @@ private[spark] class KubernetesClusterSchedulerBackend(
         podsWithKnownExitReasons.put(pod.getMetadata.getName, executorExitReason)
 
         if (!disconnectedPodsByExecutorIdPendingRemoval.containsKey(executorId)) {
-          log.warn(s"Executor with id $executorId was not marked as disconnected, but the" +
-            s" watch received an event of type $action for this executor. The executor may" +
-            " have failed to start in the first place and never registered with the driver.")
+          log.warn(s"Executor with id $executorId was not marked as disconnected, but the " +
+            s"watch received an event of type $action for this executor. The executor may " +
+            "have failed to start in the first place and never registered with the driver.")
         }
         disconnectedPodsByExecutorIdPendingRemoval.put(executorId, pod)
 
@@ -388,8 +388,8 @@ private[spark] class KubernetesClusterSchedulerBackend(
       // container was probably actively killed by the driver.
       if (isPodAlreadyReleased(pod)) {
         ExecutorExited(containerExitStatus, exitCausedByApp = false,
-          s"Container in pod ${pod.getMetadata.getName} exited from explicit termination" +
-            " request.")
+          s"Container in pod ${pod.getMetadata.getName} exited from explicit termination " +
+            "request.")
       } else {
         val containerExitReason = s"Pod ${pod.getMetadata.getName}'s executor container " +
           s"exited with exit status code $containerExitStatus."
