@@ -251,32 +251,30 @@ func (c *Cluster) NominateNodeForPod(ctx context.Context, providerID string) {
 	}
 }
 
-// TODO remove this when v1alpha5 APIs are deprecated. With v1 APIs Karpenter relies on the existence
-// of the karpenter.sh/disruption taint to know when a node is marked for deletion.
 // UnmarkForDeletion removes the marking on the node as a node the controller intends to delete
 func (c *Cluster) UnmarkForDeletion(providerIDs ...string) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 
 	for _, id := range providerIDs {
 		if n, ok := c.nodes[id]; ok {
-			c.updateNodePoolResources(nil, c.nodes[id])
+			oldNode := n.ShallowCopy()
 			n.markedForDeletion = false
+			c.updateNodePoolResources(oldNode, n)
 		}
 	}
 }
 
-// TODO remove this when v1alpha5 APIs are deprecated. With v1 APIs Karpenter relies on the existence
-// of the karpenter.sh/disruption taint to know when a node is marked for deletion.
 // MarkForDeletion marks the node as pending deletion in the internal cluster state
 func (c *Cluster) MarkForDeletion(providerIDs ...string) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 
 	for _, id := range providerIDs {
 		if n, ok := c.nodes[id]; ok {
-			c.updateNodePoolResources(c.nodes[id], nil)
+			oldNode := n.ShallowCopy()
 			n.markedForDeletion = true
+			c.updateNodePoolResources(oldNode, n)
 		}
 	}
 }
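For context, here is a minimal, runnable sketch of the snapshot-then-update pattern the new code follows. The StateNode and Cluster types and the updateNodePoolResources stub below are hypothetical stand-ins, not Karpenter's actual definitions; the point is that the node is shallow-copied before markedForDeletion is flipped, so the accounting function sees both the old and the new state and can adjust by the delta, rather than receiving nil on one side as if the node were wholly added or removed.

```go
package main

import (
	"fmt"
	"sync"
)

// Hypothetical, simplified stand-in for the cluster-state node type the
// diff touches; the real Karpenter type carries much more state.
type StateNode struct {
	providerID        string
	markedForDeletion bool
}

// ShallowCopy returns a copy of the node so the caller can snapshot its
// state before mutating it, mirroring the pattern in the diff.
func (n *StateNode) ShallowCopy() *StateNode {
	copied := *n
	return &copied
}

type Cluster struct {
	mu    sync.Mutex
	nodes map[string]*StateNode
}

// updateNodePoolResources is a stub for the real accounting function: it is
// given the node's state before and after a change so it can adjust resource
// counts by the delta, instead of treating the node as wholly added
// (oldNode == nil) or wholly removed (newNode == nil) as the old code did.
func (c *Cluster) updateNodePoolResources(oldNode, newNode *StateNode) {
	fmt.Printf("accounting update: old marked=%v, new marked=%v\n",
		oldNode.markedForDeletion, newNode.markedForDeletion)
}

// MarkForDeletion follows the new pattern from the diff: snapshot, mutate,
// then report both states to the accounting function.
func (c *Cluster) MarkForDeletion(providerIDs ...string) {
	c.mu.Lock()
	defer c.mu.Unlock()

	for _, id := range providerIDs {
		if n, ok := c.nodes[id]; ok {
			oldNode := n.ShallowCopy()
			n.markedForDeletion = true
			c.updateNodePoolResources(oldNode, n)
		}
	}
}

func main() {
	c := &Cluster{nodes: map[string]*StateNode{
		"aws:///us-west-2a/i-0abc": {providerID: "aws:///us-west-2a/i-0abc"},
	}}
	c.MarkForDeletion("aws:///us-west-2a/i-0abc")
	// Output: accounting update: old marked=false, new marked=true
}
```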