@@ -223,7 +223,8 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
 	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
 		ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
 		if (!ret)
-			ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->id);
+			ivpu_dbg(vdev, JOB, "Command queue %d destroyed, ctx %d\n",
+				 cmdq->id, file_priv->ctx.id);
 	}
 
 	ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
@@ -324,6 +325,8 @@ void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
 
 	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS)
 		ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+
+	file_priv->aborted = true;
 }
 
 static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
@@ -462,23 +465,23 @@ static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *vdev, u32 job_id)
 {
 	struct ivpu_job *job;
 
-	xa_lock(&vdev->submitted_jobs_xa);
-	job = __xa_erase(&vdev->submitted_jobs_xa, job_id);
+	lockdep_assert_held(&vdev->submitted_jobs_lock);
 
+	job = xa_erase(&vdev->submitted_jobs_xa, job_id);
 	if (xa_empty(&vdev->submitted_jobs_xa) && job) {
 		vdev->busy_time = ktime_add(ktime_sub(ktime_get(), vdev->busy_start_ts),
 					    vdev->busy_time);
 	}
 
-	xa_unlock(&vdev->submitted_jobs_xa);
-
 	return job;
 }
 
 static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
 {
 	struct ivpu_job *job;
 
+	lockdep_assert_held(&vdev->submitted_jobs_lock);
+
 	job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
 	if (!job)
 		return -ENOENT;
@@ -497,6 +500,10 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
 	ivpu_stop_job_timeout_detection(vdev);
 
 	ivpu_rpm_put(vdev);
+
+	if (!xa_empty(&vdev->submitted_jobs_xa))
+		ivpu_start_job_timeout_detection(vdev);
+
 	return 0;
 }
 
@@ -505,8 +512,12 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
 	struct ivpu_job *job;
 	unsigned long id;
 
+	mutex_lock(&vdev->submitted_jobs_lock);
+
 	xa_for_each(&vdev->submitted_jobs_xa, id, job)
 		ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
+
+	mutex_unlock(&vdev->submitted_jobs_lock);
 }
 
 static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
@@ -531,15 +542,16 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
 		goto err_unlock_file_priv;
 	}
 
-	xa_lock(&vdev->submitted_jobs_xa);
+	mutex_lock(&vdev->submitted_jobs_lock);
+
 	is_first_job = xa_empty(&vdev->submitted_jobs_xa);
-	ret = __xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
-				&file_priv->job_id_next, GFP_KERNEL);
+	ret = xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
+			      &file_priv->job_id_next, GFP_KERNEL);
 	if (ret < 0) {
 		ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
 			 file_priv->ctx.id);
 		ret = -EBUSY;
-		goto err_unlock_submitted_jobs_xa;
+		goto err_unlock_submitted_jobs;
 	}
 
 	ret = ivpu_cmdq_push_job(cmdq, job);
@@ -562,19 +574,21 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
 		 job->job_id, file_priv->ctx.id, job->engine_idx, priority,
 		 job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
 
-	xa_unlock(&vdev->submitted_jobs_xa);
-
+	mutex_unlock(&vdev->submitted_jobs_lock);
 	mutex_unlock(&file_priv->lock);
 
-	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
+	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
+		mutex_lock(&vdev->submitted_jobs_lock);
 		ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+		mutex_unlock(&vdev->submitted_jobs_lock);
+	}
 
 	return 0;
 
 err_erase_xa:
-	__xa_erase(&vdev->submitted_jobs_xa, job->job_id);
-err_unlock_submitted_jobs_xa:
-	xa_unlock(&vdev->submitted_jobs_xa);
+	xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+err_unlock_submitted_jobs:
+	mutex_unlock(&vdev->submitted_jobs_lock);
 err_unlock_file_priv:
 	mutex_unlock(&file_priv->lock);
 	ivpu_rpm_put(vdev);
@@ -745,7 +759,6 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
 		       struct vpu_jsm_msg *jsm_msg)
 {
 	struct vpu_ipc_msg_payload_job_done *payload;
-	int ret;
 
 	if (!jsm_msg) {
 		ivpu_err(vdev, "IPC message has no JSM payload\n");
@@ -758,9 +771,10 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
 	}
 
 	payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
-	ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
-	if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
-		ivpu_start_job_timeout_detection(vdev);
+
+	mutex_lock(&vdev->submitted_jobs_lock);
+	ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
+	mutex_unlock(&vdev->submitted_jobs_lock);
 }
 
 void ivpu_job_done_consumer_init(struct ivpu_device *vdev)
@@ -773,3 +787,36 @@ void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
 {
 	ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
 }
+
+void ivpu_context_abort_thread_handler(struct work_struct *work)
+{
+	struct ivpu_device *vdev = container_of(work, struct ivpu_device, context_abort_work);
+	struct ivpu_file_priv *file_priv;
+	unsigned long ctx_id;
+	struct ivpu_job *job;
+	unsigned long id;
+
+	mutex_lock(&vdev->context_list_lock);
+	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+		if (!file_priv->has_mmu_faults || file_priv->aborted)
+			continue;
+
+		mutex_lock(&file_priv->lock);
+		ivpu_context_abort_locked(file_priv);
+		mutex_unlock(&file_priv->lock);
+	}
+	mutex_unlock(&vdev->context_list_lock);
+
+	if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+		return;
+	/*
+	 * In hardware scheduling mode the NPU has already stopped processing
+	 * these jobs and won't send any further notifications, so free the
+	 * job-related resources and notify userspace here.
+	 */
+	mutex_lock(&vdev->submitted_jobs_lock);
+	xa_for_each(&vdev->submitted_jobs_xa, id, job)
+		if (job->file_priv->aborted)
+			ivpu_job_signal_and_destroy(vdev, job->job_id, DRM_IVPU_JOB_STATUS_ABORTED);
+	mutex_unlock(&vdev->submitted_jobs_lock);
+}
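
Note on the locking change above: the patch replaces the xarray's internal spinlock (xa_lock() plus the __xa_*() call variants) with the dedicated vdev->submitted_jobs_lock mutex, so a whole multi-step sequence (allocate an id, push the job, possibly erase it on error) runs as one critical section and helpers can document the requirement with lockdep_assert_held(). Below is a minimal sketch of that pattern; the names item_pool, item_pool_init, and item_pool_add are hypothetical stand-ins for the driver's real structures, not part of this commit.

#include <linux/mutex.h>
#include <linux/xarray.h>

/* Hypothetical pool mirroring the pattern adopted by this patch: an xarray
 * of in-flight entries guarded by an external mutex instead of xa_lock(). */
struct item_pool {
	struct xarray items;	/* id -> entry; needs XA_FLAGS_ALLOC */
	struct mutex lock;	/* serializes whole multi-step sequences */
	u32 next_id;		/* cursor for cyclic id allocation */
};

static void item_pool_init(struct item_pool *pool)
{
	xa_init_flags(&pool->items, XA_FLAGS_ALLOC);
	mutex_init(&pool->lock);
	pool->next_id = 0;
}

static int item_pool_add(struct item_pool *pool, void *entry, u32 *id)
{
	int ret;

	mutex_lock(&pool->lock);
	/* With an external mutex the plain xa_*() API suffices: there is no
	 * need for xa_lock() and the __xa_*() variants, and a GFP_KERNEL
	 * allocation is safe because a mutex may be held while sleeping. */
	ret = xa_alloc_cyclic(&pool->items, id, entry, xa_limit_32b,
			      &pool->next_id, GFP_KERNEL);
	mutex_unlock(&pool->lock);

	return ret < 0 ? ret : 0;
}

The same mutex is taken around every ivpu_job_signal_and_destroy() call site in the diff (the job-done IPC callback, ivpu_jobs_abort_all(), the NULL_HW test path, and the new abort worker), which is what allows that helper to merely assert the lock rather than take it.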