@@ -2,13 +2,15 @@
 extern crate lazy_static;
 mod util;
 
-use akri_shared::akri::{metrics::run_metrics_server, API_NAMESPACE};
+use akri_shared::{
+    akri::{metrics::run_metrics_server, API_NAMESPACE},
+    k8s::AKRI_CONFIGURATION_LABEL_NAME,
+};
+use futures::StreamExt;
+use kube::runtime::{watcher::Config, Controller};
 use prometheus::IntGaugeVec;
 use std::sync::Arc;
-use util::{
-    controller_ctx::{ControllerContext, CONTROLLER_FIELD_MANAGER_ID},
-    instance_action, node_watcher, pod_watcher,
-};
+use util::{controller_ctx::ControllerContext, instance_action, node_watcher, pod_watcher};
 
 /// Length of time to sleep between controller system validation checks
 pub const SYSTEM_CHECK_DELAY_SECS: u64 = 30;
@@ -34,33 +36,46 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>
     );
 
     log::info!("{} Controller logging started", API_NAMESPACE);
-    let mut tasks = Vec::new();
 
     // Start server for prometheus metrics
     tokio::spawn(run_metrics_server());
-
-    let controller_ctx = Arc::new(ControllerContext::new(
-        Arc::new(kube::Client::try_default().await?),
-        CONTROLLER_FIELD_MANAGER_ID,
-    ));
-    let instance_watcher_ctx = controller_ctx.clone();
+    let client = Arc::new(kube::Client::try_default().await?);
+    let controller_ctx = Arc::new(ControllerContext::new(client.clone()));
     let node_watcher_ctx = controller_ctx.clone();
     let pod_watcher_ctx = controller_ctx.clone();
 
-    // Handle instance changes
-    tasks.push(tokio::spawn(async {
-        instance_action::run(instance_watcher_ctx).await;
-    }));
-    // Watch for node disappearance
-    tasks.push(tokio::spawn(async {
-        node_watcher::run(node_watcher_ctx).await;
-    }));
-    // Watch for broker Pod state changes
-    tasks.push(tokio::spawn(async {
-        pod_watcher::run(pod_watcher_ctx).await;
-    }));
+    node_watcher::check(client.clone()).await?;
+    let node_controller = Controller::new(
+        node_watcher_ctx.client.all().as_inner(),
+        Config::default().any_semantic(),
+    )
+    .shutdown_on_signal()
+    .run(
+        node_watcher::reconcile,
+        node_watcher::error_policy,
+        node_watcher_ctx,
+    )
+    .filter_map(|x| async move { std::result::Result::ok(x) })
+    .for_each(|_| futures::future::ready(()));
+
+    pod_watcher::check(client.clone()).await?;
+    let pod_controller = Controller::new(
+        pod_watcher_ctx.client.all().as_inner(),
+        Config::default().labels(AKRI_CONFIGURATION_LABEL_NAME),
+    )
+    .shutdown_on_signal()
+    .run(
+        pod_watcher::reconcile,
+        pod_watcher::error_policy,
+        pod_watcher_ctx,
+    )
+    .filter_map(|x| async move { std::result::Result::ok(x) })
+    .for_each(|_| futures::future::ready(()));
 
-    futures::future::try_join_all(tasks).await?;
+    tokio::select! {
+        _ = futures::future::join(node_controller, pod_controller) => {},
+        _ = instance_action::run(client) => {}
+    }
 
     log::info!("{} Controller end", API_NAMESPACE);
     Ok(())
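For orientation: the added code replaces the hand-rolled watcher tasks with kube-rs's Controller runtime, which drives a reconcile function and an error_policy over a watched API, yielding a stream of per-object results. Below is a minimal, self-contained sketch of that same pattern against plain Pods; the watched type, error type, and requeue intervals are illustrative assumptions, not Akri's actual code, and it assumes a recent kube crate (with the runtime feature) plus k8s-openapi, tokio, and futures as dependencies.

// Minimal sketch of the kube-rs Controller pattern this commit adopts,
// shown against plain Pods. The watched type, error type, and requeue
// intervals are illustrative assumptions, not Akri's actual code.
use std::{sync::Arc, time::Duration};

use futures::StreamExt;
use k8s_openapi::api::core::v1::Pod;
use kube::{
    api::Api,
    runtime::{controller::Action, watcher::Config, Controller},
    Client,
};

// Shared state handed to every reconcile call (stand-in for ControllerContext).
struct Context {
    #[allow(dead_code)]
    client: Client,
}

#[derive(Debug)]
struct ReconcileError(String);

impl std::fmt::Display for ReconcileError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "reconcile failed: {}", self.0)
    }
}

impl std::error::Error for ReconcileError {}

// Called once per changed object; the returned Action schedules the requeue.
async fn reconcile(pod: Arc<Pod>, _ctx: Arc<Context>) -> Result<Action, ReconcileError> {
    println!("reconciling {:?}", pod.metadata.name);
    Ok(Action::requeue(Duration::from_secs(300)))
}

// Decides how to retry when reconcile returns an error.
fn error_policy(_pod: Arc<Pod>, _err: &ReconcileError, _ctx: Arc<Context>) -> Action {
    Action::requeue(Duration::from_secs(10))
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::try_default().await?;
    let ctx = Arc::new(Context { client: client.clone() });

    Controller::new(Api::<Pod>::all(client), Config::default())
        .shutdown_on_signal()
        .run(reconcile, error_policy, ctx)
        // Drain the stream, discarding per-object reconcile outcomes.
        .filter_map(|res| async move { res.ok() })
        .for_each(|_| futures::future::ready(()))
        .await;
    Ok(())
}

Because run returns a Stream of per-object results rather than a future, each controller has to be drained (the filter_map/for_each pair above); that is why the diff builds node_controller and pod_controller as drained streams and then joins them under tokio::select! alongside instance_action::run.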