• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

oasisprotocol / oasis-core / #5141

24 Jul 2024 09:54AM UTC coverage: 45.22% (-0.3%) from 45.494%
#5141

Pull #5784

peternose
keymanager/src/churp: Serve key shares to lagging clients
Pull Request #5784: keymanager/src/churp: Serve key shares to lagging clients

2 of 367 new or added lines in 4 files covered. (0.54%)

19 existing lines in 1 file now uncovered.

3983 of 8808 relevant lines covered (45.22%)

1.01 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/keymanager/src/churp/handler.rs
1
//! CHURP handler.
2
use std::{
3
    cmp,
4
    collections::HashMap,
5
    convert::TryInto,
6
    sync::{Arc, Mutex},
7
};
8

9
use anyhow::Result;
10
use group::{Group, GroupEncoding};
11
use rand::rngs::OsRng;
12
use sp800_185::KMac;
13

14
use oasis_core_runtime::{
15
    common::{
16
        crypto::{
17
            hash::Hash,
18
            signature::{PublicKey, Signer},
19
        },
20
        namespace::Namespace,
21
        sgx::EnclaveIdentity,
22
    },
23
    consensus::{
24
        beacon::EpochTime,
25
        keymanager::churp::{SignedPolicySGX, Status, SuiteId},
26
        verifier::Verifier,
27
    },
28
    enclave_rpc::Context as RpcContext,
29
    future::block_on,
30
    identity::Identity,
31
    protocol::ProtocolUntrustedLocalStorage,
32
    Protocol,
33
};
34

35
use secret_sharing::{
36
    churp::{encode_shareholder, Dealer, Handoff, HandoffKind, Shareholder, VerifiableSecretShare},
37
    kdc::KeySharer,
38
    poly::{scalar_from_bytes, scalar_to_bytes},
39
    suites::{p384, Suite},
40
    vss::VerificationMatrix,
41
};
42

43
use crate::{
44
    beacon::State as BeaconState,
45
    client::{KeyManagerClient, RemoteClient},
46
    registry::State as RegistryState,
47
};
48

49
use super::{
50
    storage::Storage, ApplicationRequest, ConfirmationRequest, EncodedEncryptedPoint,
51
    EncodedVerifiableSecretShare, Error, FetchRequest, FetchResponse, HandoffRequest,
52
    KeyShareRequest, QueryRequest, SignedApplicationRequest, SignedConfirmationRequest,
53
    State as ChurpState, VerifiedPolicies,
54
};
55

56
/// A handoff interval that disables handoffs.
///
/// All bits set, i.e. the maximum representable epoch value, so no real
/// handoff epoch can ever match it.
pub const HANDOFFS_DISABLED: EpochTime = 0xffffffffffffffff;

/// Signature context for signing application requests.
const APPLICATION_REQUEST_SIGNATURE_CONTEXT: &[u8] =
    b"oasis-core/keymanager/churp: application request";

/// Signature context for signing confirmation requests.
const CONFIRMATION_REQUEST_SIGNATURE_CONTEXT: &[u8] =
    b"oasis-core/keymanager/churp: confirmation request";

/// Custom KMAC domain separation for checksums of verification matrices.
const CHECKSUM_VERIFICATION_MATRIX_CUSTOM: &[u8] =
    b"oasis-core/keymanager/churp: verification matrix";

/// Domain separation tag for encoding shareholder identifiers.
const ENCODE_SHAREHOLDER_CONTEXT: &[u8] = b"oasis-core/keymanager/churp: encode shareholder";

/// Domain separation tag for encoding key identifiers for key share derivation
/// approved by an SGX policy.
///
/// SGX policies specify which enclave identities are authorized to access
/// runtime key shares.
const ENCODE_SGX_POLICY_KEY_ID_CONTEXT: &[u8] =
    b"oasis-core/keymanager/churp: encode SGX policy key ID";

/// Domain separation tag for encoding key identifiers for key share derivation
/// approved by a custom policy.
///
/// Custom policies allow access to key shares only for clients that submit
/// a proof, which can be validated against the policy. The hash of the policy
/// is part of the key identifier and is integral to the key derivation process.
#[allow(dead_code)]
const ENCODE_CUSTOM_POLICY_KEY_ID_CONTEXT: &[u8] =
    b"oasis-core/keymanager/churp: encode custom policy key ID";

/// The runtime separator used to add additional domain separation based
/// on the runtime ID.
const RUNTIME_CONTEXT_SEPARATOR: &[u8] = b" for runtime ";

/// The churp separator used to add additional domain separation based
/// on the churp ID.
const CHURP_CONTEXT_SEPARATOR: &[u8] = b" for churp ";

/// The number of blocks a remote client is allowed to be behind when querying
/// past key shares.
const ALLOWED_BLOCKS_BEHIND: u64 = 5;
103

104
/// Represents information about a dealer.
///
/// Pairs a dealer with the epoch it is active in.
struct DealerInfo<G: Group + GroupEncoding> {
    /// The epoch during which this dealer is active.
    epoch: EpochTime,
    /// The dealer associated with this information.
    dealer: Arc<Dealer<G>>,
}
111

112
/// Represents information about a handoff.
///
/// Pairs a handoff with its epoch.
struct HandoffInfo<G: Group + GroupEncoding> {
    /// The handoff epoch.
    epoch: EpochTime,
    /// The handoff associated with this information.
    handoff: Arc<Handoff<G>>,
}
119

120
pub(crate) trait Handler {
    /// Returns the verification matrix of the shared secret bivariate
    /// polynomial from the last successfully completed handoff.
    ///
    /// The verification matrix is a matrix of dimensions t_n x t_m, where
    /// t_n = threshold and t_m = 2 * threshold + 1. It contains encrypted
    /// coefficients of the secret bivariate polynomial whose zero coefficient
    /// represents the shared secret.
    ///
    /// Verification matrix:
    /// ```text
    ///     M = [b_{i,j} * G]
    /// ```
    /// Bivariate polynomial:
    /// ```text
    ///     B(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j
    /// ```
    /// Shared secret:
    /// ```text
    ///     Secret = B(0, 0)
    /// ```
    ///
    /// This matrix is used to verify switch points derived from the bivariate
    /// polynomial share in handoffs.
    ///
    /// NOTE: This method can be called over an insecure channel, as the matrix
    /// does not contain any sensitive information. However, the checksum
    /// of the matrix should always be verified against the consensus layer.
    fn verification_matrix(&self, req: &QueryRequest) -> Result<Vec<u8>>;

    /// Returns switch point for share reduction for the calling node.
    ///
    /// The point is evaluation of the shared secret bivariate polynomial
    /// at the given x (me) and y value (node ID).
    ///
    /// Switch point:
    /// ```text
    ///     Point = B(me, node_id)
    /// ```
    /// Bivariate polynomial:
    /// ```text
    ///     B(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j
    /// ```
    ///
    /// WARNING: This method must be called over a secure channel as the point
    /// needs to be kept secret and generated only for authorized nodes.
    fn share_reduction_switch_point(&self, ctx: &RpcContext, req: &QueryRequest)
        -> Result<Vec<u8>>;

    /// Returns switch point for full share distribution for the calling node.
    ///
    /// The point is evaluation of the proactivized shared secret bivariate
    /// polynomial at the given x (node ID) and y value (me).
    ///
    /// Switch point:
    /// ```text
    ///     Point = B(node_id, me) + \sum Q_i(node_id, me)
    /// ```
    /// Bivariate polynomial:
    /// ```text
    ///     B(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j
    /// ```
    /// Proactive bivariate polynomial:
    /// ```text
    ///     Q_i(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j
    /// ```
    ///
    /// WARNING: This method must be called over a secure channel as the point
    /// needs to be kept secret and generated only for authorized nodes.
    fn share_distribution_switch_point(
        &self,
        ctx: &RpcContext,
        req: &QueryRequest,
    ) -> Result<Vec<u8>>;

    /// Returns proactive bivariate polynomial share for the calling node.
    ///
    /// A bivariate share is a partial evaluation of a randomly selected
    /// bivariate polynomial at a specified x or y value (node ID). Each node
    /// interested in joining the new committee selects a bivariate polynomial
    /// before the next handoff and commits to it by submitting the checksum
    /// of the corresponding verification matrix to the consensus layer.
    /// The latter can be used to verify the received bivariate shares.
    ///
    /// Bivariate polynomial share:
    /// ```text
    ///     S_i(y) = Q_i(node_id, y) (dealing phase or unchanged committee)
    ///     S_i(x) = Q_i(x, node_id) (committee changes)
    /// ```
    /// Proactive bivariate polynomial:
    /// ```text
    ///     Q_i(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j
    /// ```
    ///
    /// WARNING: This method must be called over a secure channel as
    /// the polynomial needs to be kept secret and generated only
    /// for authorized nodes.
    fn bivariate_share(
        &self,
        ctx: &RpcContext,
        req: &QueryRequest,
    ) -> Result<EncodedVerifiableSecretShare>;

    /// Returns the key share for the given key ID generated by the key
    /// derivation center.
    ///
    /// Key share:
    /// ```text
    ///     KS_i = s_i * H(key_id)
    /// ```
    ///
    /// WARNING: This method must be called over a secure channel as the key
    /// share needs to be kept secret and generated only for authorized nodes.
    fn sgx_policy_key_share(
        &self,
        ctx: &RpcContext,
        req: &KeyShareRequest,
    ) -> Result<EncodedEncryptedPoint>;

    /// Prepare CHURP for participation in the given handoff of the protocol.
    ///
    /// Initialization randomly selects a bivariate polynomial for the given
    /// handoff, computes the corresponding verification matrix and its
    /// checksum, and signs the latter.
    ///
    /// Bivariate polynomial:
    ///     B(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j
    ///
    /// Verification matrix:
    ///     M = [b_{i,j} * G]
    ///
    /// Checksum:
    ///     H = KMAC256(M, runtime ID, handoff)
    ///
    /// The bivariate polynomial is zero-hole in all handoffs except in the
    /// first one (dealing phase).
    ///
    /// This method must be called locally.
    fn apply(&self, req: &HandoffRequest) -> Result<SignedApplicationRequest>;

    /// Tries to fetch switch points for share reduction from the given nodes.
    ///
    /// Switch points should be obtained from (at least) t distinct nodes
    /// belonging to the old committee, verified against verification matrix
    /// whose checksum was published in the consensus layer, merged into
    /// a reduced share using Lagrange interpolation and proactivized with
    /// bivariate shares.
    ///
    /// Switch point:
    /// ```text
    ///     P_i = B(node_i, me)
    /// ```
    /// Reduced share:
    /// ```text
    ///     RS(x) = B(x, me)
    /// ```
    /// Proactive reduced share:
    /// ```text
    ///     QR(x) = RS(x) + \sum Q_i(x, me)
    /// ```
    fn share_reduction(&self, req: &FetchRequest) -> Result<FetchResponse>;

    /// Tries to fetch switch data points for full share distribution from
    /// the given nodes.
    ///
    /// Switch points should be obtained from (at least) 2t distinct nodes
    /// belonging to the new committee, verified against the sum of the
    /// verification matrix and the verification matrices of proactive
    /// bivariate shares, whose checksums were published in the consensus
    /// layer, and merged into a full share using Lagrange interpolation.
    ///
    /// Switch point:
    /// ```text
    ///     P_i = B(me, node_i) + \sum Q_i(me, node_i)
    /// ```
    /// Full share:
    /// ```text
    ///     FS(x) = B(me, y) + \sum Q_i(me, y) = B'(me, y)
    /// ```
    fn share_distribution(&self, req: &FetchRequest) -> Result<FetchResponse>;

    /// Tries to fetch proactive bivariate shares from the given nodes.
    ///
    /// Bivariate shares should be fetched from all candidates for the new
    /// committee, including our own, verified against verification matrices
    /// whose checksums were published in the consensus layer, and summed
    /// into a bivariate polynomial.
    ///
    /// Bivariate polynomial share:
    /// ```text
    ///     S_i(y) = Q_i(me, y) (dealing phase or unchanged committee)
    ///     S_i(x) = Q_i(x, me) (committee changes)
    /// ```
    fn proactivization(&self, req: &FetchRequest) -> Result<FetchResponse>;

    /// Returns a signed confirmation request containing the checksum
    /// of the merged verification matrix.
    fn confirmation(&self, req: &HandoffRequest) -> Result<SignedConfirmationRequest>;

    /// Finalizes the specified scheme by cleaning up obsolete dealers,
    /// handoffs, and shareholders. If the handoff was just completed,
    /// the shareholder is made available, and its share is persisted
    /// to the local storage.
    fn finalize(&self, req: &HandoffRequest) -> Result<()>;
}
325

326
/// Key manager application that implements churn-robust proactive secret
/// sharing scheme (CHURP).
pub struct Churp {
    /// Host node identifier.
    node_id: PublicKey,
    /// Key manager runtime ID.
    runtime_id: Namespace,
    /// Runtime identity.
    identity: Arc<Identity>,
    /// Low-level access to the underlying Runtime Host Protocol.
    protocol: Arc<Protocol>,
    /// Consensus verifier.
    consensus_verifier: Arc<dyn Verifier>,
    /// Verified churp state.
    churp_state: ChurpState,

    /// Cached instances, keyed by the CHURP scheme identifier.
    instances: Mutex<HashMap<u8, Arc<dyn Handler + Send + Sync>>>,
    /// Cached verified policies.
    policies: Arc<VerifiedPolicies>,
}
347

348
impl Churp {
NEW
349
    pub fn new(
×
350
        node_id: PublicKey,
351
        identity: Arc<Identity>,
352
        protocol: Arc<Protocol>,
353
        consensus_verifier: Arc<dyn Verifier>,
354
    ) -> Self {
NEW
355
        let runtime_id = protocol.get_runtime_id();
×
NEW
356
        let churp_state = ChurpState::new(consensus_verifier.clone());
×
NEW
357
        let instances = Mutex::new(HashMap::new());
×
NEW
358
        let policies = Arc::new(VerifiedPolicies::new());
×
359

360
        Self {
361
            node_id,
362
            runtime_id,
363
            identity,
364
            protocol,
365
            consensus_verifier,
366
            churp_state,
367
            instances,
368
            policies,
369
        }
370
    }
371

NEW
372
    fn get_instance(
×
373
        &self,
374
        churp_id: u8,
375
        runtime_id: Namespace,
376
    ) -> Result<Arc<dyn Handler + Send + Sync>> {
377
        // Ensure runtime_id matches.
NEW
378
        if self.runtime_id != runtime_id {
×
NEW
379
            return Err(Error::RuntimeMismatch.into());
×
380
        }
381

382
        // Return the instance if it exists.
NEW
383
        let mut instances = self.instances.lock().unwrap();
×
NEW
384
        if let Some(instance) = instances.get(&churp_id) {
×
NEW
385
            return Ok(instance.clone());
×
386
        }
387

388
        // Create a new instance based on the suite type.
NEW
389
        let status = self.churp_state.status(self.runtime_id, churp_id)?;
×
390
        let instance = match status.suite_id {
391
            SuiteId::NistP384Sha3_384 => Instance::<p384::Sha3_384>::new(
392
                churp_id,
NEW
393
                self.node_id,
×
NEW
394
                self.identity.clone(),
×
NEW
395
                self.protocol.clone(),
×
NEW
396
                self.consensus_verifier.clone(),
×
NEW
397
                self.policies.clone(),
×
398
            ),
399
        };
400

401
        // Load secret shares and bivariate share.
NEW
402
        instance.init(&status)?;
×
403

404
        // Store the new instance.
NEW
405
        let instance = Arc::new(instance);
×
NEW
406
        instances.insert(churp_id, instance.clone());
×
407

NEW
408
        Ok(instance)
×
409
    }
410
}
411

412
impl Handler for Churp {
NEW
413
    fn verification_matrix(&self, req: &QueryRequest) -> Result<Vec<u8>> {
×
NEW
414
        let instance = self.get_instance(req.id, req.runtime_id)?;
×
NEW
415
        instance.verification_matrix(req)
×
416
    }
417

NEW
418
    fn share_reduction_switch_point(
×
419
        &self,
420
        ctx: &RpcContext,
421
        req: &QueryRequest,
422
    ) -> Result<Vec<u8>> {
NEW
423
        let instance = self.get_instance(req.id, req.runtime_id)?;
×
NEW
424
        instance.share_reduction_switch_point(ctx, req)
×
425
    }
426

NEW
427
    fn share_distribution_switch_point(
×
428
        &self,
429
        ctx: &RpcContext,
430
        req: &QueryRequest,
431
    ) -> Result<Vec<u8>> {
NEW
432
        let instance = self.get_instance(req.id, req.runtime_id)?;
×
NEW
433
        instance.share_distribution_switch_point(ctx, req)
×
434
    }
435

NEW
436
    fn bivariate_share(
×
437
        &self,
438
        ctx: &RpcContext,
439
        req: &QueryRequest,
440
    ) -> Result<EncodedVerifiableSecretShare> {
NEW
441
        let instance = self.get_instance(req.id, req.runtime_id)?;
×
NEW
442
        instance.bivariate_share(ctx, req)
×
443
    }
444

NEW
445
    fn sgx_policy_key_share(
×
446
        &self,
447
        ctx: &RpcContext,
448
        req: &KeyShareRequest,
449
    ) -> Result<EncodedEncryptedPoint> {
NEW
450
        let instance = self.get_instance(req.id, req.runtime_id)?;
×
NEW
451
        instance.sgx_policy_key_share(ctx, req)
×
452
    }
453

NEW
454
    fn apply(&self, req: &HandoffRequest) -> Result<SignedApplicationRequest> {
×
NEW
455
        let instance = self.get_instance(req.id, req.runtime_id)?;
×
NEW
456
        instance.apply(req)
×
457
    }
458

NEW
459
    fn share_reduction(&self, req: &FetchRequest) -> Result<FetchResponse> {
×
NEW
460
        let instance = self.get_instance(req.id, req.runtime_id)?;
×
NEW
461
        instance.share_reduction(req)
×
462
    }
463

NEW
464
    fn share_distribution(&self, req: &FetchRequest) -> Result<FetchResponse> {
×
NEW
465
        let instance = self.get_instance(req.id, req.runtime_id)?;
×
NEW
466
        instance.share_distribution(req)
×
467
    }
468

NEW
469
    fn proactivization(&self, req: &FetchRequest) -> Result<FetchResponse> {
×
NEW
470
        let instance = self.get_instance(req.id, req.runtime_id)?;
×
NEW
471
        instance.proactivization(req)
×
472
    }
473

NEW
474
    fn confirmation(&self, req: &HandoffRequest) -> Result<SignedConfirmationRequest> {
×
NEW
475
        let instance = self.get_instance(req.id, req.runtime_id)?;
×
NEW
476
        instance.confirmation(req)
×
477
    }
478

NEW
479
    fn finalize(&self, req: &HandoffRequest) -> Result<()> {
×
NEW
480
        let instance = self.get_instance(req.id, req.runtime_id)?;
×
NEW
481
        instance.finalize(req)
×
482
    }
483
}
484

485
/// A single CHURP scheme instance, parameterized by its cipher suite.
struct Instance<S: Suite> {
    /// Host node identifier.
    node_id: PublicKey,
    /// Instance (scheme) identifier.
    churp_id: u8,
    /// Key manager runtime ID.
    runtime_id: Namespace,
    /// Runtime identity.
    identity: Arc<Identity>,
    /// Runtime attestation key signer.
    signer: Arc<dyn Signer>,

    /// Storage handler.
    storage: Storage,
    /// Consensus verifier.
    consensus_verifier: Arc<dyn Verifier>,
    /// Low-level access to the underlying Runtime Host Protocol.
    protocol: Arc<Protocol>,

    /// Verified beacon state.
    beacon_state: BeaconState,
    /// Verified churp state.
    churp_state: ChurpState,
    /// Verified registry state.
    registry_state: RegistryState,

    /// Shareholders with secret shares for completed handoffs.
    ///
    /// The map may also contain shareholders for failed or unfinished
    /// handoffs, so always verify if the handoff succeeded in the consensus.
    shareholders: Mutex<HashMap<EpochTime, Arc<Shareholder<S::Group>>>>,
    /// Dealer of bivariate shares for the next handoff.
    dealer: Mutex<Option<DealerInfo<S::Group>>>,
    /// Next handoff.
    handoff: Mutex<Option<HandoffInfo<S::Group>>>,

    /// Cached verified policies.
    policies: Arc<VerifiedPolicies>,

    /// Domain separation tag for encoding shareholder identifiers.
    shareholder_dst: Vec<u8>,
    /// Domain separation tag for encoding key identifiers for key share
    /// derivation approved by an SGX policy.
    sgx_policy_key_id_dst: Vec<u8>,
}
530

531
impl<S: Suite> Instance<S> {
532
    /// Creates a new CHURP instance.
NEW
533
    pub fn new(
×
534
        churp_id: u8,
535
        node_id: PublicKey,
536
        identity: Arc<Identity>,
537
        protocol: Arc<Protocol>,
538
        consensus_verifier: Arc<dyn Verifier>,
539
        policies: Arc<VerifiedPolicies>,
540
    ) -> Self {
NEW
541
        let runtime_id = protocol.get_runtime_id();
×
NEW
542
        let storage = Storage::new(Arc::new(ProtocolUntrustedLocalStorage::new(
×
NEW
543
            protocol.clone(),
×
544
        )));
NEW
545
        let signer: Arc<dyn Signer> = identity.clone();
×
546

NEW
547
        let beacon_state = BeaconState::new(consensus_verifier.clone());
×
NEW
548
        let churp_state = ChurpState::new(consensus_verifier.clone());
×
NEW
549
        let registry_state = RegistryState::new(consensus_verifier.clone());
×
550

NEW
551
        let shareholders = Mutex::new(HashMap::new());
×
NEW
552
        let dealer = Mutex::new(None);
×
NEW
553
        let handoff = Mutex::new(None);
×
554

NEW
555
        let shareholder_dst =
×
NEW
556
            Self::domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, &runtime_id, churp_id);
×
NEW
557
        let sgx_policy_key_id_dst =
×
NEW
558
            Self::domain_separation_tag(ENCODE_SGX_POLICY_KEY_ID_CONTEXT, &runtime_id, churp_id);
×
559

560
        Self {
561
            churp_id,
562
            identity,
563
            signer,
564
            node_id,
565
            runtime_id,
566
            protocol,
567
            consensus_verifier,
568
            storage,
569
            beacon_state,
570
            shareholders,
571
            churp_state,
572
            registry_state,
573
            dealer,
574
            handoff,
575
            policies,
576
            shareholder_dst,
577
            sgx_policy_key_id_dst,
578
        }
579
    }
580

581
    /// Initializes the instance by loading the shareholder for the last
582
    /// successfully completed handoff, as well as the shareholder and
583
    /// the dealer for the upcoming handoff, if they are available.
NEW
584
    pub fn init(&self, status: &Status) -> Result<()> {
×
NEW
585
        let checksum = status
×
NEW
586
            .applications
×
NEW
587
            .get(&self.node_id)
×
NEW
588
            .map(|app| app.checksum);
×
589

NEW
590
        self.load_shareholder(status.handoff)?;
×
NEW
591
        self.load_next_shareholder(status.next_handoff)?;
×
NEW
592
        self.load_dealer(status.next_handoff, checksum)
×
593
    }
594

595
    /// Tries to fetch switch point for share reduction from the given node.
    ///
    /// The point is either computed locally (when the given node is the host
    /// itself) or fetched from the remote node via the key manager client.
    pub fn fetch_share_reduction_switch_point(
        &self,
        node_id: PublicKey,
        status: &Status,
        handoff: &Handoff<S::Group>,
        client: &RemoteClient,
    ) -> Result<bool> {
        // Encode the node's identifier into a shareholder identifier.
        let x = encode_shareholder::<S>(&node_id.0, &self.shareholder_dst)?;

        // Refuse to fetch a point the handoff doesn't need.
        if !handoff.needs_share_reduction_switch_point(&x)? {
            return Err(Error::InvalidShareholder.into());
        }

        // Fetch from the host node.
        if node_id == self.node_id {
            let shareholder = self.get_shareholder(status.handoff)?;
            let point = shareholder.switch_point(&x);

            if handoff.needs_verification_matrix()? {
                // Local verification matrix is trusted.
                let vm = shareholder.verifiable_share().verification_matrix().clone();
                handoff.set_verification_matrix(vm)?;
            }

            return handoff.add_share_reduction_switch_point(x, point);
        }

        // Fetch from the remote node.
        client.set_nodes(vec![node_id]);

        if handoff.needs_verification_matrix()? {
            // The remote verification matrix needs to be verified against
            // the checksum published in the consensus layer.
            let vm = block_on(client.churp_verification_matrix(self.churp_id, status.handoff))?;
            let checksum = self.checksum_verification_matrix_bytes(&vm, status.handoff);
            let status_checksum = status.checksum.ok_or(Error::InvalidHandoff)?; // Should never happen.
            if checksum != status_checksum {
                return Err(Error::InvalidVerificationMatrixChecksum.into());
            }

            let vm = VerificationMatrix::from_bytes(&vm)
                .ok_or(Error::VerificationMatrixDecodingFailed)?;
            handoff.set_verification_matrix(vm)?;
        }

        let point = block_on(client.churp_share_reduction_point(
            self.churp_id,
            status.next_handoff,
            self.node_id,
        ))?;
        let point = scalar_from_bytes(&point).ok_or(Error::PointDecodingFailed)?;

        handoff.add_share_reduction_switch_point(x, point)
    }
649

650
    /// Tries to fetch switch point for full share distribution from
    /// the given node.
    ///
    /// The point is either computed locally from the reduced shareholder
    /// (when the given node is the host itself) or fetched from the remote
    /// node via the key manager client.
    pub fn fetch_share_distribution_switch_point(
        &self,
        node_id: PublicKey,
        status: &Status,
        handoff: &Handoff<S::Group>,
        client: &RemoteClient,
    ) -> Result<bool> {
        // Encode the node's identifier into a shareholder identifier.
        let x = encode_shareholder::<S>(&node_id.0, &self.shareholder_dst)?;

        // Refuse to fetch a point the handoff doesn't need.
        if !handoff.needs_full_share_distribution_switch_point(&x)? {
            return Err(Error::InvalidShareholder.into());
        }

        // Fetch from the host node.
        if node_id == self.node_id {
            let shareholder = handoff.get_reduced_shareholder()?;
            let point = shareholder.switch_point(&x);

            return handoff.add_full_share_distribution_switch_point(x, point);
        }

        // Fetch from the remote node.
        client.set_nodes(vec![node_id]);
        let point = block_on(client.churp_share_distribution_point(
            self.churp_id,
            status.next_handoff,
            self.node_id,
        ))?;
        let point = scalar_from_bytes(&point).ok_or(Error::PointDecodingFailed)?;

        handoff.add_full_share_distribution_switch_point(x, point)
    }
683

684
    /// Tries to fetch proactive bivariate share from the given node.
    ///
    /// A local share is produced by our own dealer; a remote share is
    /// fetched via the key manager client and its verification matrix is
    /// checked against the checksum from the node's application.
    pub fn fetch_bivariate_share(
        &self,
        node_id: PublicKey,
        status: &Status,
        handoff: &Handoff<S::Group>,
        client: &RemoteClient,
    ) -> Result<bool> {
        // Encode the node's identifier into a shareholder identifier.
        let x = encode_shareholder::<S>(&node_id.0, &self.shareholder_dst)?;

        // Refuse to fetch a share the handoff doesn't need.
        if !handoff.needs_bivariate_share(&x)? {
            return Err(Error::InvalidShareholder.into());
        }

        // Fetch from the host node.
        if node_id == self.node_id {
            let kind = Self::handoff_kind(status);
            let dealer = self.get_dealer(status.next_handoff)?;
            let share = dealer.make_share(x, kind);
            let vm = dealer.verification_matrix().clone();
            let verifiable_share = VerifiableSecretShare::new(share, vm);

            return handoff.add_bivariate_share(&x, verifiable_share);
        }

        // Fetch from the remote node.
        client.set_nodes(vec![node_id]);
        let share = block_on(client.churp_bivariate_share(
            self.churp_id,
            status.next_handoff,
            self.node_id,
        ))?;

        // The remote verification matrix needs to be verified.
        let checksum = self
            .checksum_verification_matrix_bytes(&share.verification_matrix, status.next_handoff);
        let application = status
            .applications
            .get(&node_id)
            .ok_or(Error::InvalidShareholder)?; // Should never happen, as we verify if we require this share.

        if checksum != application.checksum {
            return Err(Error::InvalidVerificationMatrixChecksum.into());
        }

        let verifiable_share: VerifiableSecretShare<S::Group> = share.try_into()?;

        handoff.add_bivariate_share(&x, verifiable_share)
    }
733

734
    /// Returns the shareholder for the given epoch.
NEW
735
    fn get_shareholder(&self, epoch: EpochTime) -> Result<Arc<Shareholder<S::Group>>> {
×
NEW
736
        let shareholders = self.shareholders.lock().unwrap();
×
NEW
737
        shareholders
×
NEW
738
            .get(&epoch)
×
739
            .cloned()
NEW
740
            .ok_or(Error::ShareholderNotFound.into())
×
741
    }
742

743
    /// Adds a shareholder for the given epoch.
NEW
744
    fn add_shareholder(&self, shareholder: Arc<Shareholder<S::Group>>, epoch: EpochTime) {
×
NEW
745
        let mut shareholders = self.shareholders.lock().unwrap();
×
NEW
746
        shareholders.insert(epoch, shareholder);
×
747
    }
748

749
    /// Keeps only the shareholder for the given epoch and the shareholder
750
    /// preceding that one.
NEW
751
    fn clean_shareholders(&self, epoch: EpochTime) {
×
UNCOV
752
        let mut shareholders = self.shareholders.lock().unwrap();
×
NEW
753
        let second_last = shareholders.keys().filter(|&&e| e < epoch).max().cloned();
×
NEW
754
        shareholders.retain(|&e, _| e == epoch || Some(e) == second_last);
×
755
    }
756

757
    /// Loads the shareholder from local storage for the given epoch.
    ///
    /// Invalid (previous-epoch) shares are silently ignored; if no share is
    /// found under the completed-handoff key, the next-handoff key is tried
    /// and, when found there, the share is backed up under the completed key.
    fn load_shareholder(&self, epoch: EpochTime) -> Result<()> {
        // Skip if no handoffs have been completed so far.
        if epoch == 0 {
            return Ok(());
        }

        let share = self
            .storage
            .load_secret_share(self.churp_id, epoch)
            .or_else(|err| ignore_error(err, Error::InvalidSecretShare))?; // Ignore previous shares.

        // If the secret share is not available, check if the next handoff
        // succeeded as it might have been confirmed while we were away.
        let share = match share {
            Some(share) => Some(share),
            None => {
                let share = self
                    .storage
                    .load_next_secret_share(self.churp_id, epoch)
                    .or_else(|err| ignore_error(err, Error::InvalidSecretShare))?; // Ignore previous shares.

                // Back up the secret share, if it is valid.
                if let Some(share) = share.as_ref() {
                    self.storage
                        .store_secret_share(share, self.churp_id, epoch)?;
                }

                share
            }
        };

        self.verify_and_add_shareholder(share, epoch)
    }
791

792
    /// Loads the next shareholder from local storage for the given epoch.
NEW
793
    fn load_next_shareholder(&self, epoch: EpochTime) -> Result<()> {
×
NEW
794
        let share = self
×
NEW
795
            .storage
×
NEW
796
            .load_next_secret_share(self.churp_id, epoch)
×
NEW
797
            .or_else(|err| ignore_error(err, Error::InvalidSecretShare))?; // Ignore previous shares.
×
798

NEW
799
        self.verify_and_add_shareholder(share, epoch)
×
800
    }
801

NEW
802
    fn verify_and_add_shareholder(
×
803
        &self,
804
        share: Option<VerifiableSecretShare<S::Group>>,
805
        epoch: EpochTime,
806
    ) -> Result<()> {
NEW
807
        let share = match share {
×
NEW
808
            Some(share) => share,
×
NEW
809
            None => return Ok(()),
×
810
        };
811

812
        // Verify that the host hasn't changed.
NEW
813
        let me = encode_shareholder::<S>(&self.node_id.0, &self.shareholder_dst)?;
×
NEW
814
        if share.secret_share().coordinate_x() != &me {
×
UNCOV
815
            return Err(Error::InvalidHost.into());
×
816
        }
817

818
        // Create a new shareholder.
819
        let shareholder = Arc::new(Shareholder::from(share));
×
820

821
        // Store the shareholder.
NEW
822
        self.add_shareholder(shareholder, epoch);
×
823

NEW
824
        Ok(())
×
825
    }
826

827
    /// Returns the dealer for the given epoch.
NEW
828
    fn get_dealer(&self, epoch: EpochTime) -> Result<Arc<Dealer<S::Group>>> {
×
NEW
829
        let dealer_guard = self.dealer.lock().unwrap();
×
830

NEW
831
        let dealer_info = match dealer_guard.as_ref() {
×
NEW
832
            Some(dealer_info) => dealer_info,
×
NEW
833
            None => return Err(Error::DealerNotFound.into()),
×
834
        };
NEW
835
        if dealer_info.epoch != epoch {
×
NEW
836
            return Err(Error::DealerNotFound.into());
×
837
        }
838

NEW
839
        Ok(dealer_info.dealer.clone())
×
840
    }
841

842
    /// Adds a dealer for the given epoch. If a dealer is already set,
843
    /// it will be overwritten.
NEW
844
    fn add_dealer(&self, dealer: Arc<Dealer<S::Group>>, epoch: EpochTime) {
×
NEW
845
        let mut dealer_guard = self.dealer.lock().unwrap();
×
NEW
846
        *dealer_guard = Some(DealerInfo { epoch, dealer });
×
847
    }
848

849
    /// Creates a new dealer for the given epoch.
850
    ///
851
    /// If a dealer for the same or any other epoch already exists, it will
852
    /// be removed, its bivariate polynomial overwritten, and permanently
853
    /// lost.
854
    ///
855
    /// Note that since the host controls the local storage, he can restart
856
    /// the enclave to create multiple dealers for the same epoch and then
857
    /// replace the last backup with a bivariate polynomial from a dealer
858
    /// of his choice. Therefore, it is essential to verify the bivariate
859
    /// polynomial after loading or when deriving bivariate shares.
NEW
860
    fn create_dealer(
×
861
        &self,
862
        epoch: EpochTime,
863
        threshold: u8,
864
        dealing_phase: bool,
865
    ) -> Result<Arc<Dealer<S::Group>>> {
866
        // Create a new dealer.
NEW
867
        let dealer = Dealer::create(threshold, dealing_phase, &mut OsRng)?;
×
NEW
868
        let dealer = Arc::new(dealer);
×
869

870
        // Encrypt and store the polynomial in case of a restart.
NEW
871
        let polynomial = dealer.bivariate_polynomial();
×
NEW
872
        self.storage
×
NEW
873
            .store_bivariate_polynomial(polynomial, self.churp_id, epoch)?;
×
874

875
        // Store the dealer.
NEW
876
        self.add_dealer(dealer.clone(), epoch);
×
877

NEW
878
        Ok(dealer)
×
879
    }
880

881
    /// Loads the dealer for the given epoch from the local storage and verifies
    /// it against the provided checksum.
    ///
    /// The polynomial read from host-controlled storage is untrusted until
    /// its verification matrix checksum matches the one recorded in the
    /// submitted application (when a checksum is given).
    fn load_dealer(&self, epoch: EpochTime, checksum: Option<Hash>) -> Result<()> {
        // Skip if handoffs are disabled.
        if epoch == HANDOFFS_DISABLED {
            return Ok(());
        }

        // Load untrusted polynomial.
        let polynomial = self
            .storage
            .load_bivariate_polynomial(self.churp_id, epoch)
            .or_else(|err| ignore_error(err, Error::InvalidBivariatePolynomial))?; // Ignore previous dealers.

        // Nothing to restore if no polynomial was persisted for this epoch.
        let polynomial = match polynomial {
            Some(polynomial) => polynomial,
            None => return Ok(()),
        };

        // Create untrusted dealer.
        let dealer = Arc::new(Dealer::from(polynomial));

        // Verify that the host hasn't created multiple dealers for the same
        // epoch and replaced the polynomial that was used to prepare
        // the application.
        if let Some(checksum) = checksum {
            let verification_matrix = dealer.verification_matrix();
            let computed_checksum = self.checksum_verification_matrix(verification_matrix, epoch);

            if checksum != computed_checksum {
                return Err(Error::InvalidBivariatePolynomial.into());
            }
        }

        // Store the dealer.
        self.add_dealer(dealer, epoch);

        Ok(())
    }
920

921
    /// Removes the dealer if it belongs to a handoff that occurred
922
    /// at or before the given epoch.
NEW
923
    fn remove_dealer(&self, max_epoch: EpochTime) {
×
NEW
924
        let mut dealer_guard = self.dealer.lock().unwrap();
×
NEW
925
        if let Some(dealer_info) = dealer_guard.as_ref() {
×
NEW
926
            if dealer_info.epoch <= max_epoch {
×
NEW
927
                *dealer_guard = None;
×
928
            }
929
        }
930
    }
931

932
    /// Returns the handoff for the given epoch.
NEW
933
    fn get_handoff(&self, epoch: EpochTime) -> Result<Arc<Handoff<S::Group>>> {
×
NEW
934
        let handoff_guard = self.handoff.lock().unwrap();
×
935

NEW
936
        let handoff_info = handoff_guard
×
937
            .as_ref()
NEW
938
            .filter(|hi| hi.epoch == epoch)
×
NEW
939
            .ok_or(Error::HandoffNotFound)?;
×
940

NEW
941
        Ok(handoff_info.handoff.clone())
×
942
    }
943

944
    /// Creates a handoff for the next handoff epoch. If a handoff already
    /// exists, the existing one is returned.
    fn get_or_create_handoff(&self, status: &Status) -> Result<Arc<Handoff<S::Group>>> {
        // Make sure to lock the handoff so that we don't create two handoffs
        // for the same epoch.
        let mut handoff_guard = self.handoff.lock().unwrap();

        if let Some(handoff_info) = handoff_guard.as_ref() {
            match status.next_handoff.cmp(&handoff_info.epoch) {
                // A handoff for a later epoch already exists; refuse to go back.
                cmp::Ordering::Less => return Err(Error::InvalidHandoff.into()),
                cmp::Ordering::Equal => return Ok(handoff_info.handoff.clone()),
                // The stored handoff is stale; fall through and replace it.
                cmp::Ordering::Greater => (),
            }
        }

        // Create a new handoff.
        let threshold = status.threshold;
        let me = encode_shareholder::<S>(&self.node_id.0, &self.shareholder_dst)?;
        // Applicants for the next committee become the new shareholders.
        let mut shareholders = Vec::with_capacity(status.applications.len());
        for id in status.applications.keys() {
            let x = encode_shareholder::<S>(&id.0, &self.shareholder_dst)?;
            shareholders.push(x);
        }
        let kind = Self::handoff_kind(status);
        let handoff = Handoff::new(threshold, me, shareholders, kind)?;
        let handoff = Arc::new(handoff);

        // If the committee hasn't changed, we need the latest shareholder
        // to randomize its share.
        if kind == HandoffKind::CommitteeUnchanged {
            let shareholder = self.get_shareholder(status.handoff)?;
            handoff.set_shareholder(shareholder)?;
        }

        // Store the handoff.
        *handoff_guard = Some(HandoffInfo {
            epoch: status.next_handoff,
            handoff: handoff.clone(),
        });

        Ok(handoff)
    }
986

987
    // Removes the handoff if it happened at or before the given epoch.
NEW
988
    fn remove_handoff(&self, max_epoch: EpochTime) {
×
NEW
989
        let mut handoff_guard = self.handoff.lock().unwrap();
×
NEW
990
        if let Some(handoff_info) = handoff_guard.as_ref() {
×
NEW
991
            if handoff_info.epoch <= max_epoch {
×
NEW
992
                *handoff_guard = None;
×
993
            }
994
        }
995
    }
996

997
    /// Verifies parameters of the last successfully completed handoff against
998
    /// the latest status.
NEW
999
    fn verify_last_handoff(&self, epoch: EpochTime) -> Result<Status> {
×
NEW
1000
        let status = self.churp_state.status(self.runtime_id, self.churp_id)?;
×
1001
        if status.handoff != epoch {
×
1002
            return Err(Error::HandoffMismatch.into());
×
1003
        }
1004

1005
        Ok(status)
×
1006
    }
1007

1008
    /// Verifies parameters of the next handoff against the latest status
1009
    /// and checks whether the handoff is in progress.
NEW
1010
    fn verify_next_handoff(&self, epoch: EpochTime) -> Result<Status> {
×
NEW
1011
        let status = self.churp_state.status(self.runtime_id, self.churp_id)?;
×
1012
        if status.next_handoff != epoch {
×
1013
            return Err(Error::HandoffMismatch.into());
×
1014
        }
1015

1016
        let now = self.beacon_state.epoch()?;
×
1017
        if status.next_handoff != now {
×
1018
            return Err(Error::HandoffClosed.into());
×
1019
        }
1020

1021
        Ok(status)
×
1022
    }
1023

1024
    /// Verifies the node ID by comparing the session's runtime attestation
    /// key (RAK) with the one published in the consensus layer.
    fn verify_node_id(&self, ctx: &RpcContext, node_id: &PublicKey) -> Result<()> {
        if !cfg!(any(target_env = "sgx", feature = "debug-mock-sgx")) {
            // Skip verification in non-SGX environments because those
            // nodes do not publish RAK in the consensus nor do they
            // send RAK binding when establishing Noise sessions.
            return Ok(());
        }

        let remote_rak = Self::remote_rak(ctx)?;
        let rak = self
            .registry_state
            .rak(node_id, &self.runtime_id)?
            .ok_or(Error::NotAuthenticated)?;

        // The session RAK must match the consensus-published one,
        // otherwise the peer is not the node it claims to be.
        if remote_rak != rak {
            return Err(Error::NotAuthorized.into());
        }

        Ok(())
    }
1046

1047
    /// Authorizes the remote key manager enclave so that secret data is never
1048
    /// revealed to an unauthorized enclave.
1049
    fn verify_km_enclave(&self, ctx: &RpcContext, policy: &SignedPolicySGX) -> Result<()> {
×
1050
        if Self::ignore_policy() {
×
1051
            return Ok(());
×
1052
        }
1053
        let remote_enclave = Self::remote_enclave(ctx)?;
×
1054
        let policy = self.policies.verify(policy)?;
×
1055
        if !policy.may_join(remote_enclave) {
×
1056
            return Err(Error::NotAuthorized.into());
×
1057
        }
1058
        Ok(())
×
1059
    }
1060

1061
    /// Authorizes the remote runtime enclave so that secret data is never
1062
    /// revealed to an unauthorized enclave.
1063
    fn verify_rt_enclave(
×
1064
        &self,
1065
        ctx: &RpcContext,
1066
        policy: &SignedPolicySGX,
1067
        runtime_id: &Namespace,
1068
    ) -> Result<()> {
1069
        if Self::ignore_policy() {
×
1070
            return Ok(());
×
1071
        }
1072
        let remote_enclave = Self::remote_enclave(ctx)?;
×
1073
        let policy = self.policies.verify(policy)?;
×
1074
        if !policy.may_query(remote_enclave, runtime_id) {
×
1075
            return Err(Error::NotAuthorized.into());
×
1076
        }
1077
        Ok(())
×
1078
    }
1079

1080
    /// Returns the session RAK of the remote enclave.
1081
    fn remote_rak(ctx: &RpcContext) -> Result<PublicKey> {
×
1082
        let si = ctx.session_info.as_ref();
×
1083
        let si = si.ok_or(Error::NotAuthenticated)?;
×
1084
        Ok(si.rak_binding.rak_pub())
×
1085
    }
1086

1087
    /// Returns the identity of the remote enclave.
1088
    fn remote_enclave(ctx: &RpcContext) -> Result<&EnclaveIdentity> {
×
1089
        let si = ctx.session_info.as_ref();
×
1090
        let si = si.ok_or(Error::NotAuthenticated)?;
×
1091
        Ok(&si.verified_attestation.quote.identity)
×
1092
    }
1093

1094
    /// Returns true if key manager policies should be ignored.
1095
    fn ignore_policy() -> bool {
×
1096
        option_env!("OASIS_UNSAFE_SKIP_KM_POLICY").is_some()
×
1097
    }
1098

1099
    /// Returns a key manager client that connects only to enclaves eligible
    /// to form a new committee or to enclaves belonging the old committee.
    fn key_manager_client(&self, status: &Status, new_committee: bool) -> Result<RemoteClient> {
        // `None` means no enclave restrictions (policy checks disabled).
        let enclaves = if Self::ignore_policy() {
            None
        } else {
            let policy = self.policies.verify(&status.policy)?;
            // Select the enclave set based on which committee we talk to.
            let enclaves = match new_committee {
                true => policy.may_join.clone(),
                false => policy.may_share.clone(),
            };
            Some(enclaves)
        };

        let client = RemoteClient::new_runtime_with_enclaves_and_policy(
            self.runtime_id,
            Some(self.runtime_id),
            enclaves,
            self.identity.quote_policy(),
            self.protocol.clone(),
            self.consensus_verifier.clone(),
            self.identity.clone(),
            1, // Not used, doesn't matter.
            vec![],
        );

        Ok(client)
    }
1127

1128
    /// Computes the checksum of the verification matrix.
1129
    fn checksum_verification_matrix<G>(
×
1130
        &self,
1131
        matrix: &VerificationMatrix<G>,
1132
        epoch: EpochTime,
1133
    ) -> Hash
1134
    where
1135
        G: Group + GroupEncoding,
1136
    {
NEW
1137
        self.checksum_verification_matrix_bytes(&matrix.to_bytes(), epoch)
×
1138
    }
1139

1140
    /// Computes the checksum of the verification matrix bytes.
NEW
1141
    fn checksum_verification_matrix_bytes(&self, bytes: &Vec<u8>, epoch: EpochTime) -> Hash {
×
UNCOV
1142
        let mut checksum = [0u8; 32];
×
1143
        let mut f = KMac::new_kmac256(bytes, CHECKSUM_VERIFICATION_MATRIX_CUSTOM);
×
NEW
1144
        f.update(&self.runtime_id.0);
×
NEW
1145
        f.update(&[self.churp_id]);
×
1146
        f.update(&epoch.to_le_bytes());
×
1147
        f.finalize(&mut checksum);
×
1148
        Hash(checksum)
×
1149
    }
1150

1151
    /// Returns the type of the next handoff depending on which nodes submitted
1152
    /// an application to form the next committee.
1153
    fn handoff_kind(status: &Status) -> HandoffKind {
×
1154
        if status.committee.is_empty() {
×
1155
            return HandoffKind::DealingPhase;
×
1156
        }
1157
        if status.committee.len() != status.applications.len() {
×
1158
            return HandoffKind::CommitteeChanged;
×
1159
        }
1160
        if status
×
UNCOV
1161
            .committee
×
1162
            .iter()
1163
            .all(|value| status.applications.contains_key(value))
×
1164
        {
1165
            return HandoffKind::CommitteeUnchanged;
×
1166
        }
1167
        HandoffKind::CommitteeChanged
×
1168
    }
1169

1170
    /// Extends the given domain separation tag with key manager runtime ID
1171
    /// and churp ID.
NEW
1172
    fn domain_separation_tag(context: &[u8], runtime_id: &Namespace, churp_id: u8) -> Vec<u8> {
×
1173
        let mut dst = context.to_vec();
×
1174
        dst.extend(RUNTIME_CONTEXT_SEPARATOR);
×
NEW
1175
        dst.extend(runtime_id.0);
×
1176
        dst.extend(CHURP_CONTEXT_SEPARATOR);
×
1177
        dst.extend(&[churp_id]);
×
1178
        dst
×
1179
    }
1180
}
1181

1182
impl<S: Suite> Handler for Instance<S> {
NEW
1183
    fn verification_matrix(&self, req: &QueryRequest) -> Result<Vec<u8>> {
×
NEW
1184
        let status = self.verify_last_handoff(req.epoch)?;
×
NEW
1185
        let shareholder = match status.suite_id {
×
NEW
1186
            SuiteId::NistP384Sha3_384 => self.get_shareholder(req.epoch)?,
×
1187
        };
NEW
1188
        let vm = shareholder
×
1189
            .verifiable_share()
1190
            .verification_matrix()
1191
            .to_bytes();
1192

NEW
1193
        Ok(vm)
×
1194
    }
1195

NEW
1196
    /// Serves this node's share-reduction switch point to an applicant
    /// for the next committee.
    fn share_reduction_switch_point(
        &self,
        ctx: &RpcContext,
        req: &QueryRequest,
    ) -> Result<Vec<u8>> {
        let status = self.verify_next_handoff(req.epoch)?;

        // Share reduction only happens when the committee is changing.
        let kind = Self::handoff_kind(&status);
        if !matches!(kind, HandoffKind::CommitteeChanged) {
            return Err(Error::InvalidHandoff.into());
        }

        // Only nodes that applied for the next committee may query.
        let node_id = req.node_id.as_ref().ok_or(Error::NotAuthenticated)?;
        if !status.applications.contains_key(node_id) {
            return Err(Error::NotInCommittee.into());
        }

        // Authenticate the peer node and its enclave before revealing
        // any secret material.
        self.verify_node_id(ctx, node_id)?;
        self.verify_km_enclave(ctx, &status.policy)?;

        // Evaluate this node's current share at the caller's encoded identity.
        let x = encode_shareholder::<S>(&node_id.0, &self.shareholder_dst)?;
        let shareholder = self.get_shareholder(status.handoff)?;
        let point = shareholder.switch_point(&x);
        let point = scalar_to_bytes(&point);

        Ok(point)
    }
1223

NEW
1224
    /// Serves this node's share-distribution switch point, derived from
    /// the reduced share produced during the current handoff.
    fn share_distribution_switch_point(
        &self,
        ctx: &RpcContext,
        req: &QueryRequest,
    ) -> Result<Vec<u8>> {
        let status = self.verify_next_handoff(req.epoch)?;

        // Share distribution only happens when the committee is changing.
        let kind = Self::handoff_kind(&status);
        if !matches!(kind, HandoffKind::CommitteeChanged) {
            return Err(Error::InvalidHandoff.into());
        }

        // Only nodes that applied for the next committee may query.
        let node_id = req.node_id.as_ref().ok_or(Error::NotAuthenticated)?;
        if !status.applications.contains_key(node_id) {
            return Err(Error::NotInCommittee.into());
        }

        // Authenticate the peer node and its enclave before revealing
        // any secret material.
        self.verify_node_id(ctx, node_id)?;
        self.verify_km_enclave(ctx, &status.policy)?;

        // Evaluate the reduced share at the caller's encoded identity.
        let x = encode_shareholder::<S>(&node_id.0, &self.shareholder_dst)?;
        let handoff = self.get_handoff(status.next_handoff)?;
        let shareholder = handoff.get_reduced_shareholder()?;
        let point = shareholder.switch_point(&x);
        let point = scalar_to_bytes(&point);

        Ok(point)
    }
1252

NEW
1253
    /// Serves a bivariate share from this node's dealer to an applicant
    /// for the next committee, together with the verification matrix.
    fn bivariate_share(
        &self,
        ctx: &RpcContext,
        req: &QueryRequest,
    ) -> Result<EncodedVerifiableSecretShare> {
        let status = self.verify_next_handoff(req.epoch)?;

        // Only nodes that applied for the next committee may query.
        let node_id = req.node_id.as_ref().ok_or(Error::NotAuthenticated)?;
        if !status.applications.contains_key(node_id) {
            return Err(Error::NotInCommittee.into());
        };

        // This node must have applied itself, otherwise it has no dealer
        // whose checksum was committed on chain.
        let application = status
            .applications
            .get(&self.node_id)
            .ok_or(Error::NotInCommittee)?;

        // Authenticate the peer node and its enclave before revealing
        // any secret material.
        self.verify_node_id(ctx, node_id)?;
        self.verify_km_enclave(ctx, &status.policy)?;

        let x = encode_shareholder::<S>(&node_id.0, &self.shareholder_dst)?;
        let kind = Self::handoff_kind(&status);
        let dealer = self.get_dealer(status.next_handoff)?;
        let share = dealer.make_share(x, kind);
        let share = (&share).into();
        let verification_matrix = dealer.verification_matrix().to_bytes();

        // Verify that the host hasn't created multiple dealers for the same
        // epoch and replaced the polynomial that was used to prepare
        // the application.
        let computed_checksum =
            self.checksum_verification_matrix_bytes(&verification_matrix, status.next_handoff);
        if application.checksum != computed_checksum {
            return Err(Error::InvalidBivariatePolynomial.into());
        }

        Ok(EncodedVerifiableSecretShare {
            share,
            verification_matrix,
        })
    }
1294

NEW
1295
    /// Serves an encrypted key share to an authorized runtime enclave.
    ///
    /// Clients lagging a few blocks behind the latest handoff are still
    /// served, using the status from an earlier consensus height.
    fn sgx_policy_key_share(
        &self,
        ctx: &RpcContext,
        req: &KeyShareRequest,
    ) -> Result<EncodedEncryptedPoint> {
        let status = self.churp_state.status(self.runtime_id, self.churp_id)?;
        let status = if status.handoff != req.epoch {
            // Allow querying past key shares if the client is a few blocks behind.
            self.churp_state
                .status_before(self.runtime_id, self.churp_id, ALLOWED_BLOCKS_BEHIND)?
        } else {
            status
        };

        // Even the older status must match the requested epoch.
        if status.handoff != req.epoch {
            return Err(Error::HandoffMismatch.into());
        }

        // Note that querying past key shares can fail at this point
        // if the policy has changed.
        self.verify_rt_enclave(ctx, &status.policy, &req.key_runtime_id)?;

        // Prepare key share.
        let shareholder = self.get_shareholder(status.handoff)?;
        let point = shareholder.make_key_share::<S>(&req.key_id.0, &self.sgx_policy_key_id_dst)?;

        Ok((&point).into())
    }
1323

NEW
1324
    /// Prepares and signs an application to join the next committee,
    /// creating a fresh dealer whose verification matrix checksum is
    /// committed in the application.
    fn apply(&self, req: &HandoffRequest) -> Result<SignedApplicationRequest> {
        let status = self.churp_state.status(self.runtime_id, self.churp_id)?;
        if status.next_handoff != req.epoch {
            return Err(Error::HandoffMismatch.into());
        }
        if status.next_handoff == HANDOFFS_DISABLED {
            return Err(Error::HandoffsDisabled.into());
        }
        // Only one application per node and handoff.
        if status.applications.contains_key(&self.node_id) {
            return Err(Error::ApplicationSubmitted.into());
        }

        // Ensure application is submitted one epoch before the next handoff.
        let now = self.beacon_state.epoch()?;
        if status.next_handoff != now + 1 {
            return Err(Error::ApplicationsClosed.into());
        }

        // Create a new dealer.
        let dealing_phase = status.committee.is_empty();
        let dealer = self.create_dealer(status.next_handoff, status.threshold, dealing_phase)?;

        // Fetch verification matrix and compute its checksum.
        let matrix = dealer.verification_matrix();
        let checksum = self.checksum_verification_matrix(matrix, req.epoch);

        // Prepare response and sign it with RAK.
        let application = ApplicationRequest {
            id: self.churp_id,
            runtime_id: self.runtime_id,
            epoch: status.next_handoff,
            checksum,
        };
        let body = cbor::to_vec(application.clone());
        let signature = self
            .signer
            .sign(APPLICATION_REQUEST_SIGNATURE_CONTEXT, &body)?;

        Ok(SignedApplicationRequest {
            application,
            signature,
        })
    }
1367

NEW
1368
    fn share_reduction(&self, req: &FetchRequest) -> Result<FetchResponse> {
×
NEW
1369
        let status = self.verify_next_handoff(req.epoch)?;
×
1370

NEW
1371
        let handoff = self.get_or_create_handoff(&status)?;
×
NEW
1372
        let client = self.key_manager_client(&status, false)?;
×
NEW
1373
        let f =
×
NEW
1374
            |node_id| self.fetch_share_reduction_switch_point(node_id, &status, &handoff, &client);
×
NEW
1375
        fetch(f, &req.node_ids)
×
1376
    }
1377

NEW
1378
    fn share_distribution(&self, req: &FetchRequest) -> Result<FetchResponse> {
×
NEW
1379
        let status = self.verify_next_handoff(req.epoch)?;
×
NEW
1380
        let handoff = self.get_handoff(status.next_handoff)?;
×
NEW
1381
        let client = self.key_manager_client(&status, true)?;
×
NEW
1382
        let f = |node_id| {
×
NEW
1383
            self.fetch_share_distribution_switch_point(node_id, &status, &handoff, &client)
×
1384
        };
NEW
1385
        fetch(f, &req.node_ids)
×
1386
    }
1387

NEW
1388
    fn proactivization(&self, req: &FetchRequest) -> Result<FetchResponse> {
×
NEW
1389
        let status = self.verify_next_handoff(req.epoch)?;
×
NEW
1390
        let handoff = match Self::handoff_kind(&status) {
×
NEW
1391
            HandoffKind::CommitteeChanged => self.get_handoff(status.next_handoff)?,
×
NEW
1392
            _ => self.get_or_create_handoff(&status)?,
×
1393
        };
NEW
1394
        let client = self.key_manager_client(&status, true)?;
×
NEW
1395
        let f = |node_id| self.fetch_bivariate_share(node_id, &status, &handoff, &client);
×
NEW
1396
        fetch(f, &req.node_ids)
×
1397
    }
1398

NEW
1399
    /// Backs up the newly derived share, registers the new shareholder and
    /// prepares a signed confirmation of the completed handoff.
    fn confirmation(&self, req: &HandoffRequest) -> Result<SignedConfirmationRequest> {
        let status = self.verify_next_handoff(req.epoch)?;

        // Only nodes that applied for the next committee can confirm.
        if !status.applications.contains_key(&self.node_id) {
            return Err(Error::ApplicationNotSubmitted.into());
        }

        // Fetch the next shareholder and its secret share.
        let handoff = self.get_handoff(status.next_handoff)?;
        let shareholder = handoff.get_full_shareholder()?;
        let share = shareholder.verifiable_share();

        // Back up the secret share before sending confirmation.
        self.storage
            .store_next_secret_share(share, self.churp_id, status.next_handoff)?;

        // Store the shareholder. Observe that we are adding the shareholder
        // before the consensus has confirmed that the handoff was completed.
        // This is fine, as we always verify the handoff epoch before fetching
        // a shareholder.
        self.add_shareholder(shareholder.clone(), status.next_handoff);

        // Prepare response and sign it with RAK.
        let vm = share.verification_matrix();
        let checksum = self.checksum_verification_matrix(vm, status.next_handoff);
        let confirmation = ConfirmationRequest {
            id: self.churp_id,
            runtime_id: self.runtime_id,
            epoch: status.next_handoff,
            checksum,
        };
        let body = cbor::to_vec(confirmation.clone());
        let signature = self
            .signer
            .sign(CONFIRMATION_REQUEST_SIGNATURE_CONTEXT, &body)?;

        Ok(SignedConfirmationRequest {
            confirmation,
            signature,
        })
    }
1440

NEW
1441
    /// Finalizes a completed handoff: prunes stale shareholders, dealers
    /// and handoffs, and promotes the confirmed share to the primary backup.
    fn finalize(&self, req: &HandoffRequest) -> Result<()> {
        let status = self.verify_last_handoff(req.epoch)?;

        // Keep only the last two shareholders. The second-last shareholder
        // could be removed after a few blocks, as we need it only to serve
        // clients that are lagging behind.
        self.clean_shareholders(status.handoff);

        // Cleaning up dealers and handoffs is optional,
        // as they are overwritten during the next handoff.
        let max_epoch = status.next_handoff.saturating_sub(1);
        self.remove_dealer(max_epoch);
        self.remove_handoff(max_epoch);

        // Fetch the last shareholder and its secret share. If the handoff
        // failed, there may be no shareholder for this epoch.
        let shareholder = match self.get_shareholder(status.handoff) {
            Ok(shareholder) => shareholder,
            Err(_) => return Ok(()), // Not found.
        };
        let share = shareholder.verifiable_share();

        // Back up the secret share. This operation will be a no-op
        // if the handoff failed, as the last shareholder hasn't changed.
        self.storage
            .store_secret_share(share, self.churp_id, status.handoff)
    }
1467
}
1468

1469
/// Replaces the given error with `Ok(None)`.
1470
fn ignore_error<T>(err: anyhow::Error, ignore: Error) -> Result<Option<T>> {
×
1471
    match err.downcast_ref::<Error>() {
×
1472
        Some(error) if error == &ignore => Ok(None),
×
1473
        _ => Err(err),
×
1474
    }
1475
}
1476

1477
/// Fetches data from the given nodes by calling the provided function
1478
/// for each node.
1479
fn fetch<F>(f: F, node_ids: &[PublicKey]) -> Result<FetchResponse>
×
1480
where
1481
    F: Fn(PublicKey) -> Result<bool>,
1482
{
1483
    let mut completed = false;
×
1484
    let mut succeeded = vec![];
×
1485
    let mut failed = vec![];
×
1486

1487
    for &node_id in node_ids {
×
1488
        if completed {
×
1489
            break;
×
1490
        }
1491

1492
        match f(node_id) {
×
1493
            Ok(done) => {
×
1494
                completed = done;
×
1495
                succeeded.push(node_id);
×
1496
            }
1497
            Err(_) => {
×
1498
                failed.push(node_id);
×
1499
            }
1500
        }
1501
    }
1502

1503
    Ok(FetchResponse {
×
1504
        completed,
×
1505
        succeeded,
×
1506
        failed,
×
1507
    })
1508
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc