oasisprotocol / oasis-core / #5373

11 Oct 2024 12:48PM UTC coverage: 47.696% (+0.05%) from 47.643%

Pull #5897 by peternose
secret-sharing/src/churp/switch: Verify combined bivariate polynomial

After all bivariate shares are collected and the switch either
creates a new shareholder or proactivates the share of an existing
one, the new share should be verified to ensure that the verification
matrix of the combined bivariate polynomial satisfies the non-zero
leading term requirements.
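For context, the new check guards the moment when collected bivariate shares are summed: if the highest-degree coefficients cancelled out during summation, the combined polynomial would effectively have a lower degree, and hence a different reconstruction threshold, than advertised. Below is a minimal sketch of such a non-zero leading term check over a toy matrix type; the names are illustrative only, the real check lives in `secret-sharing/src/churp/switch.rs` and uses the crate's own `VerificationMatrix`:

```rust
use group::Group;

/// Toy stand-in for a verification matrix of group elements with
/// dimensions rows x cols; illustrative, not the actual crate type.
struct ToyVerificationMatrix<G: Group> {
    elements: Vec<Vec<G>>,
}

impl<G: Group> ToyVerificationMatrix<G> {
    /// Checks that the matrix has the expected dimensions and that the
    /// highest-degree row and column contain at least one non-identity
    /// element, i.e. the combined polynomial has full degree.
    fn satisfies_leading_term_requirements(&self, rows: usize, cols: usize) -> bool {
        if rows == 0 || cols == 0 {
            return false;
        }
        if self.elements.len() != rows || self.elements.iter().any(|r| r.len() != cols) {
            return false;
        }
        // The last row/column hold the leading (highest-degree) coefficients.
        let last_row = self.elements[rows - 1]
            .iter()
            .any(|e| !bool::from(e.is_identity()));
        let last_col = self
            .elements
            .iter()
            .any(|r| !bool::from(r[cols - 1].is_identity()));
        last_row && last_col
    }
}
```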

33 of 42 new or added lines in 5 files covered. (78.57%)

1 existing line in 1 file now uncovered.

4503 of 9441 relevant lines covered (47.7%)

1.11 hits per line

Source File: /keymanager/src/churp/handler.rs
//! CHURP handler.
use std::{
    cmp,
    collections::HashMap,
    convert::TryInto,
    sync::{Arc, Mutex},
};

use anyhow::Result;
use group::{Group, GroupEncoding};
use rand::rngs::OsRng;
use sp800_185::KMac;

use oasis_core_runtime::{
    common::{
        crypto::{
            hash::Hash,
            signature::{PublicKey, Signer},
        },
        namespace::Namespace,
        sgx::EnclaveIdentity,
    },
    consensus::{
        beacon::EpochTime,
        keymanager::churp::{SignedPolicySGX, Status, SuiteId},
        verifier::Verifier,
    },
    enclave_rpc::Context as RpcContext,
    future::block_on,
    identity::Identity,
    protocol::ProtocolUntrustedLocalStorage,
    Protocol,
};

use secret_sharing::{
    churp::{
        encode_shareholder, CommitteeChanged, CommitteeUnchanged, Dealer, DealingPhase, Handoff,
        HandoffKind, Shareholder, VerifiableSecretShare,
    },
    kdc::KeySharer,
    poly::{scalar_from_bytes, scalar_to_bytes},
    suites::{p384, Suite},
    vss::VerificationMatrix,
};

use crate::{
    beacon::State as BeaconState,
    client::{KeyManagerClient, RemoteClient},
    registry::State as RegistryState,
};

use super::{
    storage::Storage, ApplicationRequest, ConfirmationRequest, EncodedEncryptedPoint,
    EncodedVerifiableSecretShare, Error, FetchRequest, FetchResponse, HandoffRequest,
    KeyShareRequest, QueryRequest, SignedApplicationRequest, SignedConfirmationRequest,
    State as ChurpState, VerifiedPolicies,
};

/// A handoff interval that disables handoffs.
pub const HANDOFFS_DISABLED: EpochTime = 0xffffffffffffffff;

/// Signature context for signing application requests.
const APPLICATION_REQUEST_SIGNATURE_CONTEXT: &[u8] =
    b"oasis-core/keymanager/churp: application request";

/// Signature context for signing confirmation requests.
const CONFIRMATION_REQUEST_SIGNATURE_CONTEXT: &[u8] =
    b"oasis-core/keymanager/churp: confirmation request";

/// Custom KMAC domain separation for checksums of verification matrices.
const CHECKSUM_VERIFICATION_MATRIX_CUSTOM: &[u8] =
    b"oasis-core/keymanager/churp: verification matrix";

/// Domain separation tag for encoding shareholder identifiers.
const ENCODE_SHAREHOLDER_CONTEXT: &[u8] = b"oasis-core/keymanager/churp: encode shareholder";

/// Domain separation tag for encoding key identifiers for key share derivation
/// approved by an SGX policy.
///
/// SGX policies specify which enclave identities are authorized to access
/// runtime key shares.
const ENCODE_SGX_POLICY_KEY_ID_CONTEXT: &[u8] =
    b"oasis-core/keymanager/churp: encode SGX policy key ID";

/// Domain separation tag for encoding key identifiers for key share derivation
/// approved by a custom policy.
///
/// Custom policies allow access to key shares only for clients that submit
/// a proof, which can be validated against the policy. The hash of the policy
/// is part of the key identifier and is integral to the key derivation process.
#[allow(dead_code)]
const ENCODE_CUSTOM_POLICY_KEY_ID_CONTEXT: &[u8] =
    b"oasis-core/keymanager/churp: encode custom policy key ID";

/// The runtime separator used to add additional domain separation based
/// on the runtime ID.
const RUNTIME_CONTEXT_SEPARATOR: &[u8] = b" for runtime ";

/// The churp separator used to add additional domain separation based
/// on the churp ID.
const CHURP_CONTEXT_SEPARATOR: &[u8] = b" for churp ";

/// The number of blocks a remote client is allowed to be behind when querying
/// past key shares.
const ALLOWED_BLOCKS_BEHIND: u64 = 5;

/// Represents information about a dealer.
struct DealerInfo<G: Group + GroupEncoding> {
    /// The epoch during which this dealer is active.
    epoch: EpochTime,
    /// The dealer associated with this information.
    dealer: Arc<Dealer<G>>,
}

/// Represents information about a handoff.
struct HandoffInfo<G: Group + GroupEncoding> {
    /// The handoff epoch.
    epoch: EpochTime,
    /// The handoff associated with this information.
    handoff: Arc<Box<dyn Handoff<G>>>,
}

pub(crate) trait Handler: Send + Sync {
    /// Returns the verification matrix of the shared secret bivariate
    /// polynomial from the last successfully completed handoff.
    ///
    /// The verification matrix is a matrix of dimensions t_n x t_m, where
    /// t_n = threshold and t_m = 2 * threshold + 1. It contains encrypted
    /// coefficients of the secret bivariate polynomial whose zero coefficient
    /// represents the shared secret.
    ///
    /// Verification matrix:
    /// ```text
    ///     M = [b_{i,j} * G]
    /// ```
    /// Bivariate polynomial:
    /// ```text
    ///     B(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j
    /// ```
    /// Shared secret:
    /// ```text
    ///     Secret = B(0, 0)
    /// ```
    ///
    /// This matrix is used to verify switch points derived from the bivariate
    /// polynomial share in handoffs.
    ///
    /// NOTE: This method can be called over an insecure channel, as the matrix
    /// does not contain any sensitive information. However, the checksum
    /// of the matrix should always be verified against the consensus layer.
    fn verification_matrix(&self, req: &QueryRequest) -> Result<Vec<u8>>;

    /// Returns switch point for share reduction for the calling node.
    ///
    /// The point is evaluation of the shared secret bivariate polynomial
    /// at the given x (me) and y value (node ID).
    ///
    /// Switch point:
    /// ```text
    ///     Point = B(me, node_id)
    /// ```
    /// Bivariate polynomial:
    /// ```text
    ///     B(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j
    /// ```
    ///
    /// WARNING: This method must be called over a secure channel as the point
    /// needs to be kept secret and generated only for authorized nodes.
    fn share_reduction_switch_point(&self, ctx: &RpcContext, req: &QueryRequest)
        -> Result<Vec<u8>>;

    /// Returns switch point for full share distribution for the calling node.
    ///
    /// The point is evaluation of the proactivized shared secret bivariate
    /// polynomial at the given x (node ID) and y value (me).
    ///
    /// Switch point:
    /// ```text
    ///     Point = B(node_id, me) + \sum Q_i(node_id, me)
    /// ```
    /// Bivariate polynomial:
    /// ```text
    ///     B(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j
    /// ```
    /// Proactive bivariate polynomial:
    /// ```text
    ///     Q_i(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j
    /// ```
    ///
    /// WARNING: This method must be called over a secure channel as the point
    /// needs to be kept secret and generated only for authorized nodes.
    fn share_distribution_switch_point(
        &self,
        ctx: &RpcContext,
        req: &QueryRequest,
    ) -> Result<Vec<u8>>;

    /// Returns proactive bivariate polynomial share for the calling node.
    ///
    /// A bivariate share is a partial evaluation of a randomly selected
    /// bivariate polynomial at a specified x or y value (node ID). Each node
    /// interested in joining the new committee selects a bivariate polynomial
    /// before the next handoff and commits to it by submitting the checksum
    /// of the corresponding verification matrix to the consensus layer.
    /// The latter can be used to verify the received bivariate shares.
    ///
    /// Bivariate polynomial share:
    /// ```text
    ///     S_i(y) = Q_i(node_id, y) (dealing phase or unchanged committee)
    ///     S_i(x) = Q_i(x, node_id) (committee changes)
    /// ```
    /// Proactive bivariate polynomial:
    /// ```text
    ///     Q_i(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j
    /// ```
    ///
    /// WARNING: This method must be called over a secure channel as
    /// the polynomial needs to be kept secret and generated only
    /// for authorized nodes.
    fn bivariate_share(
        &self,
        ctx: &RpcContext,
        req: &QueryRequest,
    ) -> Result<EncodedVerifiableSecretShare>;

    /// Returns the key share for the given key ID generated by the key
    /// derivation center.
    ///
    /// Key share:
    /// ```text
    ///     KS_i = s_i * H(key_id)
    /// ```
    ///
    /// WARNING: This method must be called over a secure channel as the key
    /// share needs to be kept secret and generated only for authorized nodes.
    fn sgx_policy_key_share(
        &self,
        ctx: &RpcContext,
        req: &KeyShareRequest,
    ) -> Result<EncodedEncryptedPoint>;

    /// Prepare CHURP for participation in the given handoff of the protocol.
    ///
    /// Initialization randomly selects a bivariate polynomial for the given
    /// handoff, computes the corresponding verification matrix and its
    /// checksum, and signs the latter.
    ///
    /// Bivariate polynomial:
    ///     B(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j
    ///
    /// Verification matrix:
    ///     M = [b_{i,j} * G]
    ///
    /// Checksum:
    ///     H = KMAC256(M, runtime ID, handoff)
    ///
    /// The bivariate polynomial is zero-hole in all handoffs except in the
    /// first one (dealing phase).
    ///
    /// This method must be called locally.
    fn apply(&self, req: &HandoffRequest) -> Result<SignedApplicationRequest>;

    /// Tries to fetch switch points for share reduction from the given nodes.
    ///
    /// Switch points should be obtained from (at least) t distinct nodes
    /// belonging to the old committee, verified against verification matrix
    /// whose checksum was published in the consensus layer, merged into
    /// a reduced share using Lagrange interpolation and proactivized with
    /// bivariate shares.
    ///
    /// Switch point:
    /// ```text
    ///     P_i = B(node_i, me)
    /// ```
    /// Reduced share:
    /// ```text
    ///     RS(x) = B(x, me)
    /// ```
    /// Proactive reduced share:
    /// ```text
    ///     QR(x) = RS(x) + \sum Q_i(x, me)
    /// ```
    fn share_reduction(&self, req: &FetchRequest) -> Result<FetchResponse>;

    /// Tries to fetch switch data points for full share distribution from
    /// the given nodes.
    ///
    /// Switch points should be obtained from (at least) 2t distinct nodes
    /// belonging to the new committee, verified against the sum of the
    /// verification matrix and the verification matrices of proactive
    /// bivariate shares, whose checksums were published in the consensus
    /// layer, and merged into a full share using Lagrange interpolation.
    ///
    /// Switch point:
    /// ```text
    ///     P_i = B(me, node_i) + \sum Q_i(me, node_i)
    /// ```
    /// Full share:
    /// ```text
    ///     FS(y) = B(me, y) + \sum Q_i(me, y) = B'(me, y)
    /// ```
    fn share_distribution(&self, req: &FetchRequest) -> Result<FetchResponse>;

    /// Tries to fetch proactive bivariate shares from the given nodes.
    ///
    /// Bivariate shares should be fetched from all candidates for the new
    /// committee, including our own, verified against verification matrices
    /// whose checksums were published in the consensus layer, and summed
    /// into a bivariate polynomial.
    ///
    /// Bivariate polynomial share:
    /// ```text
    ///     S_i(y) = Q_i(me, y) (dealing phase or unchanged committee)
    ///     S_i(x) = Q_i(x, me) (committee changes)
    /// ```
    fn proactivization(&self, req: &FetchRequest) -> Result<FetchResponse>;

    /// Returns a signed confirmation request containing the checksum
    /// of the merged verification matrix.
    fn confirmation(&self, req: &HandoffRequest) -> Result<SignedConfirmationRequest>;

    /// Finalizes the specified scheme by cleaning up obsolete dealers,
    /// handoffs, and shareholders. If the handoff was just completed,
    /// the shareholder is made available, and its share is persisted
    /// to the local storage.
    fn finalize(&self, req: &HandoffRequest) -> Result<()>;
}
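
// Note (added for exposition, not part of the upstream file): a node taking
// part in a handoff drives these methods roughly in this order: `apply`
// (commit to a fresh bivariate polynomial one epoch ahead), then during the
// handoff epoch `share_reduction` and/or `proactivization` followed by
// `share_distribution` (depending on whether the committee changes), then
// `confirmation`, and finally `finalize` once consensus records the outcome.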

/// Key manager application that implements churn-robust proactive secret
/// sharing scheme (CHURP).
pub struct Churp {
    /// Host node identifier.
    node_id: PublicKey,
    /// Key manager runtime ID.
    runtime_id: Namespace,
    /// Runtime identity.
    identity: Arc<Identity>,
    /// Low-level access to the underlying Runtime Host Protocol.
    protocol: Arc<Protocol>,
    /// Consensus verifier.
    consensus_verifier: Arc<dyn Verifier>,
    /// Verified churp state.
    churp_state: ChurpState,

    /// Cached instances.
    instances: Mutex<HashMap<u8, Arc<dyn Handler>>>,
    /// Cached verified policies.
    policies: Arc<VerifiedPolicies>,
}

impl Churp {
    pub fn new(
        node_id: PublicKey,
        identity: Arc<Identity>,
        protocol: Arc<Protocol>,
        consensus_verifier: Arc<dyn Verifier>,
    ) -> Self {
        let runtime_id = protocol.get_runtime_id();
        let churp_state = ChurpState::new(consensus_verifier.clone());
        let instances = Mutex::new(HashMap::new());
        let policies = Arc::new(VerifiedPolicies::new());

        Self {
            node_id,
            runtime_id,
            identity,
            protocol,
            consensus_verifier,
            churp_state,
            instances,
            policies,
        }
    }

    fn get_instance(&self, churp_id: u8, runtime_id: Namespace) -> Result<Arc<dyn Handler>> {
        // Ensure runtime_id matches.
        if self.runtime_id != runtime_id {
            return Err(Error::RuntimeMismatch.into());
        }

        // Return the instance if it exists.
        let mut instances = self.instances.lock().unwrap();
        if let Some(instance) = instances.get(&churp_id) {
            return Ok(instance.clone());
        }

        // Create a new instance based on the suite type.
        let status = self.churp_state.status(self.runtime_id, churp_id)?;
        let instance = match status.suite_id {
            SuiteId::NistP384Sha3_384 => Instance::<p384::Sha3_384>::new(
                churp_id,
                self.node_id,
                self.identity.clone(),
                self.protocol.clone(),
                self.consensus_verifier.clone(),
                self.policies.clone(),
            ),
        };

        // Load secret shares and bivariate share.
        instance.init(&status)?;

        // Store the new instance.
        let instance = Arc::new(instance);
        instances.insert(churp_id, instance.clone());

        Ok(instance)
    }
}

impl Handler for Churp {
    fn verification_matrix(&self, req: &QueryRequest) -> Result<Vec<u8>> {
        let instance = self.get_instance(req.id, req.runtime_id)?;
        instance.verification_matrix(req)
    }

    fn share_reduction_switch_point(
        &self,
        ctx: &RpcContext,
        req: &QueryRequest,
    ) -> Result<Vec<u8>> {
        let instance = self.get_instance(req.id, req.runtime_id)?;
        instance.share_reduction_switch_point(ctx, req)
    }

    fn share_distribution_switch_point(
        &self,
        ctx: &RpcContext,
        req: &QueryRequest,
    ) -> Result<Vec<u8>> {
        let instance = self.get_instance(req.id, req.runtime_id)?;
        instance.share_distribution_switch_point(ctx, req)
    }

    fn bivariate_share(
        &self,
        ctx: &RpcContext,
        req: &QueryRequest,
    ) -> Result<EncodedVerifiableSecretShare> {
        let instance = self.get_instance(req.id, req.runtime_id)?;
        instance.bivariate_share(ctx, req)
    }

    fn sgx_policy_key_share(
        &self,
        ctx: &RpcContext,
        req: &KeyShareRequest,
    ) -> Result<EncodedEncryptedPoint> {
        let instance = self.get_instance(req.id, req.runtime_id)?;
        instance.sgx_policy_key_share(ctx, req)
    }

    fn apply(&self, req: &HandoffRequest) -> Result<SignedApplicationRequest> {
        let instance = self.get_instance(req.id, req.runtime_id)?;
        instance.apply(req)
    }

    fn share_reduction(&self, req: &FetchRequest) -> Result<FetchResponse> {
        let instance = self.get_instance(req.id, req.runtime_id)?;
        instance.share_reduction(req)
    }

    fn share_distribution(&self, req: &FetchRequest) -> Result<FetchResponse> {
        let instance = self.get_instance(req.id, req.runtime_id)?;
        instance.share_distribution(req)
    }

    fn proactivization(&self, req: &FetchRequest) -> Result<FetchResponse> {
        let instance = self.get_instance(req.id, req.runtime_id)?;
        instance.proactivization(req)
    }

    fn confirmation(&self, req: &HandoffRequest) -> Result<SignedConfirmationRequest> {
        let instance = self.get_instance(req.id, req.runtime_id)?;
        instance.confirmation(req)
    }

    fn finalize(&self, req: &HandoffRequest) -> Result<()> {
        let instance = self.get_instance(req.id, req.runtime_id)?;
        instance.finalize(req)
    }
}

struct Instance<S: Suite> {
    /// Host node identifier.
    node_id: PublicKey,
    /// Instance identifier.
    churp_id: u8,
    /// Key manager runtime ID.
    runtime_id: Namespace,
    /// Runtime identity.
    identity: Arc<Identity>,
    /// Runtime attestation key signer.
    signer: Arc<dyn Signer>,

    /// Storage handler.
    storage: Storage,
    /// Consensus verifier.
    consensus_verifier: Arc<dyn Verifier>,
    /// Low-level access to the underlying Runtime Host Protocol.
    protocol: Arc<Protocol>,

    /// Verified beacon state.
    beacon_state: BeaconState,
    /// Verified churp state.
    churp_state: ChurpState,
    /// Verified registry state.
    registry_state: RegistryState,

    /// Shareholders with secret shares for completed handoffs.
    ///
    /// The map may also contain shareholders for failed or unfinished
    /// handoffs, so always verify if the handoff succeeded in the consensus.
    shareholders: Mutex<HashMap<EpochTime, Arc<Shareholder<S::Group>>>>,
    /// Dealer of bivariate shares for the next handoff.
    dealer: Mutex<Option<DealerInfo<S::Group>>>,
    /// Next handoff.
    handoff: Mutex<Option<HandoffInfo<S::Group>>>,

    /// Cached verified policies.
    policies: Arc<VerifiedPolicies>,

    /// Domain separation tag for encoding shareholder identifiers.
    shareholder_dst: Vec<u8>,
    /// Domain separation tag for encoding key identifiers for key share
    /// derivation approved by an SGX policy.
    sgx_policy_key_id_dst: Vec<u8>,
}

impl<S: Suite> Instance<S> {
    /// Creates a new CHURP instance.
    pub fn new(
        churp_id: u8,
        node_id: PublicKey,
        identity: Arc<Identity>,
        protocol: Arc<Protocol>,
        consensus_verifier: Arc<dyn Verifier>,
        policies: Arc<VerifiedPolicies>,
    ) -> Self {
        let runtime_id = protocol.get_runtime_id();
        let storage = Storage::new(Arc::new(ProtocolUntrustedLocalStorage::new(
            protocol.clone(),
        )));
        let signer: Arc<dyn Signer> = identity.clone();

        let beacon_state = BeaconState::new(consensus_verifier.clone());
        let churp_state = ChurpState::new(consensus_verifier.clone());
        let registry_state = RegistryState::new(consensus_verifier.clone());

        let shareholders = Mutex::new(HashMap::new());
        let dealer = Mutex::new(None);
        let handoff = Mutex::new(None);

        let shareholder_dst =
            Self::domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, &runtime_id, churp_id);
        let sgx_policy_key_id_dst =
            Self::domain_separation_tag(ENCODE_SGX_POLICY_KEY_ID_CONTEXT, &runtime_id, churp_id);

        Self {
            churp_id,
            identity,
            signer,
            node_id,
            runtime_id,
            protocol,
            consensus_verifier,
            storage,
            beacon_state,
            shareholders,
            churp_state,
            registry_state,
            dealer,
            handoff,
            policies,
            shareholder_dst,
            sgx_policy_key_id_dst,
        }
    }

    /// Initializes the instance by loading the shareholder for the last
    /// successfully completed handoff, as well as the shareholder and
    /// the dealer for the upcoming handoff, if they are available.
    pub fn init(&self, status: &Status) -> Result<()> {
        let checksum = status
            .applications
            .get(&self.node_id)
            .map(|app| app.checksum);

        self.load_shareholder(status.handoff)?;
        self.load_next_shareholder(status.next_handoff)?;
        self.load_dealer(status.next_handoff, checksum)
    }

    /// Tries to fetch switch point for share reduction from the given node.
    pub fn fetch_share_reduction_switch_point(
        &self,
        node_id: PublicKey,
        status: &Status,
        handoff: &Arc<Box<dyn Handoff<S::Group>>>,
        client: &RemoteClient,
    ) -> Result<bool> {
        let x = encode_shareholder::<S>(&node_id.0, &self.shareholder_dst)?;

        if !handoff.needs_share_reduction_switch_point(&x)? {
            return Err(Error::InvalidShareholder.into());
        }

        // Fetch from the host node.
        if node_id == self.node_id {
            let shareholder = self.get_shareholder(status.handoff)?;
            let point = shareholder.switch_point(&x);

            if handoff.needs_verification_matrix()? {
                // Local verification matrix is trusted.
                let vm = shareholder.verifiable_share().verification_matrix().clone();
                handoff.set_verification_matrix(vm)?;
            }

            return handoff.add_share_reduction_switch_point(x, point);
        }

        // Fetch from the remote node.
        if handoff.needs_verification_matrix()? {
            // The remote verification matrix needs to be verified.
            let vm = block_on(client.churp_verification_matrix(
                self.churp_id,
                status.handoff,
                vec![node_id],
            ))?;
            let checksum = self.checksum_verification_matrix_bytes(&vm, status.handoff);
            let status_checksum = status.checksum.ok_or(Error::InvalidHandoff)?; // Should never happen.
            if checksum != status_checksum {
                return Err(Error::InvalidVerificationMatrixChecksum.into());
            }

            let vm = VerificationMatrix::from_bytes(&vm)
                .ok_or(Error::VerificationMatrixDecodingFailed)?;
            handoff.set_verification_matrix(vm)?;
        }

        let point = block_on(client.churp_share_reduction_point(
            self.churp_id,
            status.next_handoff,
            self.node_id,
            vec![node_id],
        ))?;
        let point = scalar_from_bytes(&point).ok_or(Error::PointDecodingFailed)?;

        handoff.add_share_reduction_switch_point(x, point)
    }

    /// Tries to fetch switch point for full share distribution from the given
    /// node.
    pub fn fetch_share_distribution_switch_point(
        &self,
        node_id: PublicKey,
        status: &Status,
        handoff: &Arc<Box<dyn Handoff<S::Group>>>,
        client: &RemoteClient,
    ) -> Result<bool> {
        let x = encode_shareholder::<S>(&node_id.0, &self.shareholder_dst)?;

        if !handoff.needs_full_share_distribution_switch_point(&x)? {
            return Err(Error::InvalidShareholder.into());
        }

        // Fetch from the host node.
        if node_id == self.node_id {
            let shareholder = handoff.get_reduced_shareholder()?;
            let point = shareholder.switch_point(&x);

            return handoff.add_full_share_distribution_switch_point(x, point);
        }

        // Fetch from the remote node.
        let point = block_on(client.churp_share_distribution_point(
            self.churp_id,
            status.next_handoff,
            self.node_id,
            vec![node_id],
        ))?;
        let point = scalar_from_bytes(&point).ok_or(Error::PointDecodingFailed)?;

        handoff.add_full_share_distribution_switch_point(x, point)
    }

    /// Tries to fetch proactive bivariate share from the given node.
    pub fn fetch_bivariate_share(
        &self,
        node_id: PublicKey,
        status: &Status,
        handoff: &Arc<Box<dyn Handoff<S::Group>>>,
        client: &RemoteClient,
    ) -> Result<bool> {
        let x = encode_shareholder::<S>(&node_id.0, &self.shareholder_dst)?;

        if !handoff.needs_bivariate_share(&x)? {
            return Err(Error::InvalidShareholder.into());
        }

        // Fetch from the host node.
        if node_id == self.node_id {
            let kind = Self::handoff_kind(status);
            let dealer = self.get_dealer(status.next_handoff)?;
            let share = dealer.make_share(x, kind);
            let vm = dealer.verification_matrix().clone();
            let verifiable_share = VerifiableSecretShare::new(share, vm);

            return handoff.add_bivariate_share(&x, verifiable_share);
        }

        // Fetch from the remote node.
        let share = block_on(client.churp_bivariate_share(
            self.churp_id,
            status.next_handoff,
            self.node_id,
            vec![node_id],
        ))?;

        // The remote verification matrix needs to be verified.
        let checksum = self
            .checksum_verification_matrix_bytes(&share.verification_matrix, status.next_handoff);
        let application = status
            .applications
            .get(&node_id)
            .ok_or(Error::InvalidShareholder)?; // Should never happen, as we verify if we require this share.

        if checksum != application.checksum {
            return Err(Error::InvalidVerificationMatrixChecksum.into());
        }

        let verifiable_share: VerifiableSecretShare<S::Group> = share.try_into()?;

        handoff.add_bivariate_share(&x, verifiable_share)
    }

    /// Returns the shareholder for the given epoch.
    fn get_shareholder(&self, epoch: EpochTime) -> Result<Arc<Shareholder<S::Group>>> {
        let shareholders = self.shareholders.lock().unwrap();
        shareholders
            .get(&epoch)
            .cloned()
            .ok_or(Error::ShareholderNotFound.into())
    }

    /// Adds a shareholder for the given epoch.
    fn add_shareholder(&self, shareholder: Arc<Shareholder<S::Group>>, epoch: EpochTime) {
        let mut shareholders = self.shareholders.lock().unwrap();
        shareholders.insert(epoch, shareholder);
    }

    /// Keeps only the shareholder for the given epoch and the shareholder
    /// preceding that one.
    fn clean_shareholders(&self, epoch: EpochTime) {
        let mut shareholders = self.shareholders.lock().unwrap();
        let second_last = shareholders.keys().filter(|&&e| e < epoch).max().cloned();
        shareholders.retain(|&e, _| e == epoch || Some(e) == second_last);
    }
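
    // Example (added for exposition): with shareholders stored for epochs
    // {3, 5, 8} and epoch = 8, `second_last` is Some(5), so the entries for
    // epochs 8 and 5 are retained and the one for epoch 3 is dropped.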

    /// Loads the shareholder from local storage for the given epoch.
    fn load_shareholder(&self, epoch: EpochTime) -> Result<()> {
        // Skip if no handoffs have been completed so far.
        if epoch == 0 {
            return Ok(());
        }

        let share = self
            .storage
            .load_secret_share(self.churp_id, epoch)
            .or_else(|err| ignore_error(err, Error::InvalidSecretShare))?; // Ignore previous shares.

        // If the secret share is not available, check if the next handoff
        // succeeded as it might have been confirmed while we were away.
        let share = match share {
            Some(share) => Some(share),
            None => {
                let share = self
                    .storage
                    .load_next_secret_share(self.churp_id, epoch)
                    .or_else(|err| ignore_error(err, Error::InvalidSecretShare))?; // Ignore previous shares.

                // Back up the secret share, if it is valid.
                if let Some(share) = share.as_ref() {
                    self.storage
                        .store_secret_share(share, self.churp_id, epoch)?;
                }

                share
            }
        };

        self.verify_and_add_shareholder(share, epoch)
    }

    /// Loads the next shareholder from local storage for the given epoch.
    fn load_next_shareholder(&self, epoch: EpochTime) -> Result<()> {
        let share = self
            .storage
            .load_next_secret_share(self.churp_id, epoch)
            .or_else(|err| ignore_error(err, Error::InvalidSecretShare))?; // Ignore previous shares.

        self.verify_and_add_shareholder(share, epoch)
    }

    fn verify_and_add_shareholder(
        &self,
        share: Option<VerifiableSecretShare<S::Group>>,
        epoch: EpochTime,
    ) -> Result<()> {
        let share = match share {
            Some(share) => share,
            None => return Ok(()),
        };

        // Verify that the host hasn't changed.
        let me = encode_shareholder::<S>(&self.node_id.0, &self.shareholder_dst)?;
        if share.secret_share().coordinate_x() != &me {
            return Err(Error::InvalidHost.into());
        }

        // Create a new shareholder.
        let shareholder = Arc::new(Shareholder::from(share));

        // Store the shareholder.
        self.add_shareholder(shareholder, epoch);

        Ok(())
    }

    /// Returns the dealer for the given epoch.
    fn get_dealer(&self, epoch: EpochTime) -> Result<Arc<Dealer<S::Group>>> {
        let dealer_guard = self.dealer.lock().unwrap();

        let dealer_info = match dealer_guard.as_ref() {
            Some(dealer_info) => dealer_info,
            None => return Err(Error::DealerNotFound.into()),
        };
        if dealer_info.epoch != epoch {
            return Err(Error::DealerNotFound.into());
        }

        Ok(dealer_info.dealer.clone())
    }

    /// Adds a dealer for the given epoch. If a dealer is already set,
    /// it will be overwritten.
    fn add_dealer(&self, dealer: Arc<Dealer<S::Group>>, epoch: EpochTime) {
        let mut dealer_guard = self.dealer.lock().unwrap();
        *dealer_guard = Some(DealerInfo { epoch, dealer });
    }

    /// Creates a new dealer for the given epoch.
    ///
    /// If a dealer for the same or any other epoch already exists, it will
    /// be removed, its bivariate polynomial overwritten, and permanently
    /// lost.
    ///
    /// Warning: Since the host controls the local storage, it can restart
    /// the enclave to create multiple dealers for the same epoch and then
    /// replace the last backup with a bivariate polynomial from a dealer
    /// of its choice. Therefore, it is essential to verify the bivariate
    /// polynomial after loading or when deriving bivariate shares.
    fn create_dealer(
        &self,
        epoch: EpochTime,
        threshold: u8,
        dealing_phase: bool,
    ) -> Result<Arc<Dealer<S::Group>>> {
        // Create a new dealer.
        let dealer = match dealing_phase {
            true => Dealer::new(threshold, &mut OsRng),
            false => Dealer::new_proactive(threshold, &mut OsRng),
        }?;
        let dealer = Arc::new(dealer);

        // Encrypt and store the polynomial in case of a restart.
        let polynomial = dealer.bivariate_polynomial();
        self.storage
            .store_bivariate_polynomial(polynomial, self.churp_id, epoch)?;

        // Store the dealer.
        self.add_dealer(dealer.clone(), epoch);

        Ok(dealer)
    }

    /// Loads the dealer for the given epoch from the local storage and verifies
    /// it against the provided checksum.
    fn load_dealer(&self, epoch: EpochTime, checksum: Option<Hash>) -> Result<()> {
        // Skip if handoffs are disabled.
        if epoch == HANDOFFS_DISABLED {
            return Ok(());
        }

        // Load untrusted polynomial.
        let polynomial = self
            .storage
            .load_bivariate_polynomial(self.churp_id, epoch)
            .or_else(|err| ignore_error(err, Error::InvalidBivariatePolynomial))?; // Ignore previous dealers.

        let polynomial = match polynomial {
            Some(polynomial) => polynomial,
            None => return Ok(()),
        };

        // Create untrusted dealer.
        let dealer = Arc::new(Dealer::from(polynomial));

        // Verify that the host hasn't created multiple dealers for the same
        // epoch and replaced the polynomial that was used to prepare
        // the application.
        if let Some(checksum) = checksum {
            let verification_matrix = dealer.verification_matrix();
            let computed_checksum = self.checksum_verification_matrix(verification_matrix, epoch);

            if checksum != computed_checksum {
                return Err(Error::InvalidBivariatePolynomial.into());
            }
        }

        // Store the dealer.
        self.add_dealer(dealer, epoch);

        Ok(())
    }

    /// Removes the dealer if it belongs to a handoff that occurred
    /// at or before the given epoch.
    fn remove_dealer(&self, max_epoch: EpochTime) {
        let mut dealer_guard = self.dealer.lock().unwrap();
        if let Some(dealer_info) = dealer_guard.as_ref() {
            if dealer_info.epoch <= max_epoch {
                *dealer_guard = None;
            }
        }
    }

    /// Returns the handoff for the given epoch.
    fn get_handoff(&self, epoch: EpochTime) -> Result<Arc<Box<dyn Handoff<S::Group>>>> {
        let handoff_guard = self.handoff.lock().unwrap();

        let handoff_info = handoff_guard
            .as_ref()
            .filter(|hi| hi.epoch == epoch)
            .ok_or(Error::HandoffNotFound)?;

        Ok(handoff_info.handoff.clone())
    }

    /// Creates a handoff for the next handoff epoch. If a handoff already
    /// exists, the existing one is returned.
    fn get_or_create_handoff(&self, status: &Status) -> Result<Arc<Box<dyn Handoff<S::Group>>>> {
        // Make sure to lock the handoff so that we don't create two handoffs
        // for the same epoch.
        let mut handoff_guard = self.handoff.lock().unwrap();

        if let Some(handoff_info) = handoff_guard.as_ref() {
            match status.next_handoff.cmp(&handoff_info.epoch) {
                cmp::Ordering::Less => return Err(Error::InvalidHandoff.into()),
                cmp::Ordering::Equal => return Ok(handoff_info.handoff.clone()),
                cmp::Ordering::Greater => (),
            }
        }

        // Create a new handoff.
        let threshold = status.threshold;
        let me = encode_shareholder::<S>(&self.node_id.0, &self.shareholder_dst)?;
        let mut shareholders = Vec::with_capacity(status.applications.len());
        for id in status.applications.keys() {
            let x = encode_shareholder::<S>(&id.0, &self.shareholder_dst)?;
            shareholders.push(x);
        }
        let kind = Self::handoff_kind(status);
        let handoff: Arc<Box<dyn Handoff<S::Group>>> = match kind {
            HandoffKind::DealingPhase => {
                Arc::new(Box::new(DealingPhase::new(threshold, me, shareholders)?))
            }
            HandoffKind::CommitteeUnchanged => Arc::new(Box::new(CommitteeUnchanged::new(
                threshold,
                me,
                shareholders,
            )?)),
            HandoffKind::CommitteeChanged => Arc::new(Box::new(CommitteeChanged::new(
                threshold,
                me,
                shareholders,
            )?)),
        };

        // If the committee hasn't changed, we need the latest shareholder
        // to randomize its share.
        if kind == HandoffKind::CommitteeUnchanged {
            let shareholder = self.get_shareholder(status.handoff)?;
            handoff.set_shareholder(shareholder)?;
        }

        // Store the handoff.
        *handoff_guard = Some(HandoffInfo {
            epoch: status.next_handoff,
            handoff: handoff.clone(),
        });

        Ok(handoff)
    }

    /// Removes the handoff if it happened at or before the given epoch.
    fn remove_handoff(&self, max_epoch: EpochTime) {
        let mut handoff_guard = self.handoff.lock().unwrap();
        if let Some(handoff_info) = handoff_guard.as_ref() {
            if handoff_info.epoch <= max_epoch {
                *handoff_guard = None;
            }
        }
    }

    /// Verifies parameters of the last successfully completed handoff against
    /// the latest status.
    fn verify_last_handoff(&self, epoch: EpochTime) -> Result<Status> {
        let status = self.churp_state.status(self.runtime_id, self.churp_id)?;
        if status.handoff != epoch {
            return Err(Error::HandoffMismatch.into());
        }

        Ok(status)
    }

    /// Verifies parameters of the next handoff against the latest status
    /// and checks whether the handoff is in progress.
    fn verify_next_handoff(&self, epoch: EpochTime) -> Result<Status> {
        let status = self.churp_state.status(self.runtime_id, self.churp_id)?;
        if status.next_handoff != epoch {
            return Err(Error::HandoffMismatch.into());
        }

        let now = self.beacon_state.epoch()?;
        if status.next_handoff != now {
            return Err(Error::HandoffClosed.into());
        }

        Ok(status)
    }

    /// Verifies the node ID by comparing the session's runtime attestation
    /// key (RAK) with the one published in the consensus layer.
    fn verify_node_id(&self, ctx: &RpcContext, node_id: &PublicKey) -> Result<()> {
        if !cfg!(any(target_env = "sgx", feature = "debug-mock-sgx")) {
            // Skip verification in non-SGX environments because those
            // nodes do not publish RAK in the consensus nor do they
            // send RAK binding when establishing Noise sessions.
            return Ok(());
        }

        let remote_rak = Self::remote_rak(ctx)?;
        let rak = self
            .registry_state
            .rak(node_id, &self.runtime_id)?
            .ok_or(Error::NotAuthenticated)?;

        if remote_rak != rak {
            return Err(Error::NotAuthorized.into());
        }

        Ok(())
    }

    /// Authorizes the remote key manager enclave so that secret data is never
    /// revealed to an unauthorized enclave.
    fn verify_km_enclave(&self, ctx: &RpcContext, policy: &SignedPolicySGX) -> Result<()> {
        if Self::ignore_policy() {
            return Ok(());
        }
        let remote_enclave = Self::remote_enclave(ctx)?;
        let policy = self.policies.verify(policy)?;
        if !policy.may_join(remote_enclave) {
            return Err(Error::NotAuthorized.into());
        }
        Ok(())
    }

    /// Authorizes the remote runtime enclave so that secret data is never
    /// revealed to an unauthorized enclave.
    fn verify_rt_enclave(
        &self,
        ctx: &RpcContext,
        policy: &SignedPolicySGX,
        runtime_id: &Namespace,
    ) -> Result<()> {
        if Self::ignore_policy() {
            return Ok(());
        }
        let remote_enclave = Self::remote_enclave(ctx)?;
        let policy = self.policies.verify(policy)?;
        if !policy.may_query(remote_enclave, runtime_id) {
            return Err(Error::NotAuthorized.into());
        }
        Ok(())
    }

    /// Returns the session RAK of the remote enclave.
    fn remote_rak(ctx: &RpcContext) -> Result<PublicKey> {
        let si = ctx.session_info.as_ref();
        let si = si.ok_or(Error::NotAuthenticated)?;
        Ok(si.rak_binding.rak_pub())
    }

    /// Returns the identity of the remote enclave.
    fn remote_enclave(ctx: &RpcContext) -> Result<&EnclaveIdentity> {
        let si = ctx.session_info.as_ref();
        let si = si.ok_or(Error::NotAuthenticated)?;
        Ok(&si.verified_attestation.quote.identity)
    }

    /// Returns true if key manager policies should be ignored.
    fn ignore_policy() -> bool {
        option_env!("OASIS_UNSAFE_SKIP_KM_POLICY").is_some()
    }

    /// Returns a key manager client that connects only to enclaves eligible
    /// to form a new committee or to enclaves belonging to the old committee.
    fn key_manager_client(&self, status: &Status, new_committee: bool) -> Result<RemoteClient> {
        let enclaves = if Self::ignore_policy() {
            None
        } else {
            let policy = self.policies.verify(&status.policy)?;
            let enclaves = match new_committee {
                true => policy.may_join.clone(),
                false => policy.may_share.clone(),
            };
            Some(enclaves)
        };

        let client = RemoteClient::new_runtime_with_enclaves_and_policy(
            self.runtime_id,
            Some(self.runtime_id),
            enclaves,
            self.identity.quote_policy(),
            self.protocol.clone(),
            self.consensus_verifier.clone(),
            self.identity.clone(),
            1, // Not used, doesn't matter.
        );

        Ok(client)
    }

    /// Computes the checksum of the verification matrix.
    fn checksum_verification_matrix<G>(
        &self,
        matrix: &VerificationMatrix<G>,
        epoch: EpochTime,
    ) -> Hash
    where
        G: Group + GroupEncoding,
    {
        self.checksum_verification_matrix_bytes(&matrix.to_bytes(), epoch)
    }

    /// Computes the checksum of the verification matrix bytes.
    fn checksum_verification_matrix_bytes(&self, bytes: &Vec<u8>, epoch: EpochTime) -> Hash {
        let mut checksum = [0u8; 32];
        let mut f = KMac::new_kmac256(bytes, CHECKSUM_VERIFICATION_MATRIX_CUSTOM);
        f.update(&self.runtime_id.0);
        f.update(&[self.churp_id]);
        f.update(&epoch.to_le_bytes());
        f.finalize(&mut checksum);
        Hash(checksum)
    }
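
    // Note (added for exposition): the checksum binds the matrix bytes M to
    // the runtime, the instance, and the epoch:
    //
    //     H = KMAC256(key = M, custom = CHECKSUM_VERIFICATION_MATRIX_CUSTOM,
    //                 data = runtime_id || churp_id || LE64(epoch))
    //
    // so a matrix committed for one runtime, churp instance, or handoff
    // cannot be replayed for another.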

    /// Returns the type of the next handoff depending on which nodes submitted
    /// an application to form the next committee.
    fn handoff_kind(status: &Status) -> HandoffKind {
        if status.committee.is_empty() {
            return HandoffKind::DealingPhase;
        }
        if status.committee.len() != status.applications.len() {
            return HandoffKind::CommitteeChanged;
        }
        if status
            .committee
            .iter()
            .all(|value| status.applications.contains_key(value))
        {
            return HandoffKind::CommitteeUnchanged;
        }
        HandoffKind::CommitteeChanged
    }
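
    // Decision summary (added for exposition), derived from the checks above:
    //
    //     committee empty                      -> DealingPhase
    //     |committee| != |applications|        -> CommitteeChanged
    //     every committee member reapplied     -> CommitteeUnchanged
    //     otherwise                            -> CommitteeChanged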

    /// Extends the given domain separation tag with key manager runtime ID
    /// and churp ID.
    fn domain_separation_tag(context: &[u8], runtime_id: &Namespace, churp_id: u8) -> Vec<u8> {
        let mut dst = context.to_vec();
        dst.extend(RUNTIME_CONTEXT_SEPARATOR);
        dst.extend(runtime_id.0);
        dst.extend(CHURP_CONTEXT_SEPARATOR);
        dst.extend(&[churp_id]);
        dst
    }
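
    // Example (added for exposition): for ENCODE_SHAREHOLDER_CONTEXT the
    // resulting tag is the byte string
    //
    //     b"oasis-core/keymanager/churp: encode shareholder"
    //         || b" for runtime " || runtime_id || b" for churp " || churp_id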
}

impl<S: Suite> Handler for Instance<S> {
    fn verification_matrix(&self, req: &QueryRequest) -> Result<Vec<u8>> {
        let status = self.verify_last_handoff(req.epoch)?;
        if !status.committee.contains(&self.node_id) {
            return Err(Error::NotInCommittee.into());
        }

        let shareholder = self.get_shareholder(status.handoff)?;
        let vm = shareholder
            .verifiable_share()
            .verification_matrix()
            .to_bytes();

        Ok(vm)
    }

    fn share_reduction_switch_point(
        &self,
        ctx: &RpcContext,
        req: &QueryRequest,
    ) -> Result<Vec<u8>> {
        let status = self.verify_next_handoff(req.epoch)?;
        if !status.committee.contains(&self.node_id) {
            return Err(Error::NotInCommittee.into());
        }

        let kind = Self::handoff_kind(&status);
        if !matches!(kind, HandoffKind::CommitteeChanged) {
            return Err(Error::InvalidHandoff.into());
        }

        let node_id = req.node_id.as_ref().ok_or(Error::NotAuthenticated)?;
        if !status.applications.contains_key(node_id) {
            return Err(Error::NotInCommittee.into());
        }

        self.verify_node_id(ctx, node_id)?;
        self.verify_km_enclave(ctx, &status.policy)?;

        let x = encode_shareholder::<S>(&node_id.0, &self.shareholder_dst)?;
        let shareholder = self.get_shareholder(status.handoff)?;
        let point = shareholder.switch_point(&x);
        let point = scalar_to_bytes(&point);

        Ok(point)
    }

    fn share_distribution_switch_point(
        &self,
        ctx: &RpcContext,
        req: &QueryRequest,
    ) -> Result<Vec<u8>> {
        let status = self.verify_next_handoff(req.epoch)?;
        if !status.applications.contains_key(&self.node_id) {
            return Err(Error::NotInCommittee.into());
        }

        let kind = Self::handoff_kind(&status);
        if !matches!(kind, HandoffKind::CommitteeChanged) {
            return Err(Error::InvalidHandoff.into());
        }

        let node_id = req.node_id.as_ref().ok_or(Error::NotAuthenticated)?;
        if !status.applications.contains_key(node_id) {
            return Err(Error::NotInCommittee.into());
        }

        self.verify_node_id(ctx, node_id)?;
        self.verify_km_enclave(ctx, &status.policy)?;

        let x = encode_shareholder::<S>(&node_id.0, &self.shareholder_dst)?;
        let handoff = self.get_handoff(status.next_handoff)?;
        let shareholder = handoff.get_reduced_shareholder()?;
        let point = shareholder.switch_point(&x);
        let point = scalar_to_bytes(&point);

        Ok(point)
    }

    fn bivariate_share(
        &self,
        ctx: &RpcContext,
        req: &QueryRequest,
    ) -> Result<EncodedVerifiableSecretShare> {
        let status = self.verify_next_handoff(req.epoch)?;
        if !status.applications.contains_key(&self.node_id) {
            return Err(Error::NotInCommittee.into());
        }

        let node_id = req.node_id.as_ref().ok_or(Error::NotAuthenticated)?;
        if !status.applications.contains_key(node_id) {
            return Err(Error::NotInCommittee.into());
        };

        let application = status
            .applications
            .get(&self.node_id)
            .ok_or(Error::NotInCommittee)?;

        self.verify_node_id(ctx, node_id)?;
        self.verify_km_enclave(ctx, &status.policy)?;

        let x = encode_shareholder::<S>(&node_id.0, &self.shareholder_dst)?;
        let kind = Self::handoff_kind(&status);
        let dealer = self.get_dealer(status.next_handoff)?;
        let share = dealer.make_share(x, kind);
        let share = (&share).into();
        let verification_matrix = dealer.verification_matrix().to_bytes();

        // Verify that the host hasn't created multiple dealers for the same
        // epoch and replaced the polynomial that was used to prepare
        // the application.
        let computed_checksum =
            self.checksum_verification_matrix_bytes(&verification_matrix, status.next_handoff);
        if application.checksum != computed_checksum {
            return Err(Error::InvalidBivariatePolynomial.into());
        }

        Ok(EncodedVerifiableSecretShare {
            share,
            verification_matrix,
        })
    }

    fn sgx_policy_key_share(
        &self,
        ctx: &RpcContext,
        req: &KeyShareRequest,
    ) -> Result<EncodedEncryptedPoint> {
        let status = self.churp_state.status(self.runtime_id, self.churp_id)?;
        let status = if status.handoff != req.epoch {
            // Allow querying past key shares if the client is a few blocks behind.
            self.churp_state
                .status_before(self.runtime_id, self.churp_id, ALLOWED_BLOCKS_BEHIND)?
        } else {
            status
        };

        if status.handoff != req.epoch {
            return Err(Error::HandoffMismatch.into());
        }
        if !status.committee.contains(&self.node_id) {
            return Err(Error::NotInCommittee.into());
        }

        // Note that querying past key shares can fail at this point
        // if the policy has changed.
        self.verify_rt_enclave(ctx, &status.policy, &req.key_runtime_id)?;

        // Prepare key share.
        let shareholder = self.get_shareholder(status.handoff)?;
        let point = shareholder.make_key_share::<S>(&req.key_id.0, &self.sgx_policy_key_id_dst)?;

        Ok((&point).into())
    }

    fn apply(&self, req: &HandoffRequest) -> Result<SignedApplicationRequest> {
        let status = self.churp_state.status(self.runtime_id, self.churp_id)?;
        if status.next_handoff != req.epoch {
            return Err(Error::HandoffMismatch.into());
        }
        if status.next_handoff == HANDOFFS_DISABLED {
            return Err(Error::HandoffsDisabled.into());
        }
        if status.applications.contains_key(&self.node_id) {
            return Err(Error::ApplicationSubmitted.into());
        }

        // Ensure application is submitted one epoch before the next handoff.
        let now = self.beacon_state.epoch()?;
        if status.next_handoff != now + 1 {
            return Err(Error::ApplicationsClosed.into());
        }

        // Create a new dealer.
        let dealing_phase = status.committee.is_empty();
        let dealer = self.create_dealer(status.next_handoff, status.threshold, dealing_phase)?;

        // Fetch verification matrix and compute its checksum.
        let matrix = dealer.verification_matrix();
        let checksum = self.checksum_verification_matrix(matrix, req.epoch);

        // Prepare response and sign it with RAK.
        let application = ApplicationRequest {
            id: self.churp_id,
            runtime_id: self.runtime_id,
            epoch: status.next_handoff,
            checksum,
        };
        let body = cbor::to_vec(application.clone());
        let signature = self
            .signer
            .sign(APPLICATION_REQUEST_SIGNATURE_CONTEXT, &body)?;

        Ok(SignedApplicationRequest {
            application,
            signature,
        })
    }

    fn share_reduction(&self, req: &FetchRequest) -> Result<FetchResponse> {
        let status = self.verify_next_handoff(req.epoch)?;

        let handoff = self.get_or_create_handoff(&status)?;
        let client = self.key_manager_client(&status, false)?;
        let f =
            |node_id| self.fetch_share_reduction_switch_point(node_id, &status, &handoff, &client);
        fetch(f, &req.node_ids)
    }

    fn share_distribution(&self, req: &FetchRequest) -> Result<FetchResponse> {
        let status = self.verify_next_handoff(req.epoch)?;
        let handoff = self.get_handoff(status.next_handoff)?;
        let client = self.key_manager_client(&status, true)?;
        let f = |node_id| {
            self.fetch_share_distribution_switch_point(node_id, &status, &handoff, &client)
        };
        fetch(f, &req.node_ids)
    }

    fn proactivization(&self, req: &FetchRequest) -> Result<FetchResponse> {
        let status = self.verify_next_handoff(req.epoch)?;
        let handoff = match Self::handoff_kind(&status) {
            HandoffKind::CommitteeChanged => self.get_handoff(status.next_handoff)?,
            _ => self.get_or_create_handoff(&status)?,
        };
        let client = self.key_manager_client(&status, true)?;
        let f = |node_id| self.fetch_bivariate_share(node_id, &status, &handoff, &client);
        fetch(f, &req.node_ids)
    }

    fn confirmation(&self, req: &HandoffRequest) -> Result<SignedConfirmationRequest> {
        let status = self.verify_next_handoff(req.epoch)?;

        if !status.applications.contains_key(&self.node_id) {
            return Err(Error::ApplicationNotSubmitted.into());
        }

        // Fetch the next shareholder and its secret share.
        let handoff = self.get_handoff(status.next_handoff)?;
        let shareholder = handoff.get_full_shareholder()?;
        let share = shareholder.verifiable_share();

        // Back up the secret share before sending confirmation.
        self.storage
            .store_next_secret_share(share, self.churp_id, status.next_handoff)?;

        // Store the shareholder. Observe that we are adding the shareholder
        // before the consensus has confirmed that the handoff was completed.
        // This is fine, as we always verify the handoff epoch before fetching
        // a shareholder.
        self.add_shareholder(shareholder.clone(), status.next_handoff);

        // Prepare response and sign it with RAK.
        let vm = share.verification_matrix();
        let checksum = self.checksum_verification_matrix(vm, status.next_handoff);
        let confirmation = ConfirmationRequest {
            id: self.churp_id,
            runtime_id: self.runtime_id,
            epoch: status.next_handoff,
            checksum,
        };
        let body = cbor::to_vec(confirmation.clone());
        let signature = self
            .signer
            .sign(CONFIRMATION_REQUEST_SIGNATURE_CONTEXT, &body)?;

        Ok(SignedConfirmationRequest {
            confirmation,
            signature,
        })
    }

    fn finalize(&self, req: &HandoffRequest) -> Result<()> {
        let status = self.verify_last_handoff(req.epoch)?;

        // Keep only the last two shareholders. The second-last shareholder
        // could be removed after a few blocks, as we need it only to serve
        // clients that are lagging behind.
        self.clean_shareholders(status.handoff);

        // Cleaning up dealers and handoffs is optional,
        // as they are overwritten during the next handoff.
        let max_epoch = status.next_handoff.saturating_sub(1);
        self.remove_dealer(max_epoch);
        self.remove_handoff(max_epoch);

        // Fetch the last shareholder and its secret share.
        let shareholder = match self.get_shareholder(status.handoff) {
            Ok(shareholder) => shareholder,
            Err(_) => return Ok(()), // Not found.
        };
        let share = shareholder.verifiable_share();

        // Back up the secret share. This operation will be a no-op
        // if the handoff failed, as the last shareholder hasn't changed.
        self.storage
            .store_secret_share(share, self.churp_id, status.handoff)
    }
}

/// Replaces the given error with `Ok(None)`.
fn ignore_error<T>(err: anyhow::Error, ignore: Error) -> Result<Option<T>> {
    match err.downcast_ref::<Error>() {
        Some(error) if error == &ignore => Ok(None),
        _ => Err(err),
    }
}
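
// Example (added for exposition): callers in this file use it as
// `load_secret_share(..).or_else(|err| ignore_error(err, Error::InvalidSecretShare))`,
// which maps a stale or invalid share to `Ok(None)` while still propagating
// every other error (e.g. storage failures) unchanged.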

/// Fetches data from the given nodes by calling the provided function
/// for each node.
fn fetch<F>(f: F, node_ids: &[PublicKey]) -> Result<FetchResponse>
where
    F: Fn(PublicKey) -> Result<bool>,
{
    let mut completed = false;
    let mut succeeded = vec![];
    let mut failed = vec![];

    for &node_id in node_ids {
        if completed {
            break;
        }

        match f(node_id) {
            Ok(done) => {
                completed = done;
                succeeded.push(node_id);
            }
            Err(_) => {
                failed.push(node_id);
            }
        }
    }

    Ok(FetchResponse {
        completed,
        succeeded,
        failed,
    })
}
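
// Example (added for exposition): with node_ids = [a, b, c] and f returning
// Ok(false) for a and Ok(true) for b, the loop stops before contacting c and
// the result is FetchResponse { completed: true, succeeded: [a, b], failed: [] }.
// Failed nodes are only recorded, never retried; retrying is left to the caller.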