Skip to content

Commit

Permalink
aarch64 crefine: make proof generic in cacheLineBits
Browse files Browse the repository at this point in the history
On AArch64 we have so far only seen cacheLineBits = 6 in the kernel.

To be future-proof, bring the AArch64 proofs into line with AArch32 anyway:
rename cacheLineSize to cacheLineBits to stay consistent, and make the
proofs generic in cacheLineBits.

Signed-off-by: Gerwin Klein <gerwin.klein@proofcraft.systems>
  • Loading branch information
lsf37 committed Sep 2, 2024
1 parent 5d70c47 commit 0526d2c
Show file tree
Hide file tree
Showing 5 changed files with 53 additions and 35 deletions.
4 changes: 2 additions & 2 deletions proof/crefine/AARCH64/Arch_C.thy
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ lemma clearMemory_PT_setObject_PTE_ccorres:
apply (clarsimp simp: guard_is_UNIV_def bit_simps split: if_split)
apply clarsimp
apply (frule is_aligned_addrFromPPtr_n, simp)
apply (simp add: is_aligned_no_overflow' addrFromPPtr_mask_cacheLineSize)
apply (simp add: is_aligned_no_overflow' addrFromPPtr_mask_cacheLineBits)
apply (rule conjI)
apply (simp add: unat_mask_eq flip: mask_2pm1)
apply (simp add: mask_eq_exp_minus_1)
Expand Down Expand Up @@ -1604,7 +1604,7 @@ definition flushtype_relation :: "flush_type \<Rightarrow> machine_word \<Righta
label \<in> scast ` {Kernel_C.ARMPageUnify_Instruction, Kernel_C.ARMVSpaceUnify_Instruction}"

lemma doFlush_ccorres:
"ccorres dc xfdc (\<lambda>s. vs \<le> ve \<and> ps \<le> ps + (ve - vs) \<and> vs && mask cacheLineSize = ps && mask cacheLineSize
"ccorres dc xfdc (\<lambda>s. vs \<le> ve \<and> ps \<le> ps + (ve - vs) \<and> vs && mask cacheLineBits = ps && mask cacheLineBits
\<and> ptrFromPAddr ps \<le> ptrFromPAddr ps + (ve - vs)
\<and> unat (ve - vs) \<le> gsMaxObjectSize s)
(\<lbrace>flushtype_relation t \<acute>invLabel\<rbrace> \<inter> \<lbrace>\<acute>start = vs\<rbrace> \<inter> \<lbrace>\<acute>end = ve\<rbrace> \<inter> \<lbrace>\<acute>pstart = ps\<rbrace>) []
Expand Down
2 changes: 1 addition & 1 deletion proof/crefine/AARCH64/Invoke_C.thy
Original file line number Diff line number Diff line change
Expand Up @@ -1685,7 +1685,7 @@ lemma clearMemory_untyped_ccorres:
word_of_nat_less Kernel_Config.resetChunkBits_def
word_bits_def unat_2p_sub_1)
apply (strengthen is_aligned_no_wrap'[where sz=sz] is_aligned_addrFromPPtr_n)+
apply (simp add: addrFromPPtr_mask_cacheLineSize pptrBaseOffset_alignment_def)
apply (simp add: addrFromPPtr_mask_cacheLineBits pptrBaseOffset_alignment_def)
apply (cases "ptr = 0"; simp)
apply (drule subsetD, rule intvl_self, simp)
apply simp
Expand Down
10 changes: 5 additions & 5 deletions proof/crefine/AARCH64/Machine_C.thy
Original file line number Diff line number Diff line change
Expand Up @@ -156,7 +156,7 @@ assumes cleanByVA_PoU_ccorres:

assumes cleanCacheRange_RAM_ccorres:
"ccorres dc xfdc (\<lambda>s. w1 \<le> w2 \<and> w3 \<le> w3 + (w2 - w1)
\<and> w1 && mask cacheLineSize = w3 && mask cacheLineSize
\<and> w1 && mask cacheLineBits = w3 && mask cacheLineBits
\<and> unat (w2 - w1) \<le> gsMaxObjectSize s)
(\<lbrace>\<acute>start = w1\<rbrace> \<inter> \<lbrace>\<acute>end = w2\<rbrace> \<inter> \<lbrace>\<acute>pstart = w3\<rbrace>) []
(doMachineOp (cleanCacheRange_RAM w1 w2 w3))
Expand All @@ -165,30 +165,30 @@ assumes cleanCacheRange_RAM_ccorres:
assumes cleanCacheRange_PoU_ccorres:
"ccorres dc xfdc (\<lambda>s. unat (w2 - w1) \<le> gsMaxObjectSize s
\<and> w1 \<le> w2 \<and> w3 \<le> w3 + (w2 - w1)
\<and> w1 && mask cacheLineSize = w3 && mask cacheLineSize)
\<and> w1 && mask cacheLineBits = w3 && mask cacheLineBits)
(\<lbrace>\<acute>start = w1\<rbrace> \<inter> \<lbrace>\<acute>end = w2\<rbrace> \<inter> \<lbrace>\<acute>pstart = w3\<rbrace>) []
(doMachineOp (cleanCacheRange_PoU w1 w2 w3))
(Call cleanCacheRange_PoU_'proc)"

assumes cleanInvalidateCacheRange_RAM_ccorres:
"ccorres dc xfdc (\<lambda>s. unat (w2 - w1) \<le> gsMaxObjectSize s
\<and> w1 \<le> w2 \<and> w3 \<le> w3 + (w2 - w1)
\<and> w1 && mask cacheLineSize = w3 && mask cacheLineSize)
\<and> w1 && mask cacheLineBits = w3 && mask cacheLineBits)
(\<lbrace>\<acute>start = w1\<rbrace> \<inter> \<lbrace>\<acute>end = w2\<rbrace> \<inter> \<lbrace>\<acute>pstart = w3\<rbrace>) []
(doMachineOp (cleanInvalidateCacheRange_RAM w1 w2 w3))
(Call cleanInvalidateCacheRange_RAM_'proc)"

assumes invalidateCacheRange_RAM_ccorres:
"ccorres dc xfdc ((\<lambda>s. unat (w2 - w1) \<le> gsMaxObjectSize s)
and (\<lambda>_. w1 \<le> w2 \<and> w3 \<le> w3 + (w2 - w1)
\<and> w1 && mask cacheLineSize = w3 && mask cacheLineSize))
\<and> w1 && mask cacheLineBits = w3 && mask cacheLineBits))
(\<lbrace>\<acute>start = w1\<rbrace> \<inter> \<lbrace>\<acute>end = w2\<rbrace> \<inter> \<lbrace>\<acute>pstart = w3\<rbrace>) []
(doMachineOp (invalidateCacheRange_RAM w1 w2 w3))
(Call invalidateCacheRange_RAM_'proc)"

assumes invalidateCacheRange_I_ccorres:
"ccorres dc xfdc (\<lambda>_. w1 \<le> w2 \<and> w3 \<le> w3 + (w2 - w1)
\<and> w1 && mask cacheLineSize = w3 && mask cacheLineSize)
\<and> w1 && mask cacheLineBits = w3 && mask cacheLineBits)
(\<lbrace>\<acute>start = w1\<rbrace> \<inter> \<lbrace>\<acute>end = w2\<rbrace> \<inter> \<lbrace>\<acute>pstart = w3\<rbrace>) []
(doMachineOp (invalidateCacheRange_I w1 w2 w3))
(Call invalidateCacheRange_I_'proc)"
Expand Down
8 changes: 1 addition & 7 deletions proof/crefine/AARCH64/Recycle_C.thy
Original file line number Diff line number Diff line change
Expand Up @@ -387,13 +387,7 @@ lemma clearMemory_PageCap_ccorres:
capAligned_def word_of_nat_less)
apply (frule is_aligned_addrFromPPtr_n, simp add: pageBitsForSize_def split: vmpage_size.splits)
apply (simp add: bit_simps pptrBaseOffset_alignment_def)+
apply (simp add: is_aligned_no_overflow')
apply (rule conjI)
subgoal
apply (prop_tac "cacheLineSize \<le> pageBitsForSize sz")
apply (simp add: pageBitsForSize_def bit_simps cacheLineSize_def split: vmpage_size.splits)
apply (simp add: is_aligned_mask[THEN iffD1] is_aligned_weaken)
done
apply (simp add: is_aligned_no_overflow' addrFromPPtr_mask_cacheLineBits)
apply (simp add: pageBitsForSize_def bit_simps split: vmpage_size.splits)
done

Expand Down
64 changes: 44 additions & 20 deletions proof/crefine/AARCH64/Wellformed_C.thy
Original file line number Diff line number Diff line change
Expand Up @@ -562,6 +562,50 @@ lemma ucast_irq_array_guard[unfolded irq_array_size_val, simplified]:
done


text \<open>cacheLineBits interface\<close>

(* Concrete numeric value of cacheLineBits, obtained by unfolding the
   Kernel_Config constant it is defined from. Proofs below should prefer the
   generic interface lemmas over this value. *)
lemmas cacheLineBits_val =
cacheLineBits_def[unfolded Kernel_Config.CONFIG_L1_CACHE_LINE_SIZE_BITS_def]

(* A cache line is never larger than a page; follows from the configuration
   sanity check cacheLineBits_sanity and the value of pageBits. *)
lemma cacheLineBits_leq_pageBits:
"cacheLineBits \<le> pageBits"
using cacheLineBits_sanity
by (simp add: pageBits_def)

(* The kernel window offset is aligned to at least page size; proved by
   computing both constants. *)
lemma pageBits_leq_pptrBaseOffset_alignment:
"pageBits \<le> pptrBaseOffset_alignment"
by (simp add: pageBits_def pptrBaseOffset_alignment_def)

(* Chaining the two bounds above: cacheLineBits \<le> pageBits \<le>
   pptrBaseOffset_alignment. *)
lemma cacheLineBits_leq_pptrBaseOffset_alignment:
"cacheLineBits \<le> pptrBaseOffset_alignment"
by (rule order.trans, rule cacheLineBits_leq_pageBits, rule pageBits_leq_pptrBaseOffset_alignment)

(* cacheLineBits is bounded by the size bits of any vm page size, via
   pageBits and pbfs_atleast_pageBits. *)
lemma cacheLineBits_leq_pbfs:
"cacheLineBits \<le> pageBitsForSize sz"
by (rule order.trans, rule cacheLineBits_leq_pageBits, rule pbfs_atleast_pageBits)

(* Translating a kernel pointer to a physical address does not change the low
   n bits, for any n up to the alignment of pptrBaseOffset: the proof uses
   that pptrBaseOffset is aligned (pptrBaseOffset_aligned), so subtracting it
   leaves bits below the alignment untouched (mask_add_aligned). *)
lemma addrFromPPtr_mask_pptrBaseOffset_alignment:
"n \<le> pptrBaseOffset_alignment
\<Longrightarrow> addrFromPPtr ptr && mask n = ptr && mask n"
unfolding addrFromPPtr_def
by (metis is_aligned_weaken mask_add_aligned pptrBaseOffset_aligned zadd_diff_inverse)

(* Instance of the previous lemma for n = cacheLineBits, justified by
   cacheLineBits_leq_pptrBaseOffset_alignment. This is the generic
   replacement for the old fixed-value addrFromPPtr_mask_cacheLineSize. *)
lemma addrFromPPtr_mask_cacheLineBits:
"addrFromPPtr ptr && mask cacheLineBits = ptr && mask cacheLineBits"
by (rule addrFromPPtr_mask_pptrBaseOffset_alignment,
rule cacheLineBits_leq_pptrBaseOffset_alignment)

(* pptrBaseOffset is cache-line aligned: weaken its pptrBaseOffset_alignment
   alignment down to cacheLineBits. Declared [simp] so mask goals discharge
   automatically. *)
lemma pptrBaseOffset_cacheLineBits_aligned[simp]:
"pptrBaseOffset && mask cacheLineBits = 0"
unfolding is_aligned_mask[symmetric]
by (rule is_aligned_weaken[OF pptrBaseOffset_aligned cacheLineBits_leq_pptrBaseOffset_alignment])

(* Dual direction: physical-to-kernel-pointer translation also preserves the
   low cacheLineBits bits, since it adds the (cache-line aligned, see above)
   pptrBaseOffset. Declared [simp]. *)
lemma ptrFromPAddr_mask_cacheLineBits[simp]:
"ptrFromPAddr v && mask cacheLineBits = v && mask cacheLineBits"
by (simp add: ptrFromPAddr_def add_mask_ignore)

(* ----- *)

(* Input abbreviations for API object types *)
(* disambiguates names *)

Expand Down Expand Up @@ -645,26 +689,6 @@ abbreviation(input)
where
"prioInvalid == seL4_InvalidPrio"

(* caches *)

(* Old fixed-value formulation (removed side of this diff): cacheLineSize is
   hard-wired to 6 rather than derived from the kernel configuration, and the
   mask lemmas below are proved by unfolding concrete constants and bit-blasting
   (word_bitwise) instead of the generic alignment argument used by their
   cacheLineBits replacements. *)
definition cacheLineSize :: nat where
"cacheLineSize \<equiv> 6"

lemma addrFromPPtr_mask_cacheLineSize:
"addrFromPPtr ptr && mask cacheLineSize = ptr && mask cacheLineSize"
apply (simp add: addrFromPPtr_def AARCH64.pptrBase_def pptrBaseOffset_def canonical_bit_def
paddrBase_def cacheLineSize_def mask_def)
apply word_bitwise
done

lemma pptrBaseOffset_cacheLineSize_aligned[simp]:
"pptrBaseOffset && mask cacheLineSize = 0"
by (simp add: pptrBaseOffset_def paddrBase_def pptrBase_def cacheLineSize_def mask_def)

lemma ptrFromPAddr_mask_cacheLineSize[simp]:
"ptrFromPAddr v && mask cacheLineSize = v && mask cacheLineSize"
by (simp add: ptrFromPAddr_def add_mask_ignore)

(* The magic 4 comes out of the bitfield generator -- this applies to all versions of the kernel. *)
lemma ThreadState_Restart_mask[simp]:
"(scast ThreadState_Restart::machine_word) && mask 4 = scast ThreadState_Restart"
Expand Down

0 comments on commit 0526d2c

Please sign in to comment.